code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import json
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the first 1000 taxi trips; the POLYLINE column holds a JSON-encoded
# list of [longitude, latitude] GPS points, decoded while reading.
df = pd.read_csv('train.csv',
                 converters={'POLYLINE': lambda x: json.loads(x)[:]},
                 nrows=1000)
latLong = np.array([])
# Keep only trips that actually contain at least one GPS point.
allTrajectoryLatLong = [p for p in df['POLYLINE'] if len(p) > 0]
# Draw every trajectory as a polyline; each point is stored as
# [longitude, latitude], so index 1 goes on the x-axis here.
plt.figure()
for trajectory in allTrajectoryLatLong:
    xs = [point[1] for point in trajectory]
    ys = [point[0] for point in trajectory]
    plt.plot(xs, ys)
#plt.axis('off')
plt.title('Taxi trip end points')
plt.show()
#plt.savefig("taxi_trip_end_points.png") | [
"json.loads",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show"
] | [((229, 241), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (237, 241), True, 'import numpy as np\n'), ((975, 987), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (985, 987), True, 'import matplotlib.pyplot as plt\n'), ((1178, 1211), 'matplotlib.pyplot.title', 'plt.title', (['"""Taxi trip end points"""'], {}), "('Taxi trip end points')\n", (1187, 1211), True, 'import matplotlib.pyplot as plt\n'), ((1212, 1222), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1220, 1222), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1160), 'matplotlib.pyplot.plot', 'plt.plot', (['Lats', 'Longs'], {}), '(Lats, Longs)\n', (1147, 1160), True, 'import matplotlib.pyplot as plt\n'), ((189, 202), 'json.loads', 'json.loads', (['x'], {}), '(x)\n', (199, 202), False, 'import json\n')] |
from __future__ import print_function, division
import torch
import torch.nn as nn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
from torch.utils.data import Dataset, DataLoader
import os
from PIL import Image
class TestDataset(Dataset):
    """Folder-backed image classification dataset.

    Every file in ``root_dir`` is one sample; its label is encoded as the
    first character of the file name and looked up in ``label_name_list``.
    """
    def __init__(self, root_dir, transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            transform (callable, optional): Optional transform applied to
                each sample.
        """
        self.root_dir = root_dir
        self.transform = transform
        self.filelist = self.get_filelist()
        self.label_name_list = ['H', 'N', 'F', 'T', 'I', 'M', 'B', 'D', 'C', 'G', 'Y']
    def get_filelist(self):
        """Return the names of the files inside the dataset directory."""
        return os.listdir(self.root_dir)
    def __len__(self):
        return len(self.filelist)
    def __getitem__(self, idx):
        fname = self.filelist[idx]
        path = os.path.join(self.root_dir, fname)
        # Keep only the first three channels so RGBA images lose their alpha.
        rgb = np.array(Image.open(path))[:, :, :3]
        sample = Image.fromarray(rgb)
        if self.transform:
            sample = self.transform(sample)
        label = self.label_name_list.index(fname[0])
        return sample, label
def get_dataloader(batch_size=5, patch_size=224,
                   root_dir='/mnt/DATA_CRLM/Patches/Patches_Level0/Patches_224/All/',
                   num_workers=64,
                   mean=(0.485, 0.456, 0.406),
                   std=(0.5, 0.5, 0.5),
                   color_aug_param=(0.2, 0.2, 0.1, 0.03)):
    """Build a shuffled DataLoader over TestDataset with flip/colour jitter.

    Args:
        batch_size: samples per batch.
        patch_size: nominal patch edge length (currently unused; the random
            resized crop that consumed it is commented out).
        root_dir: directory containing the image patches.
        num_workers: number of DataLoader worker processes.
        mean, std: per-channel normalization statistics.
        color_aug_param: (brightness, contrast, hue, saturation) jitter
            strengths — note the original hue/saturation ordering is kept.

    Returns:
        torch.utils.data.DataLoader yielding (image_tensor, label) batches.
    """
    #std = [0.229, 0.224, 0.225],
    # FIX: list defaults replaced by tuples to avoid the shared
    # mutable-default-argument pitfall; the values are unchanged.
    data_transforms = transforms.Compose([
        #transforms.RandomResizedCrop(patch_size),
        transforms.RandomHorizontalFlip(),
        transforms.RandomVerticalFlip(),
        transforms.ColorJitter(brightness=color_aug_param[0],
                               contrast=color_aug_param[1],
                               hue=color_aug_param[2],
                               saturation=color_aug_param[3]),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean, std=std),
    ])
    train_dataset = TestDataset(root_dir, transform=data_transforms)
    # Use the DataLoader name imported at the top of the file for consistency.
    dataset_loader = DataLoader(train_dataset, batch_size=batch_size,
                                shuffle=True, num_workers=num_workers)
    return dataset_loader
"os.listdir",
"PIL.Image.open",
"torchvision.transforms.RandomHorizontalFlip",
"os.path.join",
"torchvision.transforms.RandomVerticalFlip",
"torchvision.transforms.ColorJitter",
"numpy.array",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.transforms.ToTensor"
] | [((2173, 2282), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'num_workers'}), '(train_dataset, batch_size=batch_size, shuffle=\n True, num_workers=num_workers)\n', (2200, 2282), False, 'import torch\n'), ((829, 854), 'os.listdir', 'os.listdir', (['self.root_dir'], {}), '(self.root_dir)\n', (839, 854), False, 'import os\n'), ((967, 1014), 'os.path.join', 'os.path.join', (['self.root_dir', 'self.filelist[idx]'], {}), '(self.root_dir, self.filelist[idx])\n', (979, 1014), False, 'import os\n'), ((1030, 1050), 'PIL.Image.open', 'Image.open', (['img_name'], {}), '(img_name)\n', (1040, 1050), False, 'from PIL import Image\n'), ((1743, 1776), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1774, 1776), False, 'from torchvision import datasets, models, transforms\n'), ((1786, 1817), 'torchvision.transforms.RandomVerticalFlip', 'transforms.RandomVerticalFlip', ([], {}), '()\n', (1815, 1817), False, 'from torchvision import datasets, models, transforms\n'), ((1827, 1969), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': 'color_aug_param[0]', 'contrast': 'color_aug_param[1]', 'hue': 'color_aug_param[2]', 'saturation': 'color_aug_param[3]'}), '(brightness=color_aug_param[0], contrast=\n color_aug_param[1], hue=color_aug_param[2], saturation=color_aug_param[3])\n', (1849, 1969), False, 'from torchvision import datasets, models, transforms\n'), ((2004, 2025), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2023, 2025), False, 'from torchvision import datasets, models, transforms\n'), ((2035, 2075), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (2055, 2075), False, 'from torchvision import datasets, models, transforms\n'), ((1084, 1099), 'numpy.array', 'np.array', (['image'], {}), 
'(image)\n', (1092, 1099), True, 'import numpy as np\n')] |
"""Code from https://github.com/tambetm/simple_dqn/blob/master/src/replay_memory.py"""
import os
import random
import logging
import numpy as np
from .utils import save_npy, load_npy
class ReplayMemory:
  """Cyclic (ring-buffer) replay memory for DQN-style agents.

  Screens, actions, rewards and terminal flags live in parallel
  pre-allocated arrays of length `memory_size`; full states (stacks of
  `history_length` consecutive screens) are rebuilt on demand by
  `getState`, so each screen is stored only once.

  NOTE(review): `save`, `load`, `sample_f` and `sample_k` reference
  `self.states_plus_1` / `self.states_plus_2`, whose allocation is
  commented out in `__init__` — calling any of them raises
  AttributeError as written.
  """
  def __init__(self, config, model_dir):
    self.model_dir = model_dir
    self.cnn_format = config.cnn_format
    self.memory_size = config.memory_size
    self.actions = np.empty(self.memory_size, dtype = np.uint8)
    # NOTE(review): `np.integer` / `np.bool` as dtypes are deprecated and
    # removed in modern NumPy (>= 1.24); np.int64 / bool would be needed.
    self.rewards = np.empty(self.memory_size, dtype = np.integer)
    self.screens = np.empty((self.memory_size, config.screen_height, config.screen_width), dtype = np.float16)
    self.terminals = np.empty(self.memory_size, dtype = np.bool)
    self.history_length = config.history_length
    self.dims = (config.screen_height, config.screen_width)
    self.batch_size = config.batch_size
    # count: number of valid entries stored so far (saturates at memory_size);
    # current: next write position in the ring buffer.
    self.count = 0
    self.current = 0
    self.sequnce_length = config.sequnce_length
    # pre-allocate prestates and poststates for minibatch
    self.prestates = np.empty((self.batch_size, self.history_length) + self.dims, dtype = np.float16)
    self.poststates = np.empty((self.batch_size, self.history_length) + self.dims, dtype = np.float16)
    #self.states = np.empty((self.batch_size, self.history_length) + self.dims, dtype = np.float16)
    #self.states_plus_1 = np.empty((self.batch_size, self.history_length) + self.dims, dtype = np.float16)
    #self.states_plus_2 = np.empty((self.batch_size, self.history_length) + self.dims, dtype = np.float16)
    # Sequence-major buffer used by sample(); note the *_plus_1/_plus_2
    # buffers above were replaced by this but are still referenced elsewhere.
    self.states = np.empty((self.sequnce_length, self.batch_size, self.history_length) + self.dims, dtype = np.float16)
  def add(self, screen, reward, action, terminal):
    """Append one (screen, reward, action, terminal) transition, overwriting
    the oldest entry once the ring buffer is full."""
    assert screen.shape == self.dims
    # NB! screen is post-state, after action and reward
    self.actions[self.current] = action
    self.rewards[self.current] = reward
    self.screens[self.current, ...] = screen
    self.terminals[self.current] = terminal
    self.count = max(self.count, self.current + 1)
    self.current = (self.current + 1) % self.memory_size
  def getState(self, index):
    """Return the stack of `history_length` screens ending at `index`
    (inclusive), as a (history_length, H, W) array."""
    assert self.count > 0, "replay memory is empy, use at least --random_steps 1"
    # normalize index to expected range, allows negative indexes
    index = index % self.count
    # if is not in the beginning of matrix
    if index >= self.history_length - 1:
      # use faster slicing
      return self.screens[(index - (self.history_length - 1)):(index + 1), ...]
    else:
      # otherwise normalize indexes and use slower list based access
      indexes = [(index - i) % self.count for i in reversed(range(self.history_length))]
      return self.screens[indexes, ...]
  def sample_g(self):
    """Sample a minibatch of (prestate, action, reward, poststate, terminal),
    rejecting indexes that straddle the write pointer or an episode end."""
    # memory must include poststate, prestate and history
    assert self.count > self.history_length
    # sample random indexes
    indexes = []
    while len(indexes) < self.batch_size:
      # find random index
      while True:
        # sample one index (ignore states wraping over
        index = random.randint(self.history_length, self.count - 1)
        # if wraps over current pointer, then get new one
        if index >= self.current and index - self.history_length < self.current:
          continue
        # if wraps over episode end, then get new one
        # NB! poststate (last screen) can be terminal state!
        if self.terminals[(index - self.history_length):index].any():
          continue
        # otherwise use this index
        break
      # NB! having index first is fastest in C-order matrices
      self.prestates[len(indexes), ...] = self.getState(index - 1)
      self.poststates[len(indexes), ...] = self.getState(index)
      indexes.append(index)
    actions = self.actions[indexes]
    rewards = self.rewards[indexes]
    terminals = self.terminals[indexes]
    if self.cnn_format == 'NHWC':
      return np.transpose(self.prestates, (0, 2, 3, 1)), actions, \
        rewards, np.transpose(self.poststates, (0, 2, 3, 1)), terminals
    else:
      return self.prestates, actions, rewards, self.poststates, terminals
  def save(self):
    """Persist all buffers to `model_dir` as .npy files.

    NOTE(review): references `self.states_plus_1/_2`, which `__init__`
    never allocates — raises AttributeError as written.
    """
    for idx, (name, array) in enumerate(
        zip(['actions', 'rewards', 'screens', 'terminals', 'states', 'states_plus_1', 'states_plus_2'],
            [self.actions, self.rewards, self.screens, self.terminals, self.states, self.states_plus_1, self.states_plus_2])):
      save_npy(array, os.path.join(self.model_dir, name))
  def load(self):
    """Reload buffers from `model_dir`.

    NOTE(review): same missing-attribute problem as `save`, and the result
    is bound to the local `array` only — the instance buffers are never
    actually updated by this loop.
    """
    for idx, (name, array) in enumerate(
        zip(['actions', 'rewards', 'screens', 'terminals', 'states', 'states_plus_1', 'states_plus_2'],
            [self.actions, self.rewards, self.screens, self.terminals, self.states, self.states_plus_1, self.states_plus_2])):
      array = load_npy(os.path.join(self.model_dir, name))
  def sample_f(self):
    """Sample two-step transitions (s, a, r, s', a', r', s'', terminal).

    NOTE(review): writes into `self.states_plus_1/_2`, which `__init__`
    never allocates — raises AttributeError as written.
    """
    # memory must include poststate, prestate and history
    assert self.count > self.history_length
    # sample random indexes
    indexes = []
    indexes_plus_1 = []
    while len(indexes) < self.batch_size:
      # find random index
      while True:
        # sample one index (ignore states wraping over
        index = random.randint(self.history_length, self.count - 1)
        # if wraps over current pointer, then get new one
        if index >= self.current and index - self.history_length < self.current:
          continue
        # if wraps over episode end, then get new one
        # NB! poststate (last screen) can be terminal state!
        if self.terminals[(index - self.history_length):index].any():
          continue
        # otherwise use this index
        break
      # NB! having index first is fastest in C-order matrices
      self.states[len(indexes), ...] = self.getState(index - 2)
      self.states_plus_1[len(indexes), ...] = self.getState(index - 1)
      self.states_plus_2[len(indexes), ...] = self.getState(index)
      indexes.append(index -1)
      indexes_plus_1.append(index)
    actions = self.actions[indexes]
    rewards = self.rewards[indexes]
    actions_plus_1 = self.actions[indexes_plus_1]
    rewards_plus_1 = self.rewards[indexes_plus_1]
    terminals = self.terminals[indexes]
    if self.cnn_format == 'NHWC':
      return np.transpose(self.states, (0, 2, 3, 1)), actions, rewards, np.transpose(self.states_plus_1, (0, 2, 3, 1)), actions_plus_1, \
        rewards_plus_1, np.transpose(self.states_plus_2, (0, 2, 3, 1)), terminals
    else:
      return self.states, actions, rewards, self.states_plus_1, actions_plus_1, rewards_plus_1, self.states_plus_2, terminals
  def isValidIndex(self, index):
    """Return False if `index` straddles the write pointer or an episode
    boundary (same rejection rules used inline by the other samplers)."""
    if index >= self.current and index - self.history_length < self.current:
      return False
    if self.terminals[(index - self.history_length):index].any():
      return False
    return True
  def sample(self):
    """Sample length-`sequnce_length` state sequences for recurrent training.

    NOTE(review): in the validity loop below, `break` exits only the inner
    `for`, and nothing ever breaks out of the `while True` — as written
    this loop cannot terminate. A `for ... else` retry plus an outer
    `break` was probably intended.
    """
    # memory must include poststate, prestate and history
    assert self.count > self.history_length
    # sample random indexes
    indexes = []
    indexes_end = []
    while len(indexes) < self.batch_size:
      # find random index
      while True:
        # sample one index (ignore states wraping over
        index = random.randint(self.history_length, self.count - 1)
        for i in range(self.sequnce_length):
          if not self.isValidIndex(index - i):
            continue
          break
      for i in range(self.sequnce_length):
        self.states[self.sequnce_length - i - 1, len(indexes), ...] = self.getState(index - i)
      indexes.append([ index - i for i in range(self.sequnce_length - 1)[::-1]])
      indexes_end.append(index)
    actions = []
    rewards = []
    for each in zip(*indexes):
      actions.append(self.actions[list(each)])
      rewards.append(self.rewards[list(each)])
    terminals = self.terminals[indexes_end]
    #restates = []
    #if self.cnn_format == 'NHWC':
    #  for i in range(self.sequnce_length):
    #    restates.append(np.transpose(self.states[i], (0, 2, 3, 1)))
    #else :
    #  for i in range(self.sequnce_length):
    #    restates.append(self.states[i])
    #return restates, actions, rewards, terminals
    if self.cnn_format == 'NHWC':
      return np.transpose(self.states, (0, 1, 3, 4, 2)), actions, rewards, terminals
    else:
      return self.states, actions, rewards, terminals
  def sample_k(self):
    """Like `sample_f` but returns the three state tensors (and the action /
    reward pairs) grouped into lists.

    NOTE(review): same missing `states_plus_1/_2` attribute problem as
    `sample_f` — raises AttributeError as written.
    """
    # memory must include poststate, prestate and history
    assert self.count > self.history_length
    # sample random indexes
    indexes = []
    indexes_plus_1 = []
    while len(indexes) < self.batch_size:
      # find random index
      while True:
        # sample one index (ignore states wraping over
        index = random.randint(self.history_length, self.count - 1)
        # if wraps over current pointer, then get new one
        if index >= self.current and index - self.history_length < self.current:
          continue
        # if wraps over episode end, then get new one
        # NB! poststate (last screen) can be terminal state!
        if self.terminals[(index - self.history_length):index].any():
          continue
        # otherwise use this index
        break
      # NB! having index first is fastest in C-order matrices
      self.states[len(indexes), ...] = self.getState(index - 2)
      self.states_plus_1[len(indexes), ...] = self.getState(index - 1)
      self.states_plus_2[len(indexes), ...] = self.getState(index)
      indexes.append(index -1)
      indexes_plus_1.append(index)
    actions = self.actions[indexes]
    rewards = self.rewards[indexes]
    actions_plus_1 = self.actions[indexes_plus_1]
    rewards_plus_1 = self.rewards[indexes_plus_1]
    terminals = self.terminals[indexes]
    if self.cnn_format == 'NHWC':
      return [np.transpose(self.states, (0, 2, 3, 1)),np.transpose(self.states_plus_1, (0, 2, 3, 1)),np.transpose(self.states_plus_2, (0, 2, 3, 1))], [actions,actions_plus_1], [rewards,rewards_plus_1], terminals
    else:
      return [self.states,self.states_plus_1,self.states_plus_2], [actions,actions_plus_1], [rewards,rewards_plus_1], terminals
| [
"os.path.join",
"numpy.transpose",
"numpy.empty",
"random.randint"
] | [((380, 422), 'numpy.empty', 'np.empty', (['self.memory_size'], {'dtype': 'np.uint8'}), '(self.memory_size, dtype=np.uint8)\n', (388, 422), True, 'import numpy as np\n'), ((444, 488), 'numpy.empty', 'np.empty', (['self.memory_size'], {'dtype': 'np.integer'}), '(self.memory_size, dtype=np.integer)\n', (452, 488), True, 'import numpy as np\n'), ((510, 603), 'numpy.empty', 'np.empty', (['(self.memory_size, config.screen_height, config.screen_width)'], {'dtype': 'np.float16'}), '((self.memory_size, config.screen_height, config.screen_width),\n dtype=np.float16)\n', (518, 603), True, 'import numpy as np\n'), ((623, 664), 'numpy.empty', 'np.empty', (['self.memory_size'], {'dtype': 'np.bool'}), '(self.memory_size, dtype=np.bool)\n', (631, 664), True, 'import numpy as np\n'), ((983, 1061), 'numpy.empty', 'np.empty', (['((self.batch_size, self.history_length) + self.dims)'], {'dtype': 'np.float16'}), '((self.batch_size, self.history_length) + self.dims, dtype=np.float16)\n', (991, 1061), True, 'import numpy as np\n'), ((1086, 1164), 'numpy.empty', 'np.empty', (['((self.batch_size, self.history_length) + self.dims)'], {'dtype': 'np.float16'}), '((self.batch_size, self.history_length) + self.dims, dtype=np.float16)\n', (1094, 1164), True, 'import numpy as np\n'), ((1509, 1613), 'numpy.empty', 'np.empty', (['((self.sequnce_length, self.batch_size, self.history_length) + self.dims)'], {'dtype': 'np.float16'}), '((self.sequnce_length, self.batch_size, self.history_length) + self\n .dims, dtype=np.float16)\n', (1517, 1613), True, 'import numpy as np\n'), ((2970, 3021), 'random.randint', 'random.randint', (['self.history_length', '(self.count - 1)'], {}), '(self.history_length, self.count - 1)\n', (2984, 3021), False, 'import random\n'), ((3822, 3864), 'numpy.transpose', 'np.transpose', (['self.prestates', '(0, 2, 3, 1)'], {}), '(self.prestates, (0, 2, 3, 1))\n', (3834, 3864), True, 'import numpy as np\n'), ((3894, 3937), 'numpy.transpose', 'np.transpose', (['self.poststates', 
'(0, 2, 3, 1)'], {}), '(self.poststates, (0, 2, 3, 1))\n', (3906, 3937), True, 'import numpy as np\n'), ((4346, 4380), 'os.path.join', 'os.path.join', (['self.model_dir', 'name'], {}), '(self.model_dir, name)\n', (4358, 4380), False, 'import os\n'), ((4696, 4730), 'os.path.join', 'os.path.join', (['self.model_dir', 'name'], {}), '(self.model_dir, name)\n', (4708, 4730), False, 'import os\n'), ((5091, 5142), 'random.randint', 'random.randint', (['self.history_length', '(self.count - 1)'], {}), '(self.history_length, self.count - 1)\n', (5105, 5142), False, 'import random\n'), ((6152, 6191), 'numpy.transpose', 'np.transpose', (['self.states', '(0, 2, 3, 1)'], {}), '(self.states, (0, 2, 3, 1))\n', (6164, 6191), True, 'import numpy as np\n'), ((6211, 6257), 'numpy.transpose', 'np.transpose', (['self.states_plus_1', '(0, 2, 3, 1)'], {}), '(self.states_plus_1, (0, 2, 3, 1))\n', (6223, 6257), True, 'import numpy as np\n'), ((6301, 6347), 'numpy.transpose', 'np.transpose', (['self.states_plus_2', '(0, 2, 3, 1)'], {}), '(self.states_plus_2, (0, 2, 3, 1))\n', (6313, 6347), True, 'import numpy as np\n'), ((7084, 7135), 'random.randint', 'random.randint', (['self.history_length', '(self.count - 1)'], {}), '(self.history_length, self.count - 1)\n', (7098, 7135), False, 'import random\n'), ((8091, 8133), 'numpy.transpose', 'np.transpose', (['self.states', '(0, 1, 3, 4, 2)'], {}), '(self.states, (0, 1, 3, 4, 2))\n', (8103, 8133), True, 'import numpy as np\n'), ((8582, 8633), 'random.randint', 'random.randint', (['self.history_length', '(self.count - 1)'], {}), '(self.history_length, self.count - 1)\n', (8596, 8633), False, 'import random\n'), ((9644, 9683), 'numpy.transpose', 'np.transpose', (['self.states', '(0, 2, 3, 1)'], {}), '(self.states, (0, 2, 3, 1))\n', (9656, 9683), True, 'import numpy as np\n'), ((9684, 9730), 'numpy.transpose', 'np.transpose', (['self.states_plus_1', '(0, 2, 3, 1)'], {}), '(self.states_plus_1, (0, 2, 3, 1))\n', (9696, 9730), True, 'import numpy as 
np\n'), ((9731, 9777), 'numpy.transpose', 'np.transpose', (['self.states_plus_2', '(0, 2, 3, 1)'], {}), '(self.states_plus_2, (0, 2, 3, 1))\n', (9743, 9777), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""svhn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1cr_mqeEfw7how-r9MAqqZqJqyepX31yS
"""
import numpy as np
import matplotlib.pyplot as plt
#import seaborn as sns
import h5py
#import tensorflow as tf
#import os
#import time
#import math
#from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
#from datetime import timedelta
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation
from keras.layers.normalization import BatchNormalization
#from keras.preprocessing import image
from keras.optimizers import adam
from tensorflow import keras
plt.rcParams['figure.figsize'] = (16.0, 4.0)  # default all figures to a wide, short layout
import urllib.request
from scipy.io import loadmat
from sklearn.preprocessing import OneHotEncoder
# Dataset loading
def load_data(path):
    """Load an SVHN-style MAT file and return its image array and labels."""
    mat = loadmat(path)
    return mat['X'], mat['y']
def balanced_subsample(y, s):
    """Return indexes of a class-balanced subsample: `s` per label in `y`."""
    sample = []
    for label in np.unique(y):
        # All positions carrying this label, then s of them without replacement.
        candidates = np.where(y == label)[0]
        chosen = np.random.choice(candidates, size=s, replace=False)
        sample.extend(chosen.tolist())
    return sample
def rgb2gray(images):
    """Convert RGB images to greyscale using ITU-R BT.601 luma weights.

    Accepts either a single image of shape (H, W, 3) or a batch of shape
    (N, H, W, 3); a trailing channel axis of size 1 is appended so the
    result can feed a convolutional layer.

    FIX: the original used `axis=3`, which is identical for batched input
    but raises on a single (H, W, 3) image — exactly what `test()` below
    passes in. `axis=-1` handles both.
    """
    return np.expand_dims(np.dot(images, [0.2989, 0.5870, 0.1140]), axis=-1)
def downloadset():
    """Download the SVHN cropped-digit train/test MAT files into the CWD."""
    urllib.request.urlretrieve("http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat")
    urllib.request.urlretrieve("http://ufldl.stanford.edu/housenumbers/test_32x32.mat", "test_32x32.mat")
def preprocessing():
    """Download SVHN, build balanced greyscale train/val/test splits, and
    write them to 'SVHN_single_grey.h5'."""
    downloadset()
    X_train, y_train = load_data('train_32x32.mat')
    X_test, y_test = load_data('test_32x32.mat')
    # MAT files are (H, W, C, N); move the sample axis to the front and
    # flatten the label column.
    X_train = X_train.transpose((3, 0, 1, 2))
    X_test = X_test.transpose((3, 0, 1, 2))
    y_train = y_train[:, 0]
    y_test = y_test[:, 0]
    # SVHN encodes the digit 0 as label 10; remap it to 0.
    y_train[y_train == 10] = 0
    y_test[y_test == 10] = 0
    # Carve a class-balanced validation split (600 per class) out of the
    # training data and drop those rows from the training arrays.
    val_idx = balanced_subsample(y_train, 600)
    X_val = np.copy(X_train[val_idx])
    y_val = np.copy(y_train[val_idx])
    X_train = np.delete(X_train, val_idx, axis=0)
    y_train = np.delete(y_train, val_idx, axis=0)
    # One-hot encode all label sets with an encoder fitted on training labels.
    enc = OneHotEncoder().fit(y_train.reshape(-1, 1))
    y_train = enc.transform(y_train.reshape(-1, 1)).toarray()
    y_test = enc.transform(y_test.reshape(-1, 1)).toarray()
    y_val = enc.transform(y_val.reshape(-1, 1)).toarray()
    # Persist greyscale float32 images plus one-hot labels to HDF5.
    h5f = h5py.File('SVHN_single_grey.h5', 'w')
    h5f.create_dataset('X_train', data=rgb2gray(X_train).astype(np.float32))
    h5f.create_dataset('y_train', data=y_train)
    h5f.create_dataset('X_test', data=rgb2gray(X_test).astype(np.float32))
    h5f.create_dataset('y_test', data=y_test)
    h5f.create_dataset('X_val', data=rgb2gray(X_val).astype(np.float32))
    h5f.create_dataset('y_val', data=y_val)
    h5f.close()
def build_model(input_shape=(32, 32, 1)):
    """Build and compile the SVHN digit-classification CNN.

    Three convolutional stages (32/64/128 filters) of two Conv-BN-ReLU
    blocks each, ending in max-pooling + dropout, followed by a dense head
    with softmax over the 10 digit classes.
    """
    layers = [
        Conv2D(32, kernel_size=3, input_shape=input_shape, padding="same"),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(32, 3, padding="same"),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=2),
        Dropout(0.3),
        Conv2D(64, 3),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(64, 3, padding="same"),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=2),
        Dropout(0.3),
        Conv2D(128, 3),
        BatchNormalization(),
        Activation('relu'),
        Conv2D(128, 3, padding="same"),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling2D(pool_size=2),
        Dropout(0.3),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.3),
        Dense(10, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=adam(lr=0.001, decay=1e-6),
                  metrics=['accuracy'])
    return model
def train_model(x_train, y_train, x_val, y_val):
    """Train a freshly built CNN with early stopping on validation accuracy."""
    stopper = keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0,
                                          patience=5, verbose=0, mode='max')
    model = build_model()
    model.fit(x_train, y_train,
              batch_size=64,
              epochs=50,
              verbose=1,
              validation_data=(x_val, y_val),
              callbacks=[stopper])
    return model
def traintest():
    """Run the full pipeline: preprocess, train, evaluate, save the model.

    Returns a multi-line string with one F1 score per digit class.
    """
    preprocessing()
    # Open the file as readonly
    h5f = h5py.File('SVHN_single_grey.h5', 'r')
    X_train, y_train = h5f['X_train'][:], h5f['y_train'][:]
    X_test, y_test = h5f['X_test'][:], h5f['y_test'][:]
    X_val, y_val = h5f['X_val'][:], h5f['y_val'][:]
    h5f.close()
    cnnmodel = train_model(X_train, y_train, X_val, y_val)
    score = cnnmodel.evaluate(X_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    # One-hot encode predictions with the same class ordering as training.
    y_ = np.array([1,2,3,4,5,6,7,8,9,0])
    enc = OneHotEncoder().fit(y_.reshape(-1, 1))
    y_pred = cnnmodel.predict_classes(X_test, batch_size=32, verbose=1)
    y_pred = enc.transform(y_pred.reshape(-1, 1)).toarray()
    f1 = f1_score(y_test, y_pred, average = None)  # one F1 score per class
    output_string = ''.join(
        "F1 score(target = {}): {}".format(i, class_f1) + '\n'
        for i, class_f1 in enumerate(f1)
    )
    cnnmodel.save("svhn.h5")
    return output_string
from keras.models import load_model
from PIL import Image
def test(file):
    """Predict the digit shown in a single 32x32 RGB image file.

    Args:
        file: path to an image readable by PIL.

    Returns:
        The predicted digit class (int), or None if the image cannot be
        processed.
    """
    # Model loading
    svhn_model = load_model('svhn.h5')
    graph = Image.open(file)
    grapharr = np.array(graph)
    # BUG FIX: the original `grapharr.shape == (32,32,3)` was a bare
    # comparison whose result was discarded, so the size was never checked.
    if grapharr.ndim != 3 or grapharr.shape[0] != 32 or grapharr.shape[1] != 32 or grapharr.shape[2] < 3:
        print ("Image size wrong, should be 32by32 pixels image")
        return None
    try:
        # Drop a possible alpha channel, convert to greyscale, and reshape
        # into the (batch, H, W, C) layout the CNN expects.
        graph_grey = rgb2gray(grapharr[:, :, :3]).astype(np.float32)
        graph_grey = graph_grey.reshape(1, 32, 32, 1)
        graph_pred = svhn_model.predict_classes(graph_grey, batch_size = 32, verbose = 1)
        return graph_pred[0]
    except Exception:
        # Narrowed from a bare `except:`; stays best-effort like the original.
        print ("Image size wrong, should be 32by32 pixels image")
        return None
#traintest()
#result1 = test("mytestimage1.png")
#print(result1) | [
"keras.layers.Conv2D",
"scipy.io.loadmat",
"tensorflow.keras.callbacks.EarlyStopping",
"numpy.array",
"keras.layers.Activation",
"keras.layers.Dense",
"numpy.where",
"numpy.delete",
"numpy.dot",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.layers.normalization.BatchNormalization... | [((984, 997), 'scipy.io.loadmat', 'loadmat', (['path'], {}), '(path)\n', (991, 997), False, 'from scipy.io import loadmat\n'), ((1187, 1199), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1196, 1199), True, 'import numpy as np\n'), ((2780, 2821), 'numpy.delete', 'np.delete', (['X_train', 'train_samples'], {'axis': '(0)'}), '(X_train, train_samples, axis=0)\n', (2789, 2821), True, 'import numpy as np\n'), ((2836, 2877), 'numpy.delete', 'np.delete', (['y_train', 'train_samples'], {'axis': '(0)'}), '(y_train, train_samples, axis=0)\n', (2845, 2877), True, 'import numpy as np\n'), ((3649, 3686), 'h5py.File', 'h5py.File', (['"""SVHN_single_grey.h5"""', '"""w"""'], {}), "('SVHN_single_grey.h5', 'w')\n", (3658, 3686), False, 'import h5py\n'), ((4108, 4120), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4118, 4120), False, 'from keras.models import Sequential\n'), ((5422, 5522), 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0, patience=5,\n verbose=0, mode='max')\n", (5451, 5522), False, 'from tensorflow import keras\n'), ((5826, 5863), 'h5py.File', 'h5py.File', (['"""SVHN_single_grey.h5"""', '"""r"""'], {}), "('SVHN_single_grey.h5', 'r')\n", (5835, 5863), False, 'import h5py\n'), ((6340, 6380), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 0]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])\n', (6348, 6380), True, 'import numpy as np\n'), ((6567, 6605), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'y_pred'], {'average': 'None'}), '(y_test, y_pred, average=None)\n', (6575, 6605), False, 'from sklearn.metrics import f1_score\n'), ((7040, 7061), 'keras.models.load_model', 'load_model', (['"""svhn.h5"""'], {}), "('svhn.h5')\n", (7050, 7061), False, 'from keras.models import load_model\n'), ((7105, 7121), 
'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (7115, 7121), False, 'from PIL import Image\n'), ((7137, 7152), 'numpy.array', 'np.array', (['graph'], {}), '(graph)\n', (7145, 7152), True, 'import numpy as np\n'), ((1371, 1418), 'numpy.random.choice', 'np.random.choice', (['images'], {'size': 's', 'replace': '(False)'}), '(images, size=s, replace=False)\n', (1387, 1418), True, 'import numpy as np\n'), ((1633, 1671), 'numpy.dot', 'np.dot', (['images', '[0.2989, 0.587, 0.114]'], {}), '(images, [0.2989, 0.587, 0.114])\n', (1639, 1671), True, 'import numpy as np\n'), ((2655, 2686), 'numpy.copy', 'np.copy', (['X_train[train_samples]'], {}), '(X_train[train_samples])\n', (2662, 2686), True, 'import numpy as np\n'), ((2688, 2719), 'numpy.copy', 'np.copy', (['y_train[train_samples]'], {}), '(y_train[train_samples])\n', (2695, 2719), True, 'import numpy as np\n'), ((4135, 4201), 'keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3)', 'input_shape': 'input_shape', 'padding': '"""same"""'}), "(32, kernel_size=3, input_shape=input_shape, padding='same')\n", (4141, 4201), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4217, 4237), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4235, 4237), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4253, 4271), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4263, 4271), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4287, 4316), 'keras.layers.Conv2D', 'Conv2D', (['(32)', '(3)'], {'padding': '"""same"""'}), "(32, 3, padding='same')\n", (4293, 4316), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4332, 4352), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4350, 4352), False, 'from keras.layers.normalization import 
BatchNormalization\n'), ((4368, 4386), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4378, 4386), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4402, 4427), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4414, 4427), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4443, 4455), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (4450, 4455), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4472, 4485), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {}), '(64, 3)\n', (4478, 4485), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4501, 4521), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4519, 4521), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4537, 4555), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4547, 4555), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4571, 4600), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'padding': '"""same"""'}), "(64, 3, padding='same')\n", (4577, 4600), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4616, 4636), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4634, 4636), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4652, 4670), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4662, 4670), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4686, 4711), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4698, 4711), False, 'from keras.layers import 
Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4727, 4739), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (4734, 4739), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4756, 4770), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3)'], {}), '(128, 3)\n', (4762, 4770), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4786, 4806), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4804, 4806), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4822, 4840), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4832, 4840), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4856, 4886), 'keras.layers.Conv2D', 'Conv2D', (['(128)', '(3)'], {'padding': '"""same"""'}), "(128, 3, padding='same')\n", (4862, 4886), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4902, 4922), 'keras.layers.normalization.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (4920, 4922), False, 'from keras.layers.normalization import BatchNormalization\n'), ((4938, 4956), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4948, 4956), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((4972, 4997), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2)'}), '(pool_size=2)\n', (4984, 4997), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((5013, 5025), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (5020, 5025), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((5042, 5051), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5049, 5051), False, 'from keras.layers import Dense, Conv2D, 
MaxPooling2D, Dropout, Flatten, Activation\n'), ((5067, 5096), 'keras.layers.Dense', 'Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (5072, 5096), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((5112, 5124), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (5119, 5124), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((5141, 5172), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""softmax"""'}), "(10, activation='softmax')\n", (5146, 5172), False, 'from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, Activation\n'), ((1278, 1298), 'numpy.where', 'np.where', (['(y == label)'], {}), '(y == label)\n', (1286, 1298), True, 'import numpy as np\n'), ((3323, 3338), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (3336, 3338), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((5265, 5292), 'keras.optimizers.adam', 'adam', ([], {'lr': '(0.001)', 'decay': '(1e-06)'}), '(lr=0.001, decay=1e-06)\n', (5269, 5292), False, 'from keras.optimizers import adam\n'), ((6382, 6397), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (6395, 6397), False, 'from sklearn.preprocessing import OneHotEncoder\n')] |
import gym
import numpy as np
from gym import spaces
class NormalizedActionWrapper(gym.ActionWrapper):
    """Gym wrapper that rescales actions from [-scale, scale] to the env's range.

    Args:
        env (gym.env): OpenAI Gym environment to wrap around
        scale (float): Scale for normalizing action. Default: 1.0.
    References:
        https://github.com/tristandeleu/pytorch-maml-rl
    """
    def __init__(self, env, scale=1.0):
        super(NormalizedActionWrapper, self).__init__(env)
        self.scale = scale
        # Advertise the normalized box; the true bounds live in the wrapped env.
        self.action_space = spaces.Box(low=-scale, high=scale, shape=self.env.action_space.shape)
    def action(self, action):
        # Restrict the incoming action to the normalized interval first.
        action = np.clip(action, -self.scale, self.scale)
        low, high = self.env.action_space.low, self.env.action_space.high
        finite_bounds = np.all(np.isfinite(low)) and np.all(np.isfinite(high))
        if not finite_bounds:
            raise ValueError("Invalid value in action space")
        # Linearly map [-scale, scale] onto [low, high], then clip once more.
        rescaled = low + (action + self.scale) * (high - low) / (2 * self.scale)
        return np.clip(rescaled, low, high)
| [
"numpy.clip",
"numpy.isfinite",
"gym.spaces.Box"
] | [((556, 625), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-scale)', 'high': 'scale', 'shape': 'self.env.action_space.shape'}), '(low=-scale, high=scale, shape=self.env.action_space.shape)\n', (566, 625), False, 'from gym import spaces\n'), ((719, 759), 'numpy.clip', 'np.clip', (['action', '(-self.scale)', 'self.scale'], {}), '(action, -self.scale, self.scale)\n', (726, 759), True, 'import numpy as np\n'), ((1053, 1076), 'numpy.clip', 'np.clip', (['action', 'lb', 'ub'], {}), '(action, lb, ub)\n', (1060, 1076), True, 'import numpy as np\n'), ((907, 922), 'numpy.isfinite', 'np.isfinite', (['lb'], {}), '(lb)\n', (918, 922), True, 'import numpy as np\n'), ((935, 950), 'numpy.isfinite', 'np.isfinite', (['ub'], {}), '(ub)\n', (946, 950), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import scipy.optimize
import numdifftools as nd
from pyswarm import pso
import time
# Index <-> state / year lookup tables for the five modeled states.
state_map_dict = {0:'KY', 1:'OH', 2:'PA', 3:'VA', 4:'WV'}
time_map_dict = {0:2010, 1:2011, 2:2012, 3:2013, 4:2014, 5:2015, 6:2016, 7:2017}
full2abbrev_dict = {'Kentucky':'KY', 'Ohio':'OH', 'Pennsylvania':'PA', 'Virginia':'VA', 'West Virginia':'WV'}
# Observed series: mean TotalDrugReportsState per (state, year).
I_df = pd.read_csv('MCM_NFLIS_Data.csv')
I_df = I_df.groupby(['State', 'YYYY'])['TotalDrugReportsState'].mean()
# Census table: HC01_VC128 summed per state (presumably a population/household
# count -- TODO confirm against the ACS column dictionary).
population_df = pd.read_csv('ACS_10_5YR_DP02_with_ann.csv')
population_df = population_df.iloc[1:]  # drop the annotation/header row
population_df['HC01_VC128'] = population_df['HC01_VC128'].apply(lambda x:int(x))
population_df['State'] = population_df['GEO.display-label'].apply(lambda x:full2abbrev_dict[x.split(', ')[1]])
population_df = population_df.groupby(['State'])['HC01_VC128'].sum()
size = 5      # number of states
max_time = 8  # number of modeled year-steps (2010-2017)
# 2010 observations seed the infected compartment I(i, 0).
initial_state = I_df[I_df.index.map(lambda x:x[1])==2010]
'''
gamma = np.random.rand(size)
beta = np.random.rand()
A = np.random.rand(size, size)
'''
# Flat parameter vector layout: size*size entries of A, then `size` gammas,
# then the single beta.
arg_sizes = [size*size, size, 1]
total_size = sum(arg_sizes)
args = np.random.rand(total_size)
# Per-parameter bounds: A entries in [-0.5, 0.5], gammas in [0, 1],
# the last slot in [0.1, 100].
bounds = []
lb = []
ub = []
bias = 0
for i in range(0, arg_sizes[0]):
    lb.append(-0.5)
    ub.append(0.5)
    bounds.append((-0.5, 0.5))
bias += arg_sizes[0]
for i in range(bias, bias+arg_sizes[1]):
    lb.append(0)
    ub.append(1)
    bounds.append((0, 1))
bias += arg_sizes[1]
for i in range(bias, bias+arg_sizes[2]):
    lb.append(0.1)
    ub.append(100)
    bounds.append((0.1, 100))
def get_beta(args):
    """Extract the scalar beta stored after the A-matrix and gamma entries."""
    offset = arg_sizes[0] + arg_sizes[1]
    return args[offset]
def get_gamma(args):
    """Return the length-`size` slice of per-state gamma parameters."""
    start = arg_sizes[0]
    return args[start:start + size]
# A[i][j] lives at flat index size*i + j in the parameter vector.
get_A = lambda args, i, j: args[size*i+j]
# Memoization caches keyed by (state index, time step).
I_results = {}
R_results = {}
S_results = {}
summed_results = {}
def I(i, t, args):
    """Infected compartment for state `i` at time step `t` (memoized)."""
    if (i, t) in I_results:
        return I_results[(i, t)]
    if t == 0:
        state_name = state_map_dict[i]
        # Initial infected: beta (scaled by 10) times the state's 2010 count.
        result = (get_beta(args)*10) *initial_state[state_name].values[0]
    else:
        # Conservation of I + R + S: the net outflow of R and S enters I.
        result = I(i, t-1, args) + R(i, t-1, args) - R(i, t, args) + S(i, t-1, args) -S(i, t, args)
    I_results[(i, t)] = result
    return result
def R(i, t, args):
    """Recovered compartment: grows by gamma_i * I each step (memoized)."""
    if (i, t) in R_results:
        return R_results[(i, t)]
    if t == 0:
        # Nobody has recovered at the initial time step.
        result = 0
    else:
        result = get_gamma(args)[i]*I(i, t-1, args) + R(i, t-1, args)
    R_results[(i, t)] = result
    return result
def S(i, t, args):
    """Susceptible compartment for state `i` at step `t` (memoized)."""
    if (i, t) in S_results:
        return S_results[(i, t)]
    if t == 0:
        # Everyone not initially infected starts susceptible.
        result = fastN(i) - I(i, t, args)
    else:
        # A fraction `summed(i, t-1)` of the susceptibles becomes infected.
        result = -summed(i, t-1, args)*S(i, t-1, args) + S(i, t-1, args)
    S_results[(i, t)] = result
    return result
def summed(i, t, args):
    """Cross-state infection pressure: sum_j A[i][j] * I(j, t) / N(j) (memoized)."""
    if (i, t) in summed_results:
        return summed_results[(i, t)]
    result = 0
    for j in range(0, size):
        result += get_A(args, i, j)*I(j, t, args)/fastN(j)
    summed_results[(i, t)] = result
    return result
# Fast positional lookup of a state's aggregated HC01_VC128 total.
fastN = lambda i:population_df.values[i]
def N(i):
    """Same value as fastN, looked up by state abbreviation instead of position."""
    state_name = state_map_dict[i]
    return population_df[state_name]
# NOTE(review): uses `.strides[0]` (a byte stride of the underlying array) as a
# row offset -- verify this matches the intended rows-per-state step before
# trusting the shortcut.
fastI_bar = lambda it:I_df.iloc[it[0]*I_df.strides[0] + it[1]]
def I_bar(i, t):
    """Observed drug-report count for state `i` in year-step `t`."""
    return I_df[state_map_dict[i], time_map_dict[t]]
def dict_clear():
    """Empty every memoization cache so a new parameter vector starts fresh."""
    for cache in (I_results, R_results, S_results, summed_results):
        cache.clear()
def f(args):
    """Objective: mean squared error between simulated I(i, t) and observations.

    Clears the memoization caches afterwards so the next candidate parameter
    vector does not reuse stale values.
    """
    result = 0
    for i in range(0, size):
        for t in range(0, max_time):
            result += (I(i, t, args)-fastI_bar((i, t))) **2
    result = result / (size*max_time)
    dict_clear()
    return result
'''
while True:
    start = time.time()
    print(f(args))
    args = np.random.rand(total_size)
    print(time.time()-start)
'''
# Fit the parameters with particle swarm optimization; alternative scipy
# optimizers are left commented out below.
xopt, fopt = pso(f, lb, ub, maxiter=1000)
#scipy.optimize.differential_evolution(f, bounds, recombination=1, disp=True)
#scipy.optimize.minimize(f, x0=args, method='trust-ncg', jac=np.gradient, hess=lambda x: nd.Hessian(f)(x), options={'disp':True})
#scipy.optimize.minimize(f, x0=args, options={'disp':True})
print('!')
| [
"pyswarm.pso",
"numpy.random.rand",
"pandas.read_csv"
] | [((382, 415), 'pandas.read_csv', 'pd.read_csv', (['"""MCM_NFLIS_Data.csv"""'], {}), "('MCM_NFLIS_Data.csv')\n", (393, 415), True, 'import pandas as pd\n'), ((504, 547), 'pandas.read_csv', 'pd.read_csv', (['"""ACS_10_5YR_DP02_with_ann.csv"""'], {}), "('ACS_10_5YR_DP02_with_ann.csv')\n", (515, 547), True, 'import pandas as pd\n'), ((1089, 1115), 'numpy.random.rand', 'np.random.rand', (['total_size'], {}), '(total_size)\n', (1103, 1115), True, 'import numpy as np\n'), ((3652, 3680), 'pyswarm.pso', 'pso', (['f', 'lb', 'ub'], {'maxiter': '(1000)'}), '(f, lb, ub, maxiter=1000)\n', (3655, 3680), False, 'from pyswarm import pso\n')] |
from hdmf.common import CSRMatrix
from hdmf.testing import TestCase, H5RoundTripMixin
import scipy.sparse as sps
import numpy as np
class TestCSRMatrix(TestCase):
    """Unit tests for hdmf.common.CSRMatrix construction and conversion."""

    @staticmethod
    def _example_components():
        # Shared 3x3 CSR fixture: values, column indices, row pointers.
        values = np.array([1, 2, 3, 4, 5, 6])
        col_indices = np.array([0, 2, 2, 0, 1, 2])
        row_ptr = np.array([0, 2, 3, 6])
        return values, col_indices, row_ptr

    def test_from_sparse_matrix(self):
        # Building from a scipy csr_matrix must match building from raw arrays.
        values, col_indices, row_ptr = self._example_components()
        expected = CSRMatrix(values, col_indices, row_ptr, (3, 3))
        received = CSRMatrix(sps.csr_matrix((values, col_indices, row_ptr), shape=(3, 3)))
        self.assertContainerEqual(received, expected, ignore_hdmf_attrs=True)

    def test_to_spmat(self):
        # Round-trip through scipy must yield the expected dense layout.
        values, col_indices, row_ptr = self._example_components()
        dense = CSRMatrix(values, col_indices, row_ptr, (3, 3)).to_spmat().toarray()
        np.testing.assert_array_equal(dense, np.asarray([[1, 0, 2], [0, 0, 3], [4, 5, 6]]))

# TODO more unit tests are needed for CSRMatrix
class TestCSRMatrixRoundTrip(H5RoundTripMixin, TestCase):
    """Round-trip a small CSRMatrix container through HDF5."""

    def setUpContainer(self):
        # 3x3 CSR fixture matching the unit tests in this module.
        values = np.array([1, 2, 3, 4, 5, 6])
        col_indices = np.array([0, 2, 2, 0, 1, 2])
        row_ptr = np.array([0, 2, 3, 6])
        return CSRMatrix(values, col_indices, row_ptr, (3, 3))
| [
"numpy.asarray",
"hdmf.common.CSRMatrix",
"numpy.array",
"scipy.sparse.csr_matrix",
"numpy.testing.assert_array_equal"
] | [((221, 249), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (229, 249), True, 'import numpy as np\n'), ((268, 296), 'numpy.array', 'np.array', (['[0, 2, 2, 0, 1, 2]'], {}), '([0, 2, 2, 0, 1, 2])\n', (276, 296), True, 'import numpy as np\n'), ((314, 336), 'numpy.array', 'np.array', (['[0, 2, 3, 6]'], {}), '([0, 2, 3, 6])\n', (322, 336), True, 'import numpy as np\n'), ((356, 396), 'hdmf.common.CSRMatrix', 'CSRMatrix', (['data', 'indices', 'indptr', '(3, 3)'], {}), '(data, indices, indptr, (3, 3))\n', (365, 396), False, 'from hdmf.common import CSRMatrix\n'), ((416, 469), 'scipy.sparse.csr_matrix', 'sps.csr_matrix', (['(data, indices, indptr)'], {'shape': '(3, 3)'}), '((data, indices, indptr), shape=(3, 3))\n', (430, 469), True, 'import scipy.sparse as sps\n'), ((489, 507), 'hdmf.common.CSRMatrix', 'CSRMatrix', (['sps_mat'], {}), '(sps_mat)\n', (498, 507), False, 'from hdmf.common import CSRMatrix\n'), ((631, 659), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (639, 659), True, 'import numpy as np\n'), ((678, 706), 'numpy.array', 'np.array', (['[0, 2, 2, 0, 1, 2]'], {}), '([0, 2, 2, 0, 1, 2])\n', (686, 706), True, 'import numpy as np\n'), ((724, 746), 'numpy.array', 'np.array', (['[0, 2, 3, 6]'], {}), '([0, 2, 3, 6])\n', (732, 746), True, 'import numpy as np\n'), ((765, 805), 'hdmf.common.CSRMatrix', 'CSRMatrix', (['data', 'indices', 'indptr', '(3, 3)'], {}), '(data, indices, indptr, (3, 3))\n', (774, 805), False, 'from hdmf.common import CSRMatrix\n'), ((877, 922), 'numpy.asarray', 'np.asarray', (['[[1, 0, 2], [0, 0, 3], [4, 5, 6]]'], {}), '([[1, 0, 2], [0, 0, 3], [4, 5, 6]])\n', (887, 922), True, 'import numpy as np\n'), ((931, 983), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['spmat_array', 'expected'], {}), '(spmat_array, expected)\n', (960, 983), True, 'import numpy as np\n'), ((1143, 1171), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 
4, 5, 6])\n', (1151, 1171), True, 'import numpy as np\n'), ((1190, 1218), 'numpy.array', 'np.array', (['[0, 2, 2, 0, 1, 2]'], {}), '([0, 2, 2, 0, 1, 2])\n', (1198, 1218), True, 'import numpy as np\n'), ((1236, 1258), 'numpy.array', 'np.array', (['[0, 2, 3, 6]'], {}), '([0, 2, 3, 6])\n', (1244, 1258), True, 'import numpy as np\n'), ((1274, 1314), 'hdmf.common.CSRMatrix', 'CSRMatrix', (['data', 'indices', 'indptr', '(3, 3)'], {}), '(data, indices, indptr, (3, 3))\n', (1283, 1314), False, 'from hdmf.common import CSRMatrix\n')] |
import pandas as pd
import numpy as np
import requests
# finance-datareader installed
# now : cvxopt version 1.2.6
# python interpreter : 3.9.6
import FinanceDataReader as fdr
# print(fdr.__version__) # 0.9.31
# Load the full KRX (Korea Exchange) listing.
df_krx = fdr.StockListing('KRX')
# print(df_krx) # [6813 rows x 10 columns]
# Inspect the data
# df_krx.info()
# df_krx.isnull().sum() # this one does not work.
# Drop listings with missing values.
df_krx_dropna = df_krx.dropna()
# df_krx_dropna.info() # 0 Symbol 2256 non-null object
# Collect the ticker symbols.
assets = df_krx_dropna['Symbol']
# print(assets) # works
# Put the collected ticker codes into an array.
assets = np.array(assets)
# print(len(assets)) # 2256
# ---------------------------------------
# Fetch each ticker's daily closing prices.
from datetime import datetime
# Price history runs from 2013-01-01; the end date was meant to be today.
#Get the stock starting date
start_date = '2013-01-01'
# today = datetime.today().strftime('%Y-%m-%d')
end_date = '2021-07-16' # datetime.today().strftime('%Y-%m-%d')
# Create a DataFrame to hold each stock's daily closing price.
#Create a dataframe to store the adjusted close price of the stocks
df = pd.DataFrame()
# Pull each ticker's closing prices with FinanceDataReader.
for stock in assets:
    df[stock] = fdr.DataReader(stock, start_date, end_date)['Close']
# print(df) # [2102 rows x 2256 columns] -- takes a very long time (5+ minutes)
# and emits the warning below; it is not a bug. Downgrade pandas to silence it.
# PerformanceWarning: DataFrame is highly fragmented. This is usually the result of calling `frame.insert` many times, which has poor performance.
# Consider using pd.concat instead. To get a de-fragmented frame, use `newframe = frame.copy()`
# df[stock] = fdr.DataReader(stock, start_date, end_date)['Close']
# Save the DataFrame to csv (missing values kept).
# df.to_csv("krx_code_close.csv", index=True)
# Rename the columns to company names.
df.columns = df_krx_dropna['Name'].values
# Drop columns with any NaN (tickers shrink from 2256 to 1476).
df2 = df.dropna(axis = 1)
# print(df2)
# Save the NaN-free DataFrame to csv.
# df2.to_csv("krx_name_close_drop_columns.csv", index=True)
# Get the assets / tickers
assets = df2.columns
print(len(assets)) # 1476
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
# Calculate the expected annualized returns and the annualized sample covariance matrix of the daily asset returns
mu = expected_returns.mean_historical_return(df2)
S = risk_models.sample_cov(df2)
# Optimize for the maximal Sharpe ratio
# NOTE: with too many assets ef.max_sharpe() errors out -> switch the solver to SCS
# Rober says: recommended to trim to 100 tickers or fewer before running!
ef = EfficientFrontier(mu, S, solver="SCS") # Create the Efficient Frontier Object
# Maximize the Sharpe ratio, and get the raw weights
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(cleaned_weights)
ef.portfolio_performance(verbose=True)
# Get the discrete allocation of each sharpe per stock
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
# Investment amount (unit: KRW)
portfolio_val = 5000000
latest_prices = get_latest_prices(df2)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio()
print('Discrete Allocaion: ', allocation) # recommended share counts per ticker: 38 names
print('Funds Remaining: ', leftover, ' KRW')
# Build a list of the tickers included in the portfolio (company names only).
company_name = list(allocation)
# Get the discrete allocation values (just the share counts).
discrete_allocation_list = []
for symbol in allocation:
    discrete_allocation_list.append(allocation.get(symbol))
# Create a dataframe for the portfolio
portfolio_df = pd.DataFrame(columns=['Company_name', 'company_Ticker', 'Discrete_val_' + str(portfolio_val)])
# Result columns: Company_name company_Ticker Discrete_val_5000000
portfolio_df['Company_name'] = company_name
portfolio_df['company_Ticker'] = allocation # should hold ticker codes, but the earlier column rename replaced codes with names.
portfolio_df['Discrete_val_'+str(portfolio_val)] = discrete_allocation_list
# print(portfolio_df)
# Show Funds Remaining
print('Funds Remaining: ', leftover, ' KRW')
# Show Portfolio performance
print(ef.portfolio_performance(verbose=True))
# took about 3-5 minutes in total
# TODO: run per-industry as well; that code still needs to be written.
"pypfopt.discrete_allocation.DiscreteAllocation",
"FinanceDataReader.DataReader",
"pypfopt.risk_models.sample_cov",
"pypfopt.discrete_allocation.get_latest_prices",
"numpy.array",
"pypfopt.expected_returns.mean_historical_return",
"pypfopt.efficient_frontier.EfficientFrontier",
"pandas.DataFrame",
"... | [((239, 262), 'FinanceDataReader.StockListing', 'fdr.StockListing', (['"""KRX"""'], {}), "('KRX')\n", (255, 262), True, 'import FinanceDataReader as fdr\n'), ((577, 593), 'numpy.array', 'np.array', (['assets'], {}), '(assets)\n', (585, 593), True, 'import numpy as np\n'), ((1029, 1043), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1041, 1043), True, 'import pandas as pd\n'), ((2264, 2308), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df2'], {}), '(df2)\n', (2303, 2308), False, 'from pypfopt import expected_returns\n'), ((2313, 2340), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df2'], {}), '(df2)\n', (2335, 2340), False, 'from pypfopt import risk_models\n'), ((2492, 2530), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['mu', 'S'], {'solver': '"""SCS"""'}), "(mu, S, solver='SCS')\n", (2509, 2530), False, 'from pypfopt.efficient_frontier import EfficientFrontier\n'), ((2943, 2965), 'pypfopt.discrete_allocation.get_latest_prices', 'get_latest_prices', (['df2'], {}), '(df2)\n', (2960, 2965), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((2999, 3078), 'pypfopt.discrete_allocation.DiscreteAllocation', 'DiscreteAllocation', (['weights', 'latest_prices'], {'total_portfolio_value': 'portfolio_val'}), '(weights, latest_prices, total_portfolio_value=portfolio_val)\n', (3017, 3078), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((1118, 1161), 'FinanceDataReader.DataReader', 'fdr.DataReader', (['stock', 'start_date', 'end_date'], {}), '(stock, start_date, end_date)\n', (1132, 1161), True, 'import FinanceDataReader as fdr\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Fleet Clustering
#
# ### <NAME>, 2019-01-16
#
# ## Longliner Edition
#
# We cluster vessel using HDBSCAN and a custom metric to derive fleets
# that are related in the sense that they spend a lot of time in the same
# location while at sea.
#
# ## See Also
#
# * Other notebooks in https://github.com/GlobalFishingWatch/fleet-clustering for
# examples of clustering Squid Jiggers, etc.
# * This workspace that Nate put together: https://globalfishingwatch.org/map/workspace/udw-v2-85ff8c4f-fbfe-4126-b067-4d94cdd2b737
from __future__ import print_function
from __future__ import division
from collections import Counter, OrderedDict
import datetime as dt
import hdbscan
import logging
import matplotlib.pyplot as plt
import matplotlib.animation as mpl_animation
import numpy as np
import pandas as pd
from skimage import color
from IPython.display import HTML
from fleet_clustering import bq
from fleet_clustering import filters
from fleet_clustering import distances
from fleet_clustering import animation
# ## Load AIS Clustering Data
#
# Load the AIS data that we use for clustering. Note that it onlyu includes vessels away
# from shores so as to exclude clustering on ports
# Per-day AIS positions for drifting longliners, 2016-2018; all activity kept
# (fishing_only=False) and no shore cut applied at load time.
all_by_date = bq.load_ais_by_date('drifting_longlines', dt.date(2016, 1, 1),
                    dt.date(2018, 12, 31),
                    fishing_only=False, min_km_from_shore=0)
# Drop positions within 10 km of shore and near the Chinese coast so ports do
# not dominate the clustering.
pruned_by_date = {k : filters.remove_near_shore(10,
                        filters.remove_chinese_coast(v)) for (k, v) in all_by_date.items()}
# valid_ssvid = sorted(filters.find_valid_ssvid(pruned_by_date))
# ## Create Distance Metrics
#
# Create an array of distance metrics. The details are still evolving, but in general
# we want to deal with two things. Days on which a boat is missing and days where the
# boat is away from the fleet.
#
# * Distances to/from a boat on days when it is missing are represented by $\infty$ in
# the distance matrix. HDBSCAN ignores these values.
# * Only the closest N days are kept for each boat pair, allowing boats to leave the fleet
# for up to half the year without penalty.
#
# In addition, distances have a floor of 1 km to prevent overclustering when boats tie up
# up together, etc.
dists_by_date = {}
valid_ssvid_by_date = {}
# Build one pairwise vessel-distance matrix per calendar year.
for start_date, end_date in [
    ("20160101", "20161231"),
    ("20170101", "20171231"),
    ("20180101", "20181231"),
]:
    if start_date in dists_by_date:
        continue  # already computed (notebook cell may be re-run)
    print("computing distance for", start_date, end_date)
    subset_by_date = {
        k: v for (k, v) in pruned_by_date.items() if start_date <= k <= end_date
    }
    valid_ssvid_by_date[start_date] = sorted(filters.find_valid_ssvid(subset_by_date))
    C = distances.create_composite_lonlat_array(
        subset_by_date, valid_ssvid_by_date[start_date]
    )
    dists = distances.compute_distances_4(C, gamma=2)
    dists_by_date[start_date] = dists
# ## Load Carrier Data
carriers_by_date = bq.load_carriers_by_year(2017, 2018)
pruned_carriers_by_date = {k : filters.remove_chinese_coast(v) for (k, v) in carriers_by_date.items()}
# High-confidence carrier vessels (confidence = 3) from the vessel database.
query = """
SELECT CAST(mmsi AS STRING) FROM
`world-fishing-827.vessel_database.all_vessels_20190102`
WHERE iscarriervessel AND confidence = 3
"""
valid_carrier_ssvid_df = pd.read_gbq(query, dialect='standard', project_id='world-fishing-827')
valid_carrier_ssvid = valid_carrier_ssvid_df.f0_
valid_carrier_ssvid_set = set(valid_carrier_ssvid)
# ## Load Encounters Data And Country Codes
#
# This is used to filter the carrier vessels down to only those
# that meet with target vessels and to add iso3 labels to outputs
encounters = bq.load_carriers(2017, 2017)
# Map numeric MID country codes to ISO3 labels for output labeling.
query = """
SELECT code, iso3 FROM `world-fishing-827.gfw_research.country_codes`"""
country_codes_df = pd.read_gbq(query, dialect='standard', project_id='world-fishing-827')
iso3_map = {x.code : x.iso3 for x in country_codes_df.itertuples()}
# ## Fit the Clusterer
#
# This is pretty straightforward -- all the complicated stuff is
# embedded in the matrix computations. Fleet size can be tweaked
# using `min_cluster_size` and `min_sample_size`.
raw_clusterers = {}
# One HDBSCAN fit per year on the precomputed distance matrix; fleet size is
# controlled via min_cluster_size.
for start_date, dists in dists_by_date.items():
    clusterer = hdbscan.HDBSCAN(metric='precomputed',
                                min_cluster_size=9,
                               )
    clusterer.fit(dists)
    raw_clusterers[start_date] = clusterer
# ## Create Psuedo Distance From Fleet Membership
# +
# pdists_by_date = {}
# for date in ['20160101', '20170101', '20180101']:
# pdists = np.zeros_like(dists_by_date[date])
# raw_labels = np.asarray(raw_clusterers[date].labels_)
# SCALE = 1000
# UNKNOWN_FLEET_DIST = 1 * SCALE
# OTHER_FLEET_DIST = 2 * SCALE
# mask = (raw_labels == -1)
# for i, fid in enumerate(raw_labels):
# if fid == -1:
# pdists[i] = UNKNOWN_FLEET_DIST
# else:
# pdists[i] = OTHER_FLEET_DIST * (raw_labels != fid)
# pdists[i, mask] = UNKNOWN_FLEET_DIST
# pdists_by_date[date] = pdists
# -
# ## Set up Fleets
#
# Set up the fleets for viewing.
# +
def to_rgb(string):
    """Convert a hex color string like '#1f77b4' to [r, g, b] floats in [0, 1].

    Args:
        string: 6-digit hex color, with or without a leading '#'.

    Returns:
        List of three floats, one per channel, each scaled by 1/255.
    """
    string = string.strip('#')
    channels = (string[:2], string[2:4], string[4:])
    # Divide by 255 (the max of a two-digit hex byte). The previous divisor of
    # 225 pushed bright channels above 1.0, which matplotlib rejects as an
    # invalid RGB value.
    return [int(x, 16) / 255.0 for x in channels]
def find_labels(dists, valid_ssvid):
    """Cluster vessels into fleets and append carrier vessels as one extra fleet.

    Args:
        dists: square precomputed distance matrix aligned with `valid_ssvid`.
        valid_ssvid: vessel SSVIDs, one per row/column of `dists`.

    Returns:
        (joint_ssvid, labels): SSVIDs (fishing vessels first, then carriers)
        and their fleet labels; -1 marks unclustered/unconnected vessels.
    """
    clusterer = hdbscan.HDBSCAN(metric='precomputed',
                    min_cluster_size=9).fit(dists)
    # SSVIDs that ended up in some fleet (label >= 0).
    all_fleet_ssvid_set = set([s for (s, f) in zip(valid_ssvid, clusterer.labels_) if f >= 0])
    valid_ssvid_set = set(valid_ssvid)
    # Carriers that had an encounter with any fleet vessel (either side of the
    # encounter pair may be the carrier).
    all_fleet_reefer_ssvid_set = set()
    for x in encounters.itertuples():
        if x.ssvid_1 in all_fleet_ssvid_set and x.ssvid_2 in valid_carrier_ssvid_set:
            all_fleet_reefer_ssvid_set.add(x.ssvid_2)
        if x.ssvid_2 in all_fleet_ssvid_set and x.ssvid_1 in valid_carrier_ssvid_set:
            all_fleet_reefer_ssvid_set.add(x.ssvid_1)
    all_fleet_reefer_ssvid = sorted(all_fleet_reefer_ssvid_set)
    valid_ssvid_set = set(valid_ssvid)
    carrier_ids = [x for x in all_fleet_reefer_ssvid if x not in valid_ssvid_set]
    # Carriers are appended after the fishing vessels and all share one new
    # label: max fleet label + 1.
    joint_ssvid = valid_ssvid + sorted(carrier_ids)
    labels = list(clusterer.labels_) + [max(clusterer.labels_) + 1] * len(carrier_ids)
    # Remove vessels that have no connection to other vessels
    for i, ssvid in enumerate(valid_ssvid):
        connections = (~np.isinf(dists[i])).sum()
        if connections == 0:
            labels[i] = -1
    return joint_ssvid, labels
def create_fleet_mapping(labels, include_carriers=False):
    """Assign each fleet id a plot style tuple (marker, fg, bg, size, width, name).

    Fleets are processed in descending member-count order; hue, marker shape,
    and fill direction are combined so nearby fleets look distinct.
    """
    counts = []
    skip = []  # NOTE(review): never populated, so the `if i in skip` branch is dead
    for i in range(max(labels) + 1):
        if i in skip:
            counts.append(0)
        else:
            counts.append((np.array(labels) == i).sum())
    # Fleet ids ordered by size, dropping empty fleets; the max label is the
    # carrier pseudo-fleet and is styled separately below.
    fleet_ids = [x for x in np.argsort(counts)[::-1] if counts[x] > 0]
    fleet_ids_without_carriers = [x for x in fleet_ids if x != max(labels)]
    fleets = OrderedDict()
    n_hues = int(np.ceil(len(fleet_ids) / 4.0))
    used = set()
    for i, fid in enumerate(fleet_ids_without_carriers):
        # Decompose rank i into (fill direction b, hue index c, marker d);
        # the assert guarantees each triple is used once.
        b = (i // (2 * n_hues)) % 2
        c = (i // 2)% n_hues
        d = i % 2
        symbol = 'H^'[d]
        assert (b, c, d) not in used, (i, b, c, d)
        used.add((b, c, d))
        sat = 1
        val = 1
        raw_hue = c / float(n_hues)
        # We remap the raw hue in order to avoid the 60 degree segment around blue
        hue = 5. / 6. * raw_hue
        if hue > 7. / 12.:
            hue += 1. / 6.
        assert 0 <= hue < 1, hue
        [[clr]] = color.hsv2rgb([[(hue, sat, val)]])
        # b selects color-on-dark vs dark-on-color fill.
        fg = [[0.1511111111111111, 0.2, 0.3333333333333333], clr][b]
        bg = [clr, [0.1511111111111111, 0.2, 0.3333333333333333]][b]
        w = [1, 2][b]
        sz = [9, 7][b]
        fleets[fid] = (symbol, tuple(fg), tuple(bg), sz, w, str(i + 1))
    if include_carriers:
        fleets[max(labels)] = ('1', 'k', 'k', 8, 2, 'Carrier Vessel')
    print(len(set([x for x in fleets if x != -1])), "fleets")
    return fleets
def iou(a, b):
    """Jaccard similarity (intersection over union) of two collections."""
    set_a, set_b = set(a), set(b)
    return len(set_a & set_b) / len(set_a | set_b)
def best_match(a, bs):
    """Index of the candidate in `bs` with the highest IoU against `a`, or None."""
    scores = [iou(a, candidate) for candidate in bs]
    winner = np.argmax(scores)
    # A zero best score means no overlap at all -> no match.
    return None if scores[winner] == 0 else winner
def adapt_fleet_mapping(base_fleets, base_ssvid, base_labels, new_ssivd, new_labels):
    """Relabel a new year's fleets so matching base-year fleets keep their style.

    Fleets are matched by best IoU of member SSVIDs; unmatched or
    double-matched fleets receive fresh ids beyond max(base_fleets).
    Returns (new_fleets, new_labels).
    """
    new_labels = np.array(new_labels)
    # Member SSVIDs per base fleet, in sorted fleet-id order.
    ssvid_base = []
    base_fleet_ids = sorted(base_fleets)
    for fid in base_fleet_ids:
        mask = (base_labels == fid)
        ssvid_base.append(np.array(base_ssvid)[mask])
    # Member SSVIDs per new fleet (-1 = unclustered is skipped).
    ssvid_new = []
    new_fleet_ids = sorted(set([x for x in new_labels if x != -1]))
    for fid in new_fleet_ids:
        mask = (new_labels == fid)
        ssvid_new.append(np.array(new_ssivd)[mask])
    # Best-matching base fleet for each new fleet (None if no overlap).
    rev_mapping = {}
    for fid, ssvid_list in zip(new_fleet_ids, ssvid_new):
        i = best_match(ssvid_list, ssvid_base)
        if i is None:
            rev_mapping[fid] = None
        else:
            rev_mapping[fid] = base_fleet_ids[i]
    # Invert to base -> new; when two new fleets claim the same base fleet,
    # merge the later one into the earlier by rewriting its labels.
    mapping = {}
    for k, v in rev_mapping.items():
        if v in mapping:
            mask = (new_labels == k)
            new_labels[mask] = mapping[v]
        else:
            mapping[v] = k
    new_fleets = OrderedDict()
    for i, fid in enumerate(base_fleets):
        if fid in mapping and mapping[fid] is not None:
            k = mapping[fid]
            if k in new_fleets:
                print("Skipping", k, fid, "because of double match")
                new_fleets[i + max(base_fleets)] = base_fleets[fid]
            else:
                new_fleets[mapping[fid]] = base_fleets[fid]
        else:
            # No match: allocate a fresh id beyond the base fleet ids.
            new_fleets[i + max(base_fleets)] = base_fleets[fid]
    return new_fleets, new_labels
# +
import imp; imp.reload(animation)
# Cluster the 2017 longline fleet and preview the animation inline.
joint_ssvid_2017, labels_2017 = find_labels(dists_by_date['20170101'],
                                            valid_ssvid_by_date['20170101'])
fleets_2017 = create_fleet_mapping(labels_2017)
all_by_date_2017 = {k : v for (k, v) in all_by_date.items() if '20170101' <= k <= '20171231'}
anim = animation.make_anim(joint_ssvid_2017,
                 labels_2017,
                 all_by_date_2017,
                 interval=100,
                 fleets=fleets_2017,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
HTML(anim.to_html5_video())
# +
# Re-render the 2017 animation at interval=1 and save it to mp4 via ffmpeg.
joint_ssvid_2017, labels_2017 = find_labels(dists_by_date['20170101'],
                                            valid_ssvid_by_date['20170101'])
fleets_2017 = create_fleet_mapping(labels_2017)
all_by_date_2017 = {k : v for (k, v) in all_by_date.items() if '20170101' <= k <= '20171231'}
anim = animation.make_anim(joint_ssvid_2017,
                 labels_2017,
                 all_by_date_2017,
                 interval=1,
                 fleets=fleets_2017,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
Writer = mpl_animation.writers['ffmpeg']
writer = Writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save('fleet_longlines_2017.mp4', writer=writer,
          savefig_kwargs={'facecolor':'#222D4B'})
# -
# Stuff below here relies on pseudo distances and gluing years together, which
# is currently not working.
# +
# NOTE(review): `pdists_by_date` is only defined in a commented-out cell above,
# and find_labels() takes (dists, valid_ssvid) while these calls pass a single
# summed matrix -- consistent with the "not working" note just above.
joint_ssvid_2016, labels_2016 = find_labels(dists_by_date['20160101'] +
                                            pdists_by_date['20170101'])
fleets_2016, labels_2016 = adapt_fleet_mapping(fleets_2017, joint_ssvid_2017, labels_2017,
                                               joint_ssvid_2016, labels_2016)
all_by_date_2016 = {k : v for (k, v) in all_by_date.items() if '20160101' <= k <= '20161231'}
anim = animation.make_anim(joint_ssvid_2016,
                 labels_2016,
                 all_by_date_2016,
                 interval=100,
                 fleets=fleets_2016,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
HTML(anim.to_html5_video())
# +
joint_ssvid_2018, labels_2018 = find_labels(dists_by_date['20180101'] +
                                            pdists_by_date['20170101'])
fleets_2018, labels_2018 = adapt_fleet_mapping(fleets_2017, joint_ssvid_2017, labels_2017,
                                               joint_ssvid_2018, labels_2018)
all_by_date_2018 = {k : v for (k, v) in all_by_date.items() if '20180101' <= k <= '20181231'}
anim = animation.make_anim(joint_ssvid_2018,
                 labels_2018,
                 all_by_date_2018,
                 interval=100,
                 fleets=fleets_2018,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
HTML(anim.to_html5_video())
# -
# Render and save the 2017, 2018, and 2016 animations at full frame rate.
anim = animation.make_anim(joint_ssvid_2017,
                 labels_2017,
                 all_by_date_2017,
                 interval=1,
                 fleets=fleets_2017,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
Writer = mpl_animation.writers['ffmpeg']
writer = Writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save('fleet_longlines_2017.mp4', writer=writer,
          savefig_kwargs={'facecolor':'#222D4B'})
anim = animation.make_anim(joint_ssvid_2018,
                 labels_2018,
                 all_by_date_2018,
                 interval=1,
                 fleets=fleets_2018,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
Writer = mpl_animation.writers['ffmpeg']
writer = Writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save('fleet_longlines_2018.mp4', writer=writer,
          savefig_kwargs={'facecolor':'#222D4B'})
anim = animation.make_anim(joint_ssvid_2016,
                 labels_2016,
                 all_by_date_2016,
                 interval=1,
                 fleets=fleets_2016,
                 show_ungrouped=True,
                 alpha=1,
                 legend_cols=12,
                 ungrouped_legend="Ungrouped")
Writer = mpl_animation.writers['ffmpeg']
writer = Writer(fps=8, metadata=dict(artist='Me'), bitrate=1800)
anim.save('fleet_longlines_2016.mp4', writer=writer,
          savefig_kwargs={'facecolor':'#222D4B'})
# ## Print Out Typical Fleet Membership
def print_fleets(fleets, labels, joint_ssvid):
    """Print, for each fleet, the flag-state breakdown of its member vessels."""
    for fid, v in fleets.items():
        label = v[-1]  # human-readable fleet name is the last style-tuple entry
        mask = (fid == np.array(labels))
        ssvids = np.array(joint_ssvid)[mask]
        # First three MMSI digits form the MID, mapped to ISO3 via iso3_map.
        mids = [x[:3] for x in ssvids]
        countries = [iso3_map.get(float(x), x) for x in mids]
        c = Counter(countries)
        print('Fleet: {} ({})'.format(label, fid), label)
        for country, count in c.most_common():
            print('\t', country, ':', count)
print_fleets(fleets_2017, labels_2017, joint_ssvid_2017)
# ## Look for labor violations
# NOTE(review): this 2016 cell reads `mmsi`, which is defined two cells below
# -- the notebook relies on out-of-order cell execution.
print(2016)
available = set(mmsi) & set(joint_ssvid_2016)
for x in available:
    mask = (np.array(joint_ssvid_2016) == x)
    [fid] = np.array(labels_2016)[mask]
    if fid in fleets_2016:
        label = fleets_2016[fid][-1]
        print(x, label, fid)
# +
# MMSI,year pairs of vessels of interest for the labor-violation check.
text = "312422000,2015;312422000,2014;312000125,2015;312000125,2014;412420941,2014;412420941,2015;412201837,2015;412201837,2016;413270430,2017;413270430,2016;440801000,2013;440801000,2014;533000000,2017;567000421,2015;567000445,2014;567000445,2015;567025800,2015;567025800,2014;416202800,2014;416202800,2015;416003928,2014;416054500,2017;416054500,2016;416001769,2013;416001769,2014;367363390,2015;576678000,2015;576678000,2014"
pairs = text.strip().split(';')
# Ignore years for now
mmsi = [x.split(',')[0] for x in pairs]
print(2017)
available = set(mmsi) & set(joint_ssvid_2017)
for x in available:
    mask = (np.array(joint_ssvid_2017) == x)
    [fid] = np.array(labels_2017)[mask]
    if fid != -1:
        label = fleets_2017[fid][-1]
        print(x, label, fid)
# -
print(2018)
available = set(mmsi) & set(joint_ssvid_2018)
for x in available:
    mask = (np.array(joint_ssvid_2018) == x)
    [fid] = np.array(labels_2018)[mask]
    if fid in fleets_2018:
        label = fleets_2018[fid][-1]
        print(x, label, fid)
# List the members of fleet 28 in 2017.
mask = (np.array(labels_2017) == 28)
np.array(joint_ssvid_2017)[mask]
| [
"fleet_clustering.filters.find_valid_ssvid",
"collections.OrderedDict",
"skimage.color.hsv2rgb",
"fleet_clustering.distances.create_composite_lonlat_array",
"pandas.read_gbq",
"fleet_clustering.animation.make_anim",
"imp.reload",
"numpy.argmax",
"fleet_clustering.distances.compute_distances_4",
"c... | [((3276, 3312), 'fleet_clustering.bq.load_carriers_by_year', 'bq.load_carriers_by_year', (['(2017)', '(2018)'], {}), '(2017, 2018)\n', (3300, 3312), False, 'from fleet_clustering import bq\n'), ((3642, 3712), 'pandas.read_gbq', 'pd.read_gbq', (['query'], {'dialect': '"""standard"""', 'project_id': '"""world-fishing-827"""'}), "(query, dialect='standard', project_id='world-fishing-827')\n", (3653, 3712), True, 'import pandas as pd\n'), ((4004, 4032), 'fleet_clustering.bq.load_carriers', 'bq.load_carriers', (['(2017)', '(2017)'], {}), '(2017, 2017)\n', (4020, 4032), False, 'from fleet_clustering import bq\n'), ((4138, 4208), 'pandas.read_gbq', 'pd.read_gbq', (['query'], {'dialect': '"""standard"""', 'project_id': '"""world-fishing-827"""'}), "(query, dialect='standard', project_id='world-fishing-827')\n", (4149, 4208), True, 'import pandas as pd\n'), ((10158, 10179), 'imp.reload', 'imp.reload', (['animation'], {}), '(animation)\n', (10168, 10179), False, 'import imp\n'), ((10479, 10665), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2017', 'labels_2017', 'all_by_date_2017'], {'interval': '(100)', 'fleets': 'fleets_2017', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2017, labels_2017, all_by_date_2017,\n interval=100, fleets=fleets_2017, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (10498, 10665), False, 'from fleet_clustering import animation\n'), ((11212, 11396), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2017', 'labels_2017', 'all_by_date_2017'], {'interval': '(1)', 'fleets': 'fleets_2017', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2017, labels_2017, all_by_date_2017,\n interval=1, fleets=fleets_2017, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (11231, 11396), 
False, 'from fleet_clustering import animation\n'), ((12371, 12557), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2016', 'labels_2016', 'all_by_date_2016'], {'interval': '(100)', 'fleets': 'fleets_2016', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2016, labels_2016, all_by_date_2016,\n interval=100, fleets=fleets_2016, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (12390, 12557), False, 'from fleet_clustering import animation\n'), ((13238, 13424), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2018', 'labels_2018', 'all_by_date_2018'], {'interval': '(100)', 'fleets': 'fleets_2018', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2018, labels_2018, all_by_date_2018,\n interval=100, fleets=fleets_2018, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (13257, 13424), False, 'from fleet_clustering import animation\n'), ((13677, 13861), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2017', 'labels_2017', 'all_by_date_2017'], {'interval': '(1)', 'fleets': 'fleets_2017', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2017, labels_2017, all_by_date_2017,\n interval=1, fleets=fleets_2017, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (13696, 13861), False, 'from fleet_clustering import animation\n'), ((14293, 14477), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2018', 'labels_2018', 'all_by_date_2018'], {'interval': '(1)', 'fleets': 'fleets_2018', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2018, labels_2018, all_by_date_2018,\n interval=1, 
fleets=fleets_2018, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (14312, 14477), False, 'from fleet_clustering import animation\n'), ((14909, 15093), 'fleet_clustering.animation.make_anim', 'animation.make_anim', (['joint_ssvid_2016', 'labels_2016', 'all_by_date_2016'], {'interval': '(1)', 'fleets': 'fleets_2016', 'show_ungrouped': '(True)', 'alpha': '(1)', 'legend_cols': '(12)', 'ungrouped_legend': '"""Ungrouped"""'}), "(joint_ssvid_2016, labels_2016, all_by_date_2016,\n interval=1, fleets=fleets_2016, show_ungrouped=True, alpha=1,\n legend_cols=12, ungrouped_legend='Ungrouped')\n", (14928, 15093), False, 'from fleet_clustering import animation\n'), ((1551, 1570), 'datetime.date', 'dt.date', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (1558, 1570), True, 'import datetime as dt\n'), ((1606, 1627), 'datetime.date', 'dt.date', (['(2018)', '(12)', '(31)'], {}), '(2018, 12, 31)\n', (1613, 1627), True, 'import datetime as dt\n'), ((3037, 3130), 'fleet_clustering.distances.create_composite_lonlat_array', 'distances.create_composite_lonlat_array', (['subset_by_date', 'valid_ssvid_by_date[start_date]'], {}), '(subset_by_date, valid_ssvid_by_date\n [start_date])\n', (3076, 3130), False, 'from fleet_clustering import distances\n'), ((3152, 3193), 'fleet_clustering.distances.compute_distances_4', 'distances.compute_distances_4', (['C'], {'gamma': '(2)'}), '(C, gamma=2)\n', (3181, 3193), False, 'from fleet_clustering import distances\n'), ((3344, 3375), 'fleet_clustering.filters.remove_chinese_coast', 'filters.remove_chinese_coast', (['v'], {}), '(v)\n', (3372, 3375), False, 'from fleet_clustering import filters\n'), ((4568, 4625), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'metric': '"""precomputed"""', 'min_cluster_size': '(9)'}), "(metric='precomputed', min_cluster_size=9)\n", (4583, 4625), False, 'import hdbscan\n'), ((7276, 7289), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7287, 7289), False, 'from collections 
import Counter, OrderedDict\n'), ((8511, 8526), 'numpy.argmax', 'np.argmax', (['ious'], {}), '(ious)\n', (8520, 8526), True, 'import numpy as np\n'), ((8690, 8710), 'numpy.array', 'np.array', (['new_labels'], {}), '(new_labels)\n', (8698, 8710), True, 'import numpy as np\n'), ((9601, 9614), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9612, 9614), False, 'from collections import Counter, OrderedDict\n'), ((17426, 17447), 'numpy.array', 'np.array', (['labels_2017'], {}), '(labels_2017)\n', (17434, 17447), True, 'import numpy as np\n'), ((17455, 17481), 'numpy.array', 'np.array', (['joint_ssvid_2017'], {}), '(joint_ssvid_2017)\n', (17463, 17481), True, 'import numpy as np\n'), ((1787, 1818), 'fleet_clustering.filters.remove_chinese_coast', 'filters.remove_chinese_coast', (['v'], {}), '(v)\n', (1815, 1818), False, 'from fleet_clustering import filters\n'), ((2987, 3027), 'fleet_clustering.filters.find_valid_ssvid', 'filters.find_valid_ssvid', (['subset_by_date'], {}), '(subset_by_date)\n', (3011, 3027), False, 'from fleet_clustering import filters\n'), ((7888, 7922), 'skimage.color.hsv2rgb', 'color.hsv2rgb', (['[[(hue, sat, val)]]'], {}), '([[(hue, sat, val)]])\n', (7901, 7922), False, 'from skimage import color\n'), ((15862, 15880), 'collections.Counter', 'Counter', (['countries'], {}), '(countries)\n', (15869, 15880), False, 'from collections import Counter, OrderedDict\n'), ((16213, 16239), 'numpy.array', 'np.array', (['joint_ssvid_2016'], {}), '(joint_ssvid_2016)\n', (16221, 16239), True, 'import numpy as np\n'), ((16258, 16279), 'numpy.array', 'np.array', (['labels_2016'], {}), '(labels_2016)\n', (16266, 16279), True, 'import numpy as np\n'), ((16999, 17025), 'numpy.array', 'np.array', (['joint_ssvid_2017'], {}), '(joint_ssvid_2017)\n', (17007, 17025), True, 'import numpy as np\n'), ((17044, 17065), 'numpy.array', 'np.array', (['labels_2017'], {}), '(labels_2017)\n', (17052, 17065), True, 'import numpy as np\n'), ((17251, 17277), 'numpy.array', 
'np.array', (['joint_ssvid_2018'], {}), '(joint_ssvid_2018)\n', (17259, 17277), True, 'import numpy as np\n'), ((17296, 17317), 'numpy.array', 'np.array', (['labels_2018'], {}), '(labels_2018)\n', (17304, 17317), True, 'import numpy as np\n'), ((5683, 5740), 'hdbscan.HDBSCAN', 'hdbscan.HDBSCAN', ([], {'metric': '"""precomputed"""', 'min_cluster_size': '(9)'}), "(metric='precomputed', min_cluster_size=9)\n", (5698, 5740), False, 'import hdbscan\n'), ((15686, 15702), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (15694, 15702), True, 'import numpy as np\n'), ((15721, 15742), 'numpy.array', 'np.array', (['joint_ssvid'], {}), '(joint_ssvid)\n', (15729, 15742), True, 'import numpy as np\n'), ((7143, 7161), 'numpy.argsort', 'np.argsort', (['counts'], {}), '(counts)\n', (7153, 7161), True, 'import numpy as np\n'), ((8865, 8885), 'numpy.array', 'np.array', (['base_ssvid'], {}), '(base_ssvid)\n', (8873, 8885), True, 'import numpy as np\n'), ((9071, 9090), 'numpy.array', 'np.array', (['new_ssivd'], {}), '(new_ssivd)\n', (9079, 9090), True, 'import numpy as np\n'), ((6739, 6757), 'numpy.isinf', 'np.isinf', (['dists[i]'], {}), '(dists[i])\n', (6747, 6757), True, 'import numpy as np\n'), ((7084, 7100), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (7092, 7100), True, 'import numpy as np\n')] |
import torch
from torch import Tensor
from typing import List, Tuple, Any, Optional
from torchvision.transforms import functional as F
import torchvision
import torch.utils.data as torch_data
from torchvision import transforms
from PIL import Image
from copy import deepcopy
import math
import os
import numpy as np
class CELEB_A_HQ(torch_data.Dataset):
    """CelebA-HQ face dataset yielding (masked source, clean target) pairs.

    Each item is ``(source, target, landmarks, mask, bbox)``:
    ``source`` has a grey rectangle painted over part of the face,
    ``target`` is the untouched image, ``landmarks`` is a float32 vector of
    the 136 landmark columns ('0'..'135') from ``dataset``, ``mask`` is a
    0/1 tensor marking the occluded pixels and ``bbox`` the (y, x, h, w)
    box spanned by that mask.
    """

    def __init__(self,
                 dataset,
                 mode="train",
                 transform=transforms.ToTensor(),
                 data_root='',
                 use_landmark=False,
                 local_rank=0):
        '''
        dataset: dataframe with an image 'path' column plus landmark columns.
        mode: "train" -> random mask, flip and crop; otherwise a fixed mask.
        transform: applied to both the masked source and the clean target.
        '''
        self.dataset = dataset
        self.mode = mode
        self.transform = transform
        self.data_root = data_root
        # NOTE(review): the constructor argument is ignored and the flag is
        # forced on; kept as-is because downstream code may rely on it.
        self.use_landmark = True
        self.local_rank = f'cuda:{local_rank}'
        self.fa = None

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, index):
        """Build one (source, target, landmarks, mask, bbox) sample."""
        row = self.dataset.iloc[[index]]
        source_image = np.array(Image.open(os.path.join(self.data_root, row['path'].values[0])))
        target_image = deepcopy(source_image)
        # Single-channel mask of the occluded region (255 where masked).
        masked = np.zeros_like(target_image)[:, :, 0]
        if self.mode == 'train':
            # Randomly sized/positioned box around the lower face; columns
            # '4'/'28' and '56'/'57' are landmark coordinates -- TODO confirm
            # the exact landmark semantics against the CSV producer.
            mid = int((row['28'].values[0] + row['4'].values[0]) / 2)
            width = int(abs(row['28'].values[0] - row['4'].values[0]))
            height = width * 0.77
            left_x = int((row['56'].values[0] + mid) / 2 - width // 2)
            left_y = int(row['57'].values[0])
            bbx1, bby1, bbx2, bby2 = self.randbbox(width, height, lam=0)
            bbx1 = int(bbx1 + left_x)
            bbx2 = int(bbx2 + left_x)
            bby1 = int(bby1 + left_y)
            bby2 = int(bby2 + left_y)
            if bbx1 > bbx2:
                bbx1, bbx2 = bbx2, bbx1
            if bby1 > bby2:
                bby1, bby2 = bby2, bby1
            source_image[bby1:bby2, bbx1:bbx2] = 128
            masked[bby1:bby2, bbx1:bbx2] = 255
        else:
            # Deterministic mask for evaluation.
            mid = int((row['28'].values[0] + row['4'].values[0]) / 2)
            width = abs(row['28'].values[0] - row['4'].values[0]) * 0.95
            height = width * 0.78
            left_x = int((row['56'].values[0] + mid) / 2 - width // 2)
            left_y = int(row['57'].values[0])
            source_image[left_y:int(left_y + height), left_x:int(left_x + width)] = 128
            masked[left_y:int(left_y + height), left_x:int(left_x + width)] = 255
        landmark = [row[f'{i}'].values[0] for i in range(68 * 2)]
        source_image = Image.fromarray(source_image)
        target_image = Image.fromarray(target_image)
        masked = Image.fromarray(masked)
        if self.mode == 'train':
            if np.random.rand() < 0.5:
                source_image = F.hflip(source_image)
                target_image = F.hflip(target_image)
                # Bug fix: the mask must follow the flip, otherwise the bbox
                # computed below points at the mirror image of the hole.
                masked = F.hflip(masked)
            i, j, h, w = self.get_params(source_image, (0.75, 1.), (3. / 4., 4. / 3.))
            source_image = F.resized_crop(source_image, i, j, h, w, (256, 256))
            target_image = F.resized_crop(target_image, i, j, h, w, (256, 256))
            masked = F.resized_crop(masked, i, j, h, w, (256, 256))
        source = self.transform(source_image)
        target = self.transform(target_image)
        masked = torchvision.transforms.ToTensor()(masked)
        # Bounding box of the masked region, from its extreme nonzero pixels.
        nonzero = (masked == 1.).nonzero(as_tuple=False)
        first = nonzero[0]
        last = nonzero[-1]
        bbox = torch.as_tensor([first[-2], first[-1], last[-2] - first[-2], last[-1] - first[-1]])
        return source, target, np.array(landmark, dtype=np.float32), masked, bbox

    def randbbox(self, width, height, lam):
        """Sample a random box inside a ``width`` x ``height`` region.

        Follows the CutMix convention: each side covers a ``sqrt(1 - lam)``
        fraction of the region (``lam=0`` -> full size); the centre is drawn
        from Beta(80, 80), i.e. concentrated near the middle.
        """
        W = width
        H = height
        cut_rat = np.sqrt(1. - lam)
        # Bug fix: np.int was removed in NumPy 1.24; the builtin is equivalent.
        cut_w = int(W * cut_rat)
        cut_h = int(H * cut_rat)
        alpha = 80.0
        beta = 80.0
        cx = int(W * np.random.beta(alpha, beta))
        cy = int(H * np.random.beta(alpha, beta))
        bbx1 = np.clip(cx - cut_w // 2, 0, W)
        bby1 = np.clip(cy - cut_h // 2, 0, H)
        bbx2 = np.clip(cx + cut_w // 2, 0, W)
        bby2 = np.clip(cy + cut_h // 2, 0, H)
        return bbx1, bby1, bbx2, bby2

    def get_params(self,
                   img: Tensor, scale: List[float], ratio: List[float]
                   ) -> Tuple[int, int, int, int]:
        """Get parameters for ``crop`` for a random sized crop.

        (Same algorithm as torchvision's RandomResizedCrop.get_params.)

        Args:
            img (PIL Image or Tensor): Input image.
            scale (list): range of scale of the origin size cropped
            ratio (list): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
            sized crop.
        """
        width, height = F.get_image_size(img)
        area = height * width

        log_ratio = torch.log(torch.tensor(ratio))
        for _ in range(10):
            target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
            aspect_ratio = torch.exp(
                torch.empty(1).uniform_(log_ratio[0], log_ratio[1])
            ).item()

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < w <= width and 0 < h <= height:
                i = torch.randint(0, height - h + 1, size=(1,)).item()
                j = torch.randint(0, width - w + 1, size=(1,)).item()
                return i, j, h, w

        # Fallback to central crop
        in_ratio = float(width) / float(height)
        if in_ratio < min(ratio):
            w = width
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = height
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = width
            h = height
        i = (height - h) // 2
        j = (width - w) // 2
        return i, j, h, w
"numpy.clip",
"torch.as_tensor",
"numpy.sqrt",
"numpy.random.rand",
"math.sqrt",
"numpy.array",
"copy.deepcopy",
"torchvision.transforms.functional.get_image_size",
"torch.randint",
"torchvision.transforms.ToTensor",
"numpy.random.beta",
"torchvision.transforms.functional.resized_crop",
"tor... | [((464, 485), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (483, 485), False, 'from torchvision import transforms\n'), ((1249, 1271), 'copy.deepcopy', 'deepcopy', (['source_image'], {}), '(source_image)\n', (1257, 1271), False, 'from copy import deepcopy\n'), ((3276, 3305), 'PIL.Image.fromarray', 'Image.fromarray', (['source_image'], {}), '(source_image)\n', (3291, 3305), False, 'from PIL import Image\n'), ((3329, 3358), 'PIL.Image.fromarray', 'Image.fromarray', (['target_image'], {}), '(target_image)\n', (3344, 3358), False, 'from PIL import Image\n'), ((3376, 3399), 'PIL.Image.fromarray', 'Image.fromarray', (['masked'], {}), '(masked)\n', (3391, 3399), False, 'from PIL import Image\n'), ((4686, 4773), 'torch.as_tensor', 'torch.as_tensor', (['[first[-2], first[-1], last[-2] - first[-2], last[-1] - first[-1]]'], {}), '([first[-2], first[-1], last[-2] - first[-2], last[-1] -\n first[-1]])\n', (4701, 4773), False, 'import torch\n'), ((4953, 4971), 'numpy.sqrt', 'np.sqrt', (['(1.0 - lam)'], {}), '(1.0 - lam)\n', (4960, 4971), True, 'import numpy as np\n'), ((4987, 5006), 'numpy.int', 'np.int', (['(W * cut_rat)'], {}), '(W * cut_rat)\n', (4993, 5006), True, 'import numpy as np\n'), ((5023, 5042), 'numpy.int', 'np.int', (['(H * cut_rat)'], {}), '(H * cut_rat)\n', (5029, 5042), True, 'import numpy as np\n'), ((5289, 5319), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'W'], {}), '(cx - cut_w // 2, 0, W)\n', (5296, 5319), True, 'import numpy as np\n'), ((5335, 5365), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'H'], {}), '(cy - cut_h // 2, 0, H)\n', (5342, 5365), True, 'import numpy as np\n'), ((5381, 5411), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'W'], {}), '(cx + cut_w // 2, 0, W)\n', (5388, 5411), True, 'import numpy as np\n'), ((5427, 5457), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'H'], {}), '(cy + cut_h // 2, 0, H)\n', (5434, 5457), True, 'import numpy as np\n'), ((6064, 6085), 
'torchvision.transforms.functional.get_image_size', 'F.get_image_size', (['img'], {}), '(img)\n', (6080, 6085), True, 'from torchvision.transforms import functional as F\n'), ((1289, 1316), 'numpy.zeros_like', 'np.zeros_like', (['target_image'], {}), '(target_image)\n', (1302, 1316), True, 'import numpy as np\n'), ((3903, 3955), 'torchvision.transforms.functional.resized_crop', 'F.resized_crop', (['source_image', 'i', 'j', 'h', 'w', '(256, 256)'], {}), '(source_image, i, j, h, w, (256, 256))\n', (3917, 3955), True, 'from torchvision.transforms import functional as F\n'), ((3983, 4035), 'torchvision.transforms.functional.resized_crop', 'F.resized_crop', (['target_image', 'i', 'j', 'h', 'w', '(256, 256)'], {}), '(target_image, i, j, h, w, (256, 256))\n', (3997, 4035), True, 'from torchvision.transforms import functional as F\n'), ((4057, 4103), 'torchvision.transforms.functional.resized_crop', 'F.resized_crop', (['masked', 'i', 'j', 'h', 'w', '(256, 256)'], {}), '(masked, i, j, h, w, (256, 256))\n', (4071, 4103), True, 'from torchvision.transforms import functional as F\n'), ((4345, 4378), 'torchvision.transforms.ToTensor', 'torchvision.transforms.ToTensor', ([], {}), '()\n', (4376, 4378), False, 'import torchvision\n'), ((4801, 4837), 'numpy.array', 'np.array', (['landmark'], {'dtype': 'np.float32'}), '(landmark, dtype=np.float32)\n', (4809, 4837), True, 'import numpy as np\n'), ((6147, 6166), 'torch.tensor', 'torch.tensor', (['ratio'], {}), '(ratio)\n', (6159, 6166), False, 'import torch\n'), ((1149, 1223), 'os.path.join', 'os.path.join', (['self.data_root', "self.dataset.iloc[[index]]['path'].values[0]"], {}), "(self.data_root, self.dataset.iloc[[index]]['path'].values[0])\n", (1161, 1223), False, 'import os\n'), ((3549, 3565), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3563, 3565), True, 'import numpy as np\n'), ((3604, 3625), 'torchvision.transforms.functional.hflip', 'F.hflip', (['source_image'], {}), '(source_image)\n', (3611, 3625), True, 'from 
torchvision.transforms import functional as F\n'), ((3657, 3678), 'torchvision.transforms.functional.hflip', 'F.hflip', (['target_image'], {}), '(target_image)\n', (3664, 3678), True, 'from torchvision.transforms import functional as F\n'), ((5194, 5221), 'numpy.random.beta', 'np.random.beta', (['alpha', 'beta'], {}), '(alpha, beta)\n', (5208, 5221), True, 'import numpy as np\n'), ((5244, 5271), 'numpy.random.beta', 'np.random.beta', (['alpha', 'beta'], {}), '(alpha, beta)\n', (5258, 5271), True, 'import numpy as np\n'), ((6434, 6471), 'math.sqrt', 'math.sqrt', (['(target_area * aspect_ratio)'], {}), '(target_area * aspect_ratio)\n', (6443, 6471), False, 'import math\n'), ((6500, 6537), 'math.sqrt', 'math.sqrt', (['(target_area / aspect_ratio)'], {}), '(target_area / aspect_ratio)\n', (6509, 6537), False, 'import math\n'), ((6612, 6655), 'torch.randint', 'torch.randint', (['(0)', '(height - h + 1)'], {'size': '(1,)'}), '(0, height - h + 1, size=(1,))\n', (6625, 6655), False, 'import torch\n'), ((6683, 6725), 'torch.randint', 'torch.randint', (['(0)', '(width - w + 1)'], {'size': '(1,)'}), '(0, width - w + 1, size=(1,))\n', (6696, 6725), False, 'import torch\n'), ((6229, 6243), 'torch.empty', 'torch.empty', (['(1)'], {}), '(1)\n', (6240, 6243), False, 'import torch\n'), ((6334, 6348), 'torch.empty', 'torch.empty', (['(1)'], {}), '(1)\n', (6345, 6348), False, 'import torch\n')] |
import numpy as np
import os
import cv2
from skimage.io import imread
from skimage.io import imsave
from os.path import join
import sys
import matplotlib.pyplot as plt
import argparse
def add_noise(noise_typ, image, sigma):
    """Return a noisy copy of ``image`` (an H x W x C ndarray).

    noise_typ: one of "gauss", "s&p", "poisson", "speckle".
    sigma: std-dev of the Gaussian noise; ignored by the other modes.
    Returns a float array for "gauss"/"poisson"/"speckle"; "s&p" keeps the
    input dtype. An unknown ``noise_typ`` falls through and returns None.
    """
    if noise_typ == "gauss":
        row, col, ch = image.shape
        mean = 0
        gauss = np.random.normal(mean, sigma, (row, col, ch))
        # (the original reshape was a no-op: normal() already returns this shape)
        noisy = image + gauss
        return noisy
    elif noise_typ == "s&p":
        s_vs_p = 0.5
        amount = 0.004
        out = np.copy(image)
        # Salt mode
        num_salt = np.ceil(amount * image.size * s_vs_p)
        # Bug fixes: indexing with a *list* of index arrays is an error in
        # modern NumPy (must be a tuple), and randint's upper bound is
        # already exclusive -- using i - 1 skipped the last index and raised
        # ValueError on size-1 axes (e.g. a single channel).
        coords = tuple(np.random.randint(0, i, int(num_salt)) for i in image.shape)
        out[coords] = 1
        # Pepper mode
        num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
        coords = tuple(np.random.randint(0, i, int(num_pepper)) for i in image.shape)
        out[coords] = 0
        return out
    elif noise_typ == "poisson":
        # Re-quantise to the nearest power-of-two number of levels.
        vals = len(np.unique(image))
        vals = 2 ** np.ceil(np.log2(vals))
        noisy = np.random.poisson(image * vals) / float(vals)
        return noisy
    elif noise_typ == "speckle":
        row, col, ch = image.shape
        gauss = np.random.randn(row, col, ch)
        noisy = image + image * gauss
        return noisy
def get_gaussian_perturbed_image(img_dir, noise_num, noise_interval):
    """Write Gaussian-noised copies of the nucleus/cytoplasm/membrane images.

    For each level i in [0, noise_num] a sibling directory
    ``random_gaussian_<i>`` is created under ``img_dir`` and filled with the
    three images perturbed by zero-mean Gaussian noise of std-dev
    ``i * noise_interval`` (clamped to the uint16 range). Levels whose
    ``membrane.tif`` already exists are skipped.
    """
    channel_names = ('nucleus', 'cytoplasm', 'membrane')
    for level in range(noise_num + 1):
        out_dir = join(img_dir, 'random_gaussian_' + str(level))
        if os.path.exists(join(out_dir, 'membrane.tif')):
            continue  # this noise level was already generated
        try:
            os.makedirs(out_dir)
        except:
            pass
        for name in channel_names:
            original = imread(join(img_dir, name + '.tif'))
            stacked = np.expand_dims(original, axis=-1)
            noisy = add_noise('gauss', stacked, level * noise_interval)
            noisy = np.squeeze(noisy, axis=-1)
            # Clamp to the valid uint16 range before the cast.
            noisy = np.clip(noisy, 0, 65535).astype('uint16')
            imsave(join(out_dir, name + '.tif'), noisy)
def get_downsampled_image(img_dir):
    """Write 30/50/70% downsampled copies of the composite and channel images.

    For each percentage p, creates ``downsampling_<p>`` with resized
    nucleus/cytoplasm/membrane images (clamped to uint16) and
    ``channels_downsampling_<p>`` with every resized channel image found in
    ``<img_dir>/channels``.
    """
    # Bug fix: glob was referenced below but never imported at module level,
    # which raised NameError on the first call.
    import glob
    for pct in [30, 50, 70]:
        downsample_dir = join(img_dir, 'downsampling_' + str(pct))
        try:
            os.makedirs(downsample_dir)
        except:
            pass
        if not os.path.exists(join(downsample_dir, 'membrane.tif')):
            for img_name in ['nucleus', 'cytoplasm', 'membrane']:
                img = imread(join(img_dir, img_name + '.tif'))
                x = img.shape[0]
                y = img.shape[1]
                img_downsampled = cv2.resize(img, (int(y * pct / 100), int(x * pct / 100)), interpolation=cv2.INTER_AREA)
                # Clamp to the valid uint16 range before the cast.
                img_downsampled = np.clip(img_downsampled, 0, 65535).astype('uint16')
                imsave(join(downsample_dir, img_name + '.tif'), img_downsampled)
        channel_dir = join(img_dir, 'channels_downsampling_' + str(pct))
        if (not os.path.exists(channel_dir)) or (len(os.listdir(channel_dir)) == 0):
            # NOTE(review): shelling out to 'rm -rf' is fragile (fails on
            # Windows, unquoted path); kept for behaviour parity.
            os.system('rm -rf ' + channel_dir)
            os.makedirs(channel_dir)
            channels = glob.glob(join(img_dir, "channels", '*.tif'))
            n = len(channels)
            for c in range(n):
                channel = imread(channels[c])
                x = channel.shape[0]
                y = channel.shape[1]
                channel_downsampled = cv2.resize(channel, (int(y * pct / 100), int(x * pct / 100)), interpolation=cv2.INTER_AREA)
                imsave(join(channel_dir, str(c) + '.tif'), channel_downsampled)
"numpy.random.normal",
"numpy.copy",
"numpy.ceil",
"os.path.exists",
"os.listdir",
"numpy.unique",
"os.makedirs",
"numpy.random.poisson",
"numpy.where",
"os.path.join",
"numpy.squeeze",
"skimage.io.imread",
"numpy.expand_dims",
"os.system",
"numpy.log2",
"numpy.random.randn"
] | [((303, 348), 'numpy.random.normal', 'np.random.normal', (['mean', 'sigma', '(row, col, ch)'], {}), '(mean, sigma, (row, col, ch))\n', (319, 348), True, 'import numpy as np\n'), ((521, 535), 'numpy.copy', 'np.copy', (['image'], {}), '(image)\n', (528, 535), True, 'import numpy as np\n'), ((563, 600), 'numpy.ceil', 'np.ceil', (['(amount * image.size * s_vs_p)'], {}), '(amount * image.size * s_vs_p)\n', (570, 600), True, 'import numpy as np\n'), ((742, 787), 'numpy.ceil', 'np.ceil', (['(amount * image.size * (1.0 - s_vs_p))'], {}), '(amount * image.size * (1.0 - s_vs_p))\n', (749, 787), True, 'import numpy as np\n'), ((1861, 1889), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(-1)'}), '(img, axis=-1)\n', (1875, 1889), True, 'import numpy as np\n'), ((1964, 1994), 'numpy.squeeze', 'np.squeeze', (['img_noisy'], {'axis': '(-1)'}), '(img_noisy, axis=-1)\n', (1974, 1994), True, 'import numpy as np\n'), ((2322, 2349), 'os.makedirs', 'os.makedirs', (['downsample_dir'], {}), '(downsample_dir)\n', (2333, 2349), False, 'import os\n'), ((3076, 3110), 'os.system', 'os.system', (["('rm -rf ' + channel_dir)"], {}), "('rm -rf ' + channel_dir)\n", (3085, 3110), False, 'import os\n'), ((3114, 3138), 'os.makedirs', 'os.makedirs', (['channel_dir'], {}), '(channel_dir)\n', (3125, 3138), False, 'import os\n'), ((1449, 1483), 'os.path.join', 'join', (['gaussian_dir', '"""membrane.tif"""'], {}), "(gaussian_dir, 'membrane.tif')\n", (1453, 1483), False, 'from os.path import join\n'), ((1498, 1523), 'os.makedirs', 'os.makedirs', (['gaussian_dir'], {}), '(gaussian_dir)\n', (1509, 1523), False, 'import os\n'), ((1616, 1648), 'os.path.join', 'join', (['img_dir', "(img_name + '.tif')"], {}), "(img_dir, img_name + '.tif')\n", (1620, 1648), False, 'from os.path import join\n'), ((2008, 2031), 'numpy.where', 'np.where', (['(img_noisy < 0)'], {}), '(img_noisy < 0)\n', (2016, 2031), True, 'import numpy as np\n'), ((2050, 2077), 'numpy.where', 'np.where', (['(img_noisy > 65535)'], 
{}), '(img_noisy > 65535)\n', (2058, 2077), True, 'import numpy as np\n'), ((2139, 2176), 'os.path.join', 'join', (['gaussian_dir', "(img_name + '.tif')"], {}), "(gaussian_dir, img_name + '.tif')\n", (2143, 2176), False, 'from os.path import join\n'), ((2392, 2428), 'os.path.join', 'join', (['downsample_dir', '"""membrane.tif"""'], {}), "(downsample_dir, 'membrane.tif')\n", (2396, 2428), False, 'from os.path import join\n'), ((3004, 3031), 'os.path.exists', 'os.path.exists', (['channel_dir'], {}), '(channel_dir)\n', (3018, 3031), False, 'import os\n'), ((3163, 3197), 'os.path.join', 'join', (['img_dir', '"""channels"""', '"""*.tif"""'], {}), "(img_dir, 'channels', '*.tif')\n", (3167, 3197), False, 'from os.path import join\n'), ((3277, 3296), 'skimage.io.imread', 'imread', (['channels[c]'], {}), '(channels[c])\n', (3283, 3296), False, 'from skimage.io import imread\n'), ((953, 969), 'numpy.unique', 'np.unique', (['image'], {}), '(image)\n', (962, 969), True, 'import numpy as np\n'), ((1018, 1049), 'numpy.random.poisson', 'np.random.poisson', (['(image * vals)'], {}), '(image * vals)\n', (1035, 1049), True, 'import numpy as np\n'), ((1148, 1177), 'numpy.random.randn', 'np.random.randn', (['row', 'col', 'ch'], {}), '(row, col, ch)\n', (1163, 1177), True, 'import numpy as np\n'), ((2505, 2537), 'os.path.join', 'join', (['img_dir', "(img_name + '.tif')"], {}), "(img_dir, img_name + '.tif')\n", (2509, 2537), False, 'from os.path import join\n'), ((2707, 2736), 'numpy.where', 'np.where', (['(img_downsampled < 0)'], {}), '(img_downsampled < 0)\n', (2715, 2736), True, 'import numpy as np\n'), ((2762, 2795), 'numpy.where', 'np.where', (['(img_downsampled > 65535)'], {}), '(img_downsampled > 65535)\n', (2770, 2795), True, 'import numpy as np\n'), ((2871, 2910), 'os.path.join', 'join', (['downsample_dir', "(img_name + '.tif')"], {}), "(downsample_dir, img_name + '.tif')\n", (2875, 2910), False, 'from os.path import join\n'), ((3041, 3064), 'os.listdir', 'os.listdir', 
(['channel_dir'], {}), '(channel_dir)\n', (3051, 3064), False, 'import os\n'), ((993, 1006), 'numpy.log2', 'np.log2', (['vals'], {}), '(vals)\n', (1000, 1006), True, 'import numpy as np\n')] |
"""
Description:
Author: <NAME> (<EMAIL>)
Date: 2021-06-06 01:55:29
LastEditors: <NAME> (<EMAIL>)
LastEditTime: 2021-06-06 01:55:30
"""
import os
import argparse
import json
import logging
import logging.handlers
import time
from collections import OrderedDict
from datetime import datetime
from pathlib import Path
from typing import Optional
import numpy as np
import torch
# Public API of this utility module (picked up by ``from <module> import *``).
# NOTE(review): disable_tf_warning and AverageMeter are listed here but not
# defined in this part of the file -- presumably defined further down; verify.
__all__ = [
    "ensure_dir",
    "read_json",
    "write_json",
    "profile",
    "print_stat",
    "Timer",
    "TimerCtx",
    "TorchTracemalloc",
    "fullprint",
    "setup_default_logging",
    "Logger",
    "logger",
    "get_logger",
    "ArgParser",
    "disable_tf_warning",
    "AverageMeter",
]
def ensure_dir(dirname, exist_ok: bool = True):
    """Create directory ``dirname`` (with any missing parents) if absent."""
    path = Path(dirname)
    if not path.is_dir():
        path.mkdir(parents=True, exist_ok=exist_ok)
def read_json(fname):
    """Parse ``fname`` as JSON; objects come back as OrderedDict (key order kept)."""
    with open(fname, "rt") as fp:
        content = json.load(fp, object_hook=OrderedDict)
    return content
def write_json(content, fname):
    """Serialise ``content`` to ``fname`` as 4-space-indented JSON, keys unsorted."""
    with open(fname, "wt") as fp:
        json.dump(content, fp, indent=4, sort_keys=False)
def profile(func=None, timer=True):
    """Decorator that optionally prints a function's wall-clock runtime.

    Works both bare (``@profile``) and parameterised
    (``@profile(timer=False)``); with ``timer=False`` the call is forwarded
    unchanged and nothing is printed.
    """
    from functools import wraps, partial
    import time

    # Bug fix (idiom): compare against None with ``is``, not ``==``.
    if func is None:
        # Called as @profile(timer=...): return a configured decorator.
        return partial(profile, timer=timer)

    @wraps(func)
    def wrapper(*args, **kw):
        if timer:
            start = time.time()
            res = func(*args, **kw)
            end = time.time()
            print("[I] <%s> runtime: %.3f ms" % (func.__name__, (end - start) * 1000))
        else:
            res = func(*args, **kw)
        return res

    return wrapper
def print_stat(x):
    """Print min/max/mean/std of a torch.Tensor or numpy array; ignores other types."""
    if isinstance(x, torch.Tensor):
        stats = (x.min().data.item(), x.max().data.item(), x.mean().data.item(), x.std().data.item())
        print(f"min = {stats[0]:-15f} max = {stats[1]:-15f} mean = {stats[2]:-15f} std = {stats[3]:-15f}")
    elif isinstance(x, np.ndarray):
        stats = (np.min(x), np.max(x), np.mean(x), np.std(x))
        print(f"min = {stats[0]:-15f} max = {stats[1]:-15f} mean = {stats[2]:-15f} std = {stats[3]:-15f}")
class Timer(object):
    """Stopwatch measuring elapsed seconds between successive ``check`` calls."""

    def __init__(self):
        self.cache = datetime.now()

    def check(self):
        """Return seconds since the previous check/reset and restart the clock."""
        now = datetime.now()
        elapsed = (now - self.cache).total_seconds()
        self.cache = now
        return elapsed

    def reset(self):
        """Restart the clock without reporting anything."""
        self.cache = datetime.now()
class TimerCtx:
    """Context manager recording elapsed wall time in ``self.interval`` (seconds)."""

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        self.end = time.time()
        self.interval = self.end - self.start
class TorchTracemalloc(object):
    """Context manager reporting CUDA memory usage (in MB) across its body.

    After exit: ``used``/``peaked`` hold the allocation delta and peak delta
    relative to entry; ``end``/``peak`` hold the absolute values at exit.
    Requires a CUDA device (calls ``torch.cuda`` allocator stats directly).
    """

    def __init__(self, verbose: bool = False) -> None:
        super().__init__()
        self.verbose = verbose  # print a two-line summary on exit when True

    def __enter__(self):
        # Snapshot the current allocation *before* zeroing the peak gauge so
        # the peak measured inside the block is against a fresh baseline.
        self.begin = self._b2mb(torch.cuda.memory_allocated())
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        return self

    def _b2mb(self, x):
        # bytes -> mebibytes
        return x / 2 ** 20

    def __exit__(self, *exc):
        self.end = self._b2mb(torch.cuda.memory_allocated())
        self.peak = self._b2mb(torch.cuda.max_memory_allocated())
        self.used = self.end - self.begin
        self.peaked = self.peak - self.begin
        if self.verbose:
            print(f"Delta used/peaked {self.used:.2f} MB / {self.peaked:.2f} MB")
            print(f"Current used/peaked {self.end:.2f} MB / {self.peak:.2f} MB")
class fullprint:
    """Context manager that temporarily changes numpy print options.

    By default sets ``threshold=np.inf`` so arrays print without truncation;
    any other ``np.set_printoptions`` keyword (e.g. linewidth=75,
    precision=8) can be passed through. The previous options are restored on
    exit.
    """

    def __init__(self, **kwargs):
        if "threshold" not in kwargs:
            kwargs["threshold"] = np.inf
        self.opt = kwargs

    def __enter__(self):
        # Save the current options so __exit__ can restore them.
        self._opt = np.get_printoptions()
        np.set_printoptions(**self.opt)

    def __exit__(self, exc_type, exc_value, tb):
        np.set_printoptions(**self._opt)
class CustomFormatter(logging.Formatter):
    """Logging formatter that colour-codes each record by severity (ANSI escapes)."""

    grey = "\x1b[38;21m"
    yellow = "\x1b[33;21m"
    red = "\x1b[31;21m"
    bold_red = "\x1b[31;1m"
    green = "\x1b[32;21m"
    reset = "\x1b[0m"

    # NOTE: this string is shadowed by the format() method below; it only
    # feeds the FORMATS table while the class body executes.
    format = "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"

    FORMATS = {
        logging.DEBUG: grey + format + reset,
        logging.INFO: grey + format + reset,
        logging.WARNING: yellow + format + reset,
        logging.ERROR: red + format + reset,
        logging.CRITICAL: bold_red + format + reset,
    }

    def format(self, record):
        """Render ``record`` using the colourised layout for its level."""
        return logging.Formatter(self.FORMATS.get(record.levelno)).format(record)
def setup_default_logging(default_level=logging.INFO, default_file_level=logging.INFO, log_path=""):
    """Attach a coloured console handler (and optional rotating file handler) to root.

    Bug fix: this used to add a fresh handler on every call, and
    ``get_logger`` calls it each time -- so every log line was printed once
    per call. Handlers that are already installed are now detected and not
    added again, making the function idempotent.
    """
    root = logging.root
    has_console = any(
        isinstance(h, logging.StreamHandler) and isinstance(h.formatter, CustomFormatter)
        for h in root.handlers
    )
    if not has_console:
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(CustomFormatter())
        root.addHandler(console_handler)
    root.setLevel(default_level)
    if log_path:
        # Compare against the handler's resolved path to avoid duplicates.
        has_file = any(
            isinstance(h, logging.handlers.RotatingFileHandler)
            and getattr(h, "baseFilename", None) == os.path.abspath(log_path)
            for h in root.handlers
        )
        if not has_file:
            file_handler = logging.handlers.RotatingFileHandler(log_path, maxBytes=(1024 ** 2 * 2), backupCount=3)
            file_formatter = logging.Formatter(
                "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
            )
            file_handler.setFormatter(file_formatter)
            file_handler.setLevel(default_file_level)
            root.addHandler(file_handler)
class Logger(object):
    """Thin wrapper around the shared ``logging`` logger named "my_logger".

    Writes to the console and/or a file (both using ``CustomFormatter``);
    at least one of the two sinks must be enabled.
    """

    def __init__(self, console=True, logfile=None, console_level=logging.INFO, logfile_level=logging.INFO):
        super().__init__()
        self.logfile = logfile
        self.console_level = console_level
        self.logifle_level = logfile_level  # (sic) attribute name kept for compatibility
        assert (
            console == True or logfile is not None
        ), "At least enable one from console or logfile for Logger"
        # Step 1: obtain the process-wide backing logger.
        self.logger = logging.getLogger("my_logger")
        self.logger.setLevel(logging.INFO)  # master on/off switch for all sinks
        self.logger.propagate = False
        formatter = CustomFormatter()
        # Step 2: optional console sink.
        if console:
            console_handler = logging.StreamHandler()
            console_handler.setLevel(self.console_level)
            console_handler.setFormatter(formatter)
            self.logger.addHandler(console_handler)
        # Step 3: optional file sink (truncates the file on open).
        if self.logfile is not None:
            file_handler = logging.FileHandler(self.logfile, mode="w")
            file_handler.setLevel(self.logifle_level)
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

    def debug(self, message):
        self.logger.debug(message)

    def info(self, message):
        self.logger.info(message)

    def warning(self, message):
        self.logger.warning(message)

    def error(self, message):
        self.logger.error(message)

    def critical(self, message):
        self.logger.critical(message)
def get_logger(name="default", default_level=logging.INFO, default_file_level=logging.INFO, log_path=""):
setup_default_logging(
default_level=default_level, default_file_level=default_file_level, log_path=log_path
)
return logging.getLogger(name)
logger = get_logger()
class ArgParser(object):
def __init__(self, load_json=None, save_json=None):
super().__init__()
self.load_json = load_json
self.save_json = save_json
self.args = None
self.parser = argparse.ArgumentParser("Argument Parser")
def add_arg(self, *args, **keywords):
self.parser.add_argument(*args, **keywords)
def parse_args(self):
if self.load_json is not None:
assert os.path.exists(self.load_json), logging.error(
f"Configuration JSON {self.load_json} not found"
)
json = read_json(self.load_json)
t_args = argparse.Namespace()
t_args.__dict__.update(json)
self.args = self.parser.parse_args(args=[], namespace=t_args)
else:
self.args = self.parser.parse_args()
return self.args
def print_args(self):
# Print arguments to std out
# and save argument values to yaml file
print("Arguments:")
for p in vars(self.args).items():
print(f"\t{p[0]:30}{str(p[1]):20}")
print("\n")
def dump_args(self, json_file=None):
if json_file is None:
if self.save_json is None:
logging.error("Skip dump configuration JSON. Please specify json_file")
return False
else:
ensure_dir(os.path.dirname(self.save_json))
logging.warning(f"Dump to the initialized JSON file {self.save_json}")
write_json(vars(self.args), self.save_json)
else:
ensure_dir(os.path.dirname(json_file))
logging.info(f"Dump to JSON file {json_file}")
write_json(vars(self.args), json_file)
# with open(self.file, 'w') as f:
# yaml.dump(vars(self.args), f, default_flow_style=False)
# print(f"[I] Arguments dumped to {file}")
def disable_tf_warning():
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import tensorflow as tf
if hasattr(tf, "contrib") and type(tf.contrib) != type(tf):
tf.contrib._warning = None
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# tf.logging.set_verbosity(tf.logging.ERROR)
import logging
logging.getLogger("tensorflow").setLevel(logging.ERROR)
class Meter(object):
"""Base class for Meters."""
def __init__(self):
pass
def state_dict(self):
return {}
def load_state_dict(self, state_dict):
pass
def reset(self):
raise NotImplementedError
@property
def smoothed_value(self) -> float:
"""Smoothed value used for logging."""
raise NotImplementedError
def safe_round(number, ndigits):
if hasattr(number, "__round__"):
return round(number, ndigits)
elif torch is not None and torch.is_tensor(number) and number.numel() == 1:
return safe_round(number.item(), ndigits)
elif np is not None and np.ndim(number) == 0 and hasattr(number, "item"):
return safe_round(number.item(), ndigits)
else:
return number
def type_as(a, b):
if torch.is_tensor(a) and torch.is_tensor(b):
return a.to(b)
else:
return a
class AverageMeter(Meter):
"""Computes and stores the average and current value"""
def __init__(self, name: str, fmt: str = ":f", round: Optional[int] = None) -> None:
self.name = name
self.fmt = fmt
self.round = round
self.reset()
def reset(self):
self.val = None # most recent update
self.sum = 0 # sum from all updates
self.count = 0 # total n from all updates
self.avg = 0
def update(self, val, n=1):
if val is not None:
self.val = val
if n > 0:
self.sum = type_as(self.sum, val) + (val * n)
self.count = type_as(self.count, n) + n
self.avg = self.sum / self.count if self.count > 0 else self.val
def state_dict(self):
return {
"val": self.val,
"sum": self.sum,
"count": self.count,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.val = state_dict["val"]
self.sum = state_dict["sum"]
self.count = state_dict["count"]
self.round = state_dict.get("round", None)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
def __str__(self) -> str:
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
| [
"logging.getLogger",
"logging.StreamHandler",
"argparse.Namespace",
"logging.error",
"logging.info",
"os.path.exists",
"numpy.mean",
"argparse.ArgumentParser",
"pathlib.Path",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.ndim",
"functools.wraps",
"numpy.max",
"logging.root.setLevel... | [((752, 765), 'pathlib.Path', 'Path', (['dirname'], {}), '(dirname)\n', (756, 765), False, 'from pathlib import Path\n'), ((1272, 1283), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (1277, 1283), False, 'from functools import wraps, partial\n'), ((4770, 4793), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (4791, 4793), False, 'import logging\n'), ((4850, 4890), 'logging.root.addHandler', 'logging.root.addHandler', (['console_handler'], {}), '(console_handler)\n', (4873, 4890), False, 'import logging\n'), ((4895, 4931), 'logging.root.setLevel', 'logging.root.setLevel', (['default_level'], {}), '(default_level)\n', (4916, 4931), False, 'import logging\n'), ((7163, 7186), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (7180, 7186), False, 'import logging\n'), ((9235, 9292), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (9258, 9292), False, 'import warnings\n'), ((9297, 9359), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'DeprecationWarning'}), "('ignore', category=DeprecationWarning)\n", (9320, 9359), False, 'import warnings\n'), ((9493, 9555), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (9527, 9555), True, 'import tensorflow as tf\n'), ((927, 969), 'json.load', 'json.load', (['handle'], {'object_hook': 'OrderedDict'}), '(handle, object_hook=OrderedDict)\n', (936, 969), False, 'import json\n'), ((1050, 1103), 'json.dump', 'json.dump', (['content', 'handle'], {'indent': '(4)', 'sort_keys': '(False)'}), '(content, handle, indent=4, sort_keys=False)\n', (1059, 1103), False, 'import json\n'), ((1236, 1265), 'functools.partial', 'partial', (['profile'], {'timer': 'timer'}), '(profile, timer=timer)\n', (1243, 1265), False, 'from functools 
import wraps, partial\n'), ((2095, 2109), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2107, 2109), False, 'from datetime import datetime\n'), ((2146, 2160), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2158, 2160), False, 'from datetime import datetime\n'), ((2305, 2319), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2317, 2319), False, 'from datetime import datetime\n'), ((2384, 2395), 'time.time', 'time.time', ([], {}), '()\n', (2393, 2395), False, 'import time\n'), ((2467, 2478), 'time.time', 'time.time', ([], {}), '()\n', (2476, 2478), False, 'import time\n'), ((2769, 2808), 'torch.cuda.reset_max_memory_allocated', 'torch.cuda.reset_max_memory_allocated', ([], {}), '()\n', (2806, 2808), False, 'import torch\n'), ((3612, 3633), 'numpy.get_printoptions', 'np.get_printoptions', ([], {}), '()\n', (3631, 3633), True, 'import numpy as np\n'), ((3642, 3673), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**self.opt)\n', (3661, 3673), True, 'import numpy as np\n'), ((3731, 3763), 'numpy.set_printoptions', 'np.set_printoptions', ([], {}), '(**self._opt)\n', (3750, 3763), True, 'import numpy as np\n'), ((4578, 4604), 'logging.Formatter', 'logging.Formatter', (['log_fmt'], {}), '(log_fmt)\n', (4595, 4604), False, 'import logging\n'), ((4972, 5061), 'logging.handlers.RotatingFileHandler', 'logging.handlers.RotatingFileHandler', (['log_path'], {'maxBytes': '(1024 ** 2 * 2)', 'backupCount': '(3)'}), '(log_path, maxBytes=1024 ** 2 * 2,\n backupCount=3)\n', (5008, 5061), False, 'import logging\n'), ((5085, 5183), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"""'], {}), "(\n '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')\n", (5102, 5183), False, 'import logging\n'), ((5309, 5346), 'logging.root.addHandler', 'logging.root.addHandler', (['file_handler'], {}), '(file_handler)\n', (5332, 5346), False, 'import logging\n'), 
((5806, 5836), 'logging.getLogger', 'logging.getLogger', (['"""my_logger"""'], {}), "('my_logger')\n", (5823, 5836), False, 'import logging\n'), ((7438, 7480), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Argument Parser"""'], {}), "('Argument Parser')\n", (7461, 7480), False, 'import argparse\n'), ((10501, 10519), 'torch.is_tensor', 'torch.is_tensor', (['a'], {}), '(a)\n', (10516, 10519), False, 'import torch\n'), ((10524, 10542), 'torch.is_tensor', 'torch.is_tensor', (['b'], {}), '(b)\n', (10539, 10542), False, 'import torch\n'), ((1357, 1368), 'time.time', 'time.time', ([], {}), '()\n', (1366, 1368), False, 'import time\n'), ((1428, 1439), 'time.time', 'time.time', ([], {}), '()\n', (1437, 1439), False, 'import time\n'), ((2730, 2759), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (2757, 2759), False, 'import torch\n'), ((2974, 3003), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', ([], {}), '()\n', (3001, 3003), False, 'import torch\n'), ((3036, 3069), 'torch.cuda.max_memory_allocated', 'torch.cuda.max_memory_allocated', ([], {}), '()\n', (3067, 3069), False, 'import torch\n'), ((6170, 6193), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (6191, 6193), False, 'import logging\n'), ((6393, 6436), 'logging.FileHandler', 'logging.FileHandler', (['self.logfile'], {'mode': '"""w"""'}), "(self.logfile, mode='w')\n", (6412, 6436), False, 'import logging\n'), ((7661, 7691), 'os.path.exists', 'os.path.exists', (['self.load_json'], {}), '(self.load_json)\n', (7675, 7691), False, 'import os\n'), ((7693, 7756), 'logging.error', 'logging.error', (['f"""Configuration JSON {self.load_json} not found"""'], {}), "(f'Configuration JSON {self.load_json} not found')\n", (7706, 7756), False, 'import logging\n'), ((7853, 7873), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '()\n', (7871, 7873), False, 'import argparse\n'), ((8857, 8903), 'logging.info', 'logging.info', (['f"""Dump to JSON file 
{json_file}"""'], {}), "(f'Dump to JSON file {json_file}')\n", (8869, 8903), False, 'import logging\n'), ((9630, 9661), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (9647, 9661), False, 'import logging\n'), ((10214, 10237), 'torch.is_tensor', 'torch.is_tensor', (['number'], {}), '(number)\n', (10229, 10237), False, 'import torch\n'), ((8454, 8525), 'logging.error', 'logging.error', (['"""Skip dump configuration JSON. Please specify json_file"""'], {}), "('Skip dump configuration JSON. Please specify json_file')\n", (8467, 8525), False, 'import logging\n'), ((8649, 8719), 'logging.warning', 'logging.warning', (['f"""Dump to the initialized JSON file {self.save_json}"""'], {}), "(f'Dump to the initialized JSON file {self.save_json}')\n", (8664, 8719), False, 'import logging\n'), ((8817, 8843), 'os.path.dirname', 'os.path.dirname', (['json_file'], {}), '(json_file)\n', (8832, 8843), False, 'import os\n'), ((8600, 8631), 'os.path.dirname', 'os.path.dirname', (['self.save_json'], {}), '(self.save_json)\n', (8615, 8631), False, 'import os\n'), ((10341, 10356), 'numpy.ndim', 'np.ndim', (['number'], {}), '(number)\n', (10348, 10356), True, 'import numpy as np\n'), ((1929, 1938), 'numpy.min', 'np.min', (['x'], {}), '(x)\n', (1935, 1938), True, 'import numpy as np\n'), ((1952, 1961), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (1958, 1961), True, 'import numpy as np\n'), ((1976, 1986), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (1983, 1986), True, 'import numpy as np\n'), ((2000, 2009), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (2006, 2009), True, 'import numpy as np\n')] |
import os
import glob
import logging
import numpy as np
import skimage.io
import skimage.transform
import skimage.color
from joblib import Parallel, delayed
from pprint import pformat
from utils import CONFIG
config = CONFIG.DatasetLoader
log = logging.getLogger('DatasetLoader')
log.setLevel(config.log.level)
class DatasetLoader(object):
"""docstring for DatasetLoader"""
def __init__(self, folder_name, ext='jpg', size=224, batch_size=None):
self.folder_name = folder_name
self.input_size = size
self._par = Parallel(n_jobs=config.par_jobs)
filelist = "{}_list.txt".format(os.path.abspath(folder_name))
if os.path.exists(filelist):
with open(filelist, 'r') as fp:
self._fnames = [f.strip() for f in fp.readlines()]
else:
self._fnames = glob.glob(os.path.join(folder_name, '*.%s' % ext))
with open(filelist, 'w') as fp:
fp.write("\n".join(self._fnames))
self.num_samples = len(self._fnames)
self.curr_img_idx = 0
self._batch_size = batch_size
self.passes = 0
log.info("Number of images found: %d" % self.num_samples)
def get_batch(self, batch_size=None, warp=True):
batch_size = self._batch_size if batch_size is None else batch_size
assert batch_size is not None
if self.curr_img_idx + batch_size < self.num_samples:
names = self._fnames[
self.curr_img_idx: self.curr_img_idx + batch_size]
assert len(names) == batch_size
self.curr_img_idx += batch_size
else:
remains = self.num_samples - self.curr_img_idx
warp_count = 0
if warp:
warp_count = batch_size - remains
names = self._fnames[-remains:] + self._fnames[:warp_count]
assert len(names) == batch_size
else:
names = self._fnames[-remains:]
log.debug("num of files {}".format(len(names)))
self.curr_img_idx = warp_count
self.passes += 1
X = self._par(delayed(load_image)(path, self.input_size)
for path in names)
X = np.asarray(X, dtype=np.float32)
# X = np.transpose(X, (1, 2, 3, 0))
log.debug("size of batch fed {}".format(X.shape))
return X
def load_image(path, size=224):
img = skimage.io.imread(path)
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy:yy + short_edge, xx:xx + short_edge]
resized_img = skimage.transform.resize(crop_img, (size, size))
if resized_img.ndim == 2:
resized_img = skimage.color.gray2rgb(resized_img)
assert resized_img.ndim == 3, ("RGB image expected")
return resized_img
| [
"logging.getLogger",
"os.path.exists",
"numpy.asarray",
"os.path.join",
"joblib.Parallel",
"os.path.abspath",
"joblib.delayed"
] | [((246, 280), 'logging.getLogger', 'logging.getLogger', (['"""DatasetLoader"""'], {}), "('DatasetLoader')\n", (263, 280), False, 'import logging\n'), ((547, 579), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'config.par_jobs'}), '(n_jobs=config.par_jobs)\n', (555, 579), False, 'from joblib import Parallel, delayed\n'), ((661, 685), 'os.path.exists', 'os.path.exists', (['filelist'], {}), '(filelist)\n', (675, 685), False, 'import os\n'), ((2221, 2252), 'numpy.asarray', 'np.asarray', (['X'], {'dtype': 'np.float32'}), '(X, dtype=np.float32)\n', (2231, 2252), True, 'import numpy as np\n'), ((620, 648), 'os.path.abspath', 'os.path.abspath', (['folder_name'], {}), '(folder_name)\n', (635, 648), False, 'import os\n'), ((849, 888), 'os.path.join', 'os.path.join', (['folder_name', "('*.%s' % ext)"], {}), "(folder_name, '*.%s' % ext)\n", (861, 888), False, 'import os\n'), ((2125, 2144), 'joblib.delayed', 'delayed', (['load_image'], {}), '(load_image)\n', (2132, 2144), False, 'from joblib import Parallel, delayed\n')] |
import os
import time
import numpy as np
import pandas as pd
from oplrareg.solvers import get_solver_definition
from modSAR.dataset import QSARDatasetIO
from modSAR.graph import GraphUtils
from copy import deepcopy
from sklearn.externals.joblib import Parallel, delayed
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import PredefinedSplit, ParameterGrid, ParameterSampler
class DataSplit:
def __init__(self, qsar_dataset, filename, n_splits=100):
self.n_splits = n_splits
self.qsar_dataset = qsar_dataset
excelFile = pd.ExcelFile(filename)
self.sheets = {sheetName: excelFile.parse(sheetName) for sheetName in excelFile.sheet_names}
# Get ID of samples:
def get_id_internal_samples(self, split_number):
return self.sheets["split%d_internal_samples" % split_number]["ID"]
def get_id_external_samples(self, split_number):
return self.sheets["split%d_external_samples" % split_number]["ID"]
def get_id_internal_ts_samples(self, split_number, fold_number):
folds = self.sheets["split%d_folds" % split_number]
return folds.loc[folds["fold"] == fold_number, "ID"]
def get_id_internal_tr_samples(self, split_number, fold_number):
internal_samples = self.get_internal_samples(split_number)
ts_samples = self.get_id_internal_ts_samples(split_number, fold_number)
return internal_samples[~internal_samples.index.isin(ts_samples)].index
# Subset according to predefined split and fold number:
def get_internal_samples(self, split_number):
return self.qsar_dataset.X.loc[self.get_id_internal_samples(split_number)]
def get_external_samples(self, split_number):
return self.qsar_dataset.X.loc[self.get_id_external_samples(split_number)]
def get_test_samples_in_fold(self, split_number, fold_number):
return self.qsar_dataset.X.loc[self.get_id_internal_ts_samples(split_number, fold_number)]
def get_train_samples_in_fold(self, split_number, fold_number):
return self.qsar_dataset.X.loc[self.get_id_internal_tr_samples(split_number, fold_number)]
def get_internal_Y(self, split_number):
return self.qsar_dataset.y.loc[self.get_id_internal_samples(split_number)]
def get_external_Y(self, split_number):
return self.qsar_dataset.y.loc[self.get_id_external_samples(split_number)]
def get_test_Y_in_fold(self, split_number, fold_number):
return self.qsar_dataset.y.loc[self.get_id_internal_ts_samples(split_number, fold_number)]
def get_train_Y_in_fold(self, split_number, fold_number):
return self.qsar_dataset.y.loc[self.get_id_internal_tr_samples(split_number, fold_number)]
# Similarity matrix subset:
def get_sim_matrix_tr_samples(self, split_number, fold_number):
internal_tr_samples = self.get_id_internal_tr_samples(split_number, fold_number)
return self.qsar_dataset.pairwise_similarity.loc[internal_tr_samples.values, internal_tr_samples.values]
def get_sim_matrix_ts_samples(self, split_number, fold_number):
internal_ts_samples = self.get_id_internal_ts_samples(split_number, fold_number)
return self.qsar_dataset.pairwise_similarity.loc[internal_ts_samples.values, internal_ts_samples.values]
def get_sim_matrix_internal_samples(self, split_number):
internal_samples = self.get_id_internal_samples(split_number)
return self.qsar_dataset.pairwise_similarity.loc[internal_samples.values, internal_samples.values]
def get_sim_matrix_external_samples(self, split_number):
external_samples = self.get_id_external_samples(split_number)
return self.qsar_dataset.pairwise_similarity.loc[external_samples.values, external_samples.values]
class QSARValidation:
""" Performs QSAR validation workflow
"""
def __init__(self, estimator, data_split, split_number, is_random_search=False):
self.estimator = estimator
self.is_random_search = is_random_search
self.data_split = data_split
self.split_number = split_number
self.n_splits = data_split.n_splits
self.dataset_name = data_split.qsar_dataset.name
self.dataset_version = 'default'
def predefined_cross_validation(self, param_grid, fit_params, folds=None, n_jobs=-1):
""" Run cross validation in parallel with grid search or random search """
if self.is_random_search:
# If it is random search, creates 6 random combinations of
# the parameters grid/distribution for each fold
paramGrid = ParameterSampler(param_grid, 6)
else:
# Regular GridSearch, obtains a combination of all possible parameters
paramGrid = ParameterGrid(param_grid)
print(self.estimator)
# Find optimal threshold
if self.estimator.algorithm_name == 'modSAR':
internal_samples_sim = self.data_split.get_sim_matrix_internal_samples(self.split_number)
_, threshold = GraphUtils.find_optimal_threshold(internal_samples_sim)
fit_params['threshold'] = threshold
""" Creats parallel tasks for the cross-validation.
This is the same function used in the source code of GridSearchCV in sklearn.
Parallel function will take care of all for loops defined here and will correctly
allocate more computational resources when each for loop complete.
Each for loop runs the function _fit_and_score defined above """
cross_validation_results = \
Parallel(n_jobs=n_jobs, verbose=True, pre_dispatch='n_jobs') \
(delayed(self._fit_and_score)(deepcopy(self.estimator), fold, params, fit_params)
for fold in range(1, self.n_splits + 1) if folds is None or (folds is not None and fold in folds)
for params in paramGrid)
# After cross-validation, gather results and picks best model
(results, cv_models) = zip(*cross_validation_results)
results = pd.concat(results, ignore_index=True)
bestFold = results["test_mae"].idxmin()
# Shows parameters of the best fold
print("Metrics for best model in cross-validation:")
print(results.iloc[bestFold])
best_model = cv_models[bestFold]
# External Validation
external_X = self.data_split.get_external_samples(self.split_number)
external_y = self.data_split.get_external_Y(self.split_number)
if self.estimator.algorithm_name == "modSAR":
id_external_samples = self.data_split.get_id_external_samples(self.split_number)
externalX_smiles = self.data_split.qsar_dataset.X_smiles.loc[id_external_samples]
pred = best_model.predict(external_X, externalX_smiles)
else:
pred = best_model.predict(external_X)
mae_external = mean_absolute_error(external_y, pred)
rmse_external = mean_squared_error(external_y, pred) ** 0.5
if best_model.algorithm_name in ["OplraRegularised", "OplraFeatureSelection"]:
external_results = pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': results.iloc[bestFold]["fold"], 'algorithm': best_model.algorithm_name,
'algorithm_version': best_model.algorithm_version, 'internal': 'FALSE',
'train_mae': 'NA', 'test_mae': mae_external,
'train_rmse': 'NA', 'test_rmse': rmse_external, 'fit_time': 'NA',
'beta': results.iloc[bestFold]['beta'],
'lambda': results.iloc[bestFold]['lambda'],
'no_regions': results.iloc[bestFold]['no_regions'],
'no_features': results.iloc[bestFold]['no_features']},
index=np.arange(1))
elif best_model.algorithm_name in ["OplraEnsemble"]:
external_results = pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': results.iloc[bestFold]["fold"], 'algorithm': best_model.algorithm_name,
'algorithm_version': best_model.algorithm_version, 'internal': 'FALSE',
'train_mae': 'NA', 'test_mae': mae_external,
'train_rmse': 'NA', 'test_rmse': rmse_external, 'fit_time': 'NA',
'beta': results.iloc[bestFold]['beta'],
'lambda': results.iloc[bestFold]['lambda'],
'no_repeats': results.iloc[bestFold]['no_repeats'],
'resampling': results.iloc[bestFold]['resampling'],
'avg_no_regions': results.iloc[bestFold]['avg_no_regions'],
'no_features': results.iloc[bestFold]['no_features']},
index=np.arange(1))
elif best_model.algorithm_name in ["modSAR"]:
external_results = pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': results.iloc[bestFold]["fold"], 'algorithm': best_model.algorithm_name,
'algorithm_version': best_model.algorithm_version, 'internal': 'FALSE',
'no_modules': results.iloc[bestFold]['no_modules'],
'no_classes': results.iloc[bestFold]['no_classes'],
'threshold': results.iloc[bestFold]['threshold'],
'train_mae': 'NA', 'test_mae': mae_external,
'train_rmse': 'NA', 'test_rmse': rmse_external, 'fit_time': 'NA',
'beta': results.iloc[bestFold]['beta'],
'lambda': results.iloc[bestFold]['lambda']},
index=np.arange(1))
else:
external_results = pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': results.iloc[bestFold]["fold"], 'algorithm': best_model.algorithm_name,
'algorithm_version': best_model.algorithm_version, 'internal': 'FALSE',
'no_modules': None,
'no_classes': None,
'threshold': None,
'train_mae': 'NA', 'test_mae': mae_external,
'train_rmse': 'NA', 'test_rmse': rmse_external, 'fit_time': 'NA',
'beta': None,
'lambda': None},
index=np.arange(1))
results = pd.concat([results, external_results], ignore_index=True)
return results, best_model
def _fit_and_score(self, estimator, fold, params, fit_params):
"""A iteration of cross-validation with algorithm <estimator>, fold number <fold> and samples in
training/testing defined in <cvIter>, run with parameters in <params>
:param estimator:
:param fold:
:param params:
:return:
"""
print("Iteration: %d/%d" % (fold, self.n_splits))
# Sets parameters for the algorithms, as defined by the grid search
estimator.set_params(**params)
# Runs the algorithm in the predefined split of this Cross-Validation iteration
trainX = self.data_split.get_train_samples_in_fold(self.split_number, fold)
trainY = self.data_split.get_train_Y_in_fold(self.split_number, fold)
testX = self.data_split.get_test_samples_in_fold(self.split_number, fold)
testY = self.data_split.get_test_Y_in_fold(self.split_number, fold)
start = time.time()
if estimator.algorithm_name == "modSAR":
estimator.solver_def = get_solver_definition(estimator.solver_name) # CPLEX or GLPK
# Obtain smiles codes for samples in training
internal_tr_samples = self.data_split.get_id_internal_tr_samples(self.split_number, fold)
trainX_smiles = self.data_split.qsar_dataset.X_smiles.loc[internal_tr_samples]
sim_matrix = self.data_split.qsar_dataset.pairwise_similarity.loc[internal_tr_samples, internal_tr_samples]
print(self.data_split.qsar_dataset)
print(trainX.shape)
estimator.fit(trainX, trainY, sim_matrix, trainX_smiles,
threshold=fit_params['threshold'],
k=fit_params['k'])
elif estimator.algorithm_name == "OplraRegularised":
trainY = trainY['pchembl_value'] # Get series
estimator.solver_def = get_solver_definition(estimator.solver_name)
if self.estimator.algorithm_version == "v1_1":
estimator.fit(trainX, trainY, f_star=fit_params['fStar'])
else:
estimator.fit(trainX, trainY)
else:
estimator.fit(trainX, trainY)
end = time.time()
if estimator.algorithm_name == "modSAR":
train_predicted = estimator.predict(trainX, trainX_smiles)
else:
train_predicted = estimator.predict(trainX)
trainMAE = mean_absolute_error(trainY, train_predicted)
trainRMSE = mean_squared_error(trainY, train_predicted) ** 0.5
if estimator.algorithm_name == "modSAR":
internal_ts_samples = self.data_split.get_id_internal_ts_samples(self.split_number, fold)
testX_smiles = self.data_split.qsar_dataset.X_smiles.loc[internal_ts_samples]
test_predicted = estimator.predict(testX, testX_smiles)
else:
test_predicted = estimator.predict(testX)
testMAE = mean_absolute_error(testY, test_predicted)
testRMSE = mean_squared_error(testY, test_predicted) ** 0.5
result = self.get_results_df(estimator, fold, start, end, trainMAE, testMAE, trainRMSE, testRMSE, params)
return result, estimator
def get_results_df(self, estimator, fold, start, end, trainMAE, testMAE, trainRMSE, testRMSE, params):
if estimator.algorithm_name in ["OplraRegularised", "OplraFeatureSelection"]:
return pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': 'fold%d' % (fold + 1), 'algorithm': self.estimator.algorithm_name,
'algorithm_version': self.estimator.algorithm_version, 'internal': 'TRUE',
'train_mae': trainMAE, 'test_mae': testMAE,
'train_rmse': trainRMSE, 'test_rmse': testRMSE,
'fit_time': (end - start), 'beta': params['beta'], 'lambda': params['lam'],
'no_regions': estimator.final_model.number_regions,
'no_features': len(estimator.final_model.get_selected_features())},
index=np.arange(1))
elif estimator.algorithm_name in ["OplraEnsemble"]:
return pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': 'fold%d' % (fold + 1), 'algorithm': self.estimator.algorithm_name,
'algorithm_version': self.estimator.algorithm_version, 'internal': 'TRUE',
'train_mae': trainMAE, 'test_mae': testMAE,
'train_rmse': trainRMSE, 'test_rmse': testRMSE,
'fit_time': (end - start), 'beta': params['beta'], 'lambda': params['lam'],
'no_repeats': params['noRepeats'], 'resampling': params['resampling'],
'avg_no_regions': estimator.avg_number_regions(),
'no_features': len(estimator.get_selected_features())},
index=np.arange(1))
elif estimator.algorithm_name in ["modSAR"]:
return pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': fold, 'algorithm': self.estimator.algorithm_name,
'algorithm_version': self.estimator.algorithm_version, 'internal': 'TRUE',
'no_modules': estimator.number_modules, 'no_classes': estimator.number_classes,
'threshold': estimator.threshold,
'train_mae': trainMAE, 'test_mae': testMAE,
'train_rmse': trainRMSE, 'test_rmse': testRMSE,
'fit_time': (end - start), 'beta': params['beta'], 'lambda': params['lam']},
index=np.arange(1))
else:
return pd.DataFrame({'splitStrategy': 1, 'splitNumber': self.split_number,
'dataset': self.dataset_name, 'datasetVersion': self.dataset_version,
'fold': 'fold%d' % (fold + 1), 'algorithm': self.estimator.algorithm_name,
'algorithmVersion': self.estimator.algorithm_version, 'internal': 'TRUE',
'train_mae': trainMAE, 'test_mae': testMAE, 'train_rmse': trainRMSE, 'test_rmse': testRMSE,
'fit_time': (end - start), 'params': str(params)},
index=np.arange(1))
| [
"sklearn.model_selection.ParameterGrid",
"copy.deepcopy",
"sklearn.externals.joblib.delayed",
"sklearn.model_selection.ParameterSampler",
"modSAR.graph.GraphUtils.find_optimal_threshold",
"sklearn.metrics.mean_squared_error",
"pandas.ExcelFile",
"sklearn.externals.joblib.Parallel",
"oplrareg.solvers... | [((601, 623), 'pandas.ExcelFile', 'pd.ExcelFile', (['filename'], {}), '(filename)\n', (613, 623), True, 'import pandas as pd\n'), ((6046, 6083), 'pandas.concat', 'pd.concat', (['results'], {'ignore_index': '(True)'}), '(results, ignore_index=True)\n', (6055, 6083), True, 'import pandas as pd\n'), ((6895, 6932), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['external_y', 'pred'], {}), '(external_y, pred)\n', (6914, 6932), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((11844, 11901), 'pandas.concat', 'pd.concat', (['[results, external_results]'], {'ignore_index': '(True)'}), '([results, external_results], ignore_index=True)\n', (11853, 11901), True, 'import pandas as pd\n'), ((12889, 12900), 'time.time', 'time.time', ([], {}), '()\n', (12898, 12900), False, 'import time\n'), ((14144, 14155), 'time.time', 'time.time', ([], {}), '()\n', (14153, 14155), False, 'import time\n'), ((14367, 14411), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['trainY', 'train_predicted'], {}), '(trainY, train_predicted)\n', (14386, 14411), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((14880, 14922), 'sklearn.metrics.mean_absolute_error', 'mean_absolute_error', (['testY', 'test_predicted'], {}), '(testY, test_predicted)\n', (14899, 14922), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((4623, 4654), 'sklearn.model_selection.ParameterSampler', 'ParameterSampler', (['param_grid', '(6)'], {}), '(param_grid, 6)\n', (4639, 4654), False, 'from sklearn.model_selection import PredefinedSplit, ParameterGrid, ParameterSampler\n'), ((4776, 4801), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['param_grid'], {}), '(param_grid)\n', (4789, 4801), False, 'from sklearn.model_selection import PredefinedSplit, ParameterGrid, ParameterSampler\n'), ((5049, 5104), 'modSAR.graph.GraphUtils.find_optimal_threshold', 
'GraphUtils.find_optimal_threshold', (['internal_samples_sim'], {}), '(internal_samples_sim)\n', (5082, 5104), False, 'from modSAR.graph import GraphUtils\n'), ((5589, 5649), 'sklearn.externals.joblib.Parallel', 'Parallel', ([], {'n_jobs': 'n_jobs', 'verbose': '(True)', 'pre_dispatch': '"""n_jobs"""'}), "(n_jobs=n_jobs, verbose=True, pre_dispatch='n_jobs')\n", (5597, 5649), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((6957, 6993), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['external_y', 'pred'], {}), '(external_y, pred)\n', (6975, 6993), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((12985, 13029), 'oplrareg.solvers.get_solver_definition', 'get_solver_definition', (['estimator.solver_name'], {}), '(estimator.solver_name)\n', (13006, 13029), False, 'from oplrareg.solvers import get_solver_definition\n'), ((14432, 14475), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['trainY', 'train_predicted'], {}), '(trainY, train_predicted)\n', (14450, 14475), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((14942, 14983), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['testY', 'test_predicted'], {}), '(testY, test_predicted)\n', (14960, 14983), False, 'from sklearn.metrics import mean_absolute_error, mean_squared_error\n'), ((13831, 13875), 'oplrareg.solvers.get_solver_definition', 'get_solver_definition', (['estimator.solver_name'], {}), '(estimator.solver_name)\n', (13852, 13875), False, 'from oplrareg.solvers import get_solver_definition\n'), ((5665, 5693), 'sklearn.externals.joblib.delayed', 'delayed', (['self._fit_and_score'], {}), '(self._fit_and_score)\n', (5672, 5693), False, 'from sklearn.externals.joblib import Parallel, delayed\n'), ((5694, 5718), 'copy.deepcopy', 'deepcopy', (['self.estimator'], {}), '(self.estimator)\n', (5702, 5718), False, 'from copy import deepcopy\n'), ((8167, 8179), 'numpy.arange', 'np.arange', 
(['(1)'], {}), '(1)\n', (8176, 8179), True, 'import numpy as np\n'), ((16231, 16243), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (16240, 16243), True, 'import numpy as np\n'), ((9522, 9534), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (9531, 9534), True, 'import numpy as np\n'), ((17292, 17304), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (17301, 17304), True, 'import numpy as np\n'), ((10761, 10773), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (10770, 10773), True, 'import numpy as np\n'), ((11811, 11823), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (11820, 11823), True, 'import numpy as np\n'), ((18234, 18246), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (18243, 18246), True, 'import numpy as np\n'), ((18914, 18926), 'numpy.arange', 'np.arange', (['(1)'], {}), '(1)\n', (18923, 18926), True, 'import numpy as np\n')] |
from matplotlib import pyplot as plt
import pandas as pd
import seaborn as sns
import numpy as np
def heatmap(x, y,
freq_labels = 1,
show_grid = True,
invert_yaxis = False,
**kwargs,
):
color = kwargs.get('color',
[1]*len(x),
)
color_min, color_max = kwargs.get('color_range',
(min(color), max(color)),
)
size = kwargs.get('size',
[1]*len(x),
)
size_min, size_max = kwargs.get('size_range',
(min(size), max(size)),
)[:2]
size_scale = kwargs.get('size_scale', 500)
marker = kwargs.get('marker',
's',
)
if 'palette' in kwargs:
palette = kwargs['palette']
n_colors = len(palette)
else:
n_colors = 256 # Use 256 colors for the diverging color palette
palette = sns.color_palette("Blues", n_colors)
def value_to_color(val):
if color_min == color_max:
return palette[-1]
else:
val_position = float((val - color_min)) / (color_max - color_min) # position of value in the input range, relative to the length of the input range
val_position = min(max(val_position, 0), 1) # bound the position betwen 0 and 1
ind = int(val_position * (n_colors - 1)) # target index in the color palette
return palette[ind]
def value_to_size(val):
if size_min == size_max:
return 1 * size_scale
else:
val_position = (val - size_min) * 0.99 / (size_max - size_min) + 0.01 # position of value in the input range, relative to the length of the input range
val_position = min(max(val_position,
0,
),
1,
) # bound the position betwen 0 and 1
return val_position * size_scale
if 'x_order' in kwargs:
x_names = [t
for t in kwargs['x_order']
]
else:
x_names = [t
for t in list(set([v
for v in x
]))
]
x_to_num = {p[1]:p[0]
for p in enumerate(x_names)
}
if 'y_order' in kwargs:
y_names = [t
for t in kwargs['y_order']
]
else:
y_names = [t
for t in list(set([v for v in y]))
]
y_to_num = {p[1]:p[0]
for p in enumerate(y_names)
}
plot_grid = plt.GridSpec(1,
15,
hspace = 0.2,
wspace = 0.1,
)
ax = plt.subplot(plot_grid[:,:-1]) # Use the left 14/15ths of the grid for the main plot
ax.invert_yaxis()
kwargs_pass_on = {k:v
for k,v in kwargs.items()
if k not in ['color',
'palette',
'color_range',
'size',
'size_range',
'size_scale',
'marker',
'x_order',
'y_order',
'invert_yaxis',
'show_grid',
]
}
ax.scatter(x = [x_to_num[v] for v in x],
y = [y_to_num[v] for v in y],
marker = marker,
s = [value_to_size(v) for v in size],
c = [value_to_color(v) for v in color],
**kwargs_pass_on
)
if freq_labels:
ax.set_xticks([v for k,v in x_to_num.items()][::freq_labels])
ax.set_xticklabels([k for k in x_to_num][::freq_labels],
rotation=90, horizontalalignment='right',
)
ax.set_yticks([v for k,v in y_to_num.items()][::freq_labels])
ax.set_yticklabels([k for k in y_to_num][::freq_labels],
#rotation=45, horizontalalignment='right',
)
if show_grid:
ax.grid(False, 'major')
ax.grid(True, 'minor')
else:
ax.grid(False, 'major')
ax.grid(False, 'minor')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.xaxis.tick_top()
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
if invert_yaxis:
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5][::-1])
else:
ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
ax.set_facecolor('#F1F1F1')
# Add color legend on the right side of the plot
if color_min < color_max:
ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot
col_x = [0]*len(palette) # Fixed x coordinate for the bars
bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars
bar_height = bar_y[1] - bar_y[0]
ax.barh(y=bar_y,
width=[5]*len(palette), # Make bars 5 units wide
left=col_x, # Make bars start at 0
height=bar_height,
color=palette,
linewidth=0
)
ax.set_xlim(1, 2) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle
ax.grid(False) # Hide grid
ax.set_facecolor('white') # Make background white
ax.set_xticks([]) # Remove horizontal ticks
ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max
ax.yaxis.tick_right() # Show vertical ticks on the right
def corrplot(data,
size_scale = 500,
marker = 's',
):
corr = pd.melt(data.reset_index(),
id_vars = 'index',
)
corr.columns = ['x',
'y',
'value',
]
heatmap(corr['x'],
corr['y'],
color = corr['value'],
color_range = [-1, 1],
palette = sns.diverging_palette(20, 220, n=256),
size = corr['value'].abs(), size_range=[0,1],
marker = marker,
x_order = data.columns,
y_order = data.columns[::-1],
size_scale = size_scale,
)
| [
"seaborn.color_palette",
"seaborn.diverging_palette",
"matplotlib.pyplot.GridSpec",
"numpy.linspace",
"matplotlib.pyplot.subplot"
] | [((2880, 2923), 'matplotlib.pyplot.GridSpec', 'plt.GridSpec', (['(1)', '(15)'], {'hspace': '(0.2)', 'wspace': '(0.1)'}), '(1, 15, hspace=0.2, wspace=0.1)\n', (2892, 2923), True, 'from matplotlib import pyplot as plt\n'), ((3055, 3085), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, :-1]'], {}), '(plot_grid[:, :-1])\n', (3066, 3085), True, 'from matplotlib import pyplot as plt\n'), ((1083, 1119), 'seaborn.color_palette', 'sns.color_palette', (['"""Blues"""', 'n_colors'], {}), "('Blues', n_colors)\n", (1100, 1119), True, 'import seaborn as sns\n'), ((5291, 5320), 'matplotlib.pyplot.subplot', 'plt.subplot', (['plot_grid[:, -1]'], {}), '(plot_grid[:, -1])\n', (5302, 5320), True, 'from matplotlib import pyplot as plt\n'), ((5443, 5486), 'numpy.linspace', 'np.linspace', (['color_min', 'color_max', 'n_colors'], {}), '(color_min, color_max, n_colors)\n', (5454, 5486), True, 'import numpy as np\n'), ((6721, 6758), 'seaborn.diverging_palette', 'sns.diverging_palette', (['(20)', '(220)'], {'n': '(256)'}), '(20, 220, n=256)\n', (6742, 6758), True, 'import seaborn as sns\n')] |
# test_yuzu_naive_equality.py
# Author: <NAME> <<EMAIL>>
"""
Testing the yuzu ISM implementation is equivalent to the naive ISM
implementation using the built-in models. These are regression tests.
"""
import numpy
import torch
from nose.tools import assert_raises
from numpy.testing import assert_array_almost_equal
from yuzu import yuzu_ism
from yuzu import precompute
from yuzu.naive_ism import naive_ism
from yuzu.models import *
n_seqs = 2
seq_len = 150
idxs = numpy.random.RandomState(0).randn(n_seqs, 4, seq_len).argmax(axis=1)
X = numpy.zeros((n_seqs, 4, seq_len), dtype='float32')
for i in range(n_seqs):
X[i, idxs[i], numpy.arange(seq_len)] = 1
def evaluate_model(model, X, alpha=None):
alpha = alpha or 1.5
precomputation = precompute(model, seq_len=X.shape[2],
n_choices=X.shape[1], alpha=alpha)
yuzu_isms = yuzu_ism(model, X, precomputation)
naive_isms = naive_ism(model, X)
assert_array_almost_equal(naive_isms, yuzu_isms, 3)
def test_one_layer():
model = OneLayer(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_toynet():
model = ToyNet(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_deepsea():
model = DeepSEA(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_basset():
model = Basset(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_factorized_basset():
model = FactorizedBasset(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_bpnet():
model = ToyNet(4, seq_len)
evaluate_model(model, X)
evaluate_model(model, X, alpha=10)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
###
def test_conv_relu():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU()
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_mp():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU(),
torch.nn.MaxPool1d(3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_batchnorm_mp_conv():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.BatchNorm1d(8),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_mp_conv():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU(),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_batchnorm_mp_conv():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_mp():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(3),
torch.nn.MaxPool1d(2)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(150*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(150*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_batchnorm_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(150*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_batchnorm_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(150*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
Flatten(),
torch.nn.Linear(75*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_relu_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(75*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_batchnorm_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(75*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_batchnorm_relu_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.BatchNorm1d(8),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(75*8, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(75*6, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(75*6, 5),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_relu_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(75*6, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_batchnorm_dense():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(75*6, 5),
torch.nn.BatchNorm1d(5),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_batchnorm_dense_relu():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7, padding=3),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7, padding=3),
Flatten(),
torch.nn.Linear(75*6, 5),
torch.nn.BatchNorm1d(5),
torch.nn.Linear(5, 3),
torch.nn.ReLU()
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
##
def test_conv_relu_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU()
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_mp_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU(),
torch.nn.MaxPool1d(3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_batchnorm_mp_conv_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.BatchNorm1d(8),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_mp_conv_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU(),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_batchnorm_mp_conv_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8),
torch.nn.MaxPool1d(3),
torch.nn.Conv1d(8, 6, kernel_size=7)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_mp_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(3),
torch.nn.MaxPool1d(2)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
Flatten(),
torch.nn.Linear(1152, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(1152, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_batchnorm_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(1152, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_relu_batchnorm_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.ReLU(),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(1152, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
Flatten(),
torch.nn.Linear(576, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_relu_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(576, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_batchnorm_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.BatchNorm1d(8),
Flatten(),
torch.nn.Linear(576, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_batchnorm_relu_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.BatchNorm1d(8),
torch.nn.ReLU(),
Flatten(),
torch.nn.Linear(576, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7),
Flatten(),
torch.nn.Linear(396, 5)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7),
Flatten(),
torch.nn.Linear(396, 5),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_relu_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7),
Flatten(),
torch.nn.Linear(396, 5),
torch.nn.ReLU(),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_batchnorm_dense_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7),
Flatten(),
torch.nn.Linear(396, 5),
torch.nn.BatchNorm1d(5),
torch.nn.Linear(5, 3)
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95)
def test_conv_mp_conv_dense_batchnorm_dense_relu_nopad():
model = torch.nn.Sequential(
torch.nn.Conv1d(4, 8, kernel_size=7),
torch.nn.MaxPool1d(2),
torch.nn.Conv1d(8, 6, kernel_size=7),
Flatten(),
torch.nn.Linear(396, 5),
torch.nn.BatchNorm1d(5),
torch.nn.Linear(5, 3),
torch.nn.ReLU()
)
evaluate_model(model, X)
evaluate_model(model, X, alpha=100)
assert_raises(AssertionError, evaluate_model, model, X, alpha=0.95) | [
"torch.nn.MaxPool1d",
"torch.nn.ReLU",
"numpy.testing.assert_array_almost_equal",
"nose.tools.assert_raises",
"torch.nn.BatchNorm1d",
"numpy.zeros",
"yuzu.precompute",
"yuzu.naive_ism.naive_ism",
"torch.nn.Linear",
"numpy.random.RandomState",
"yuzu.yuzu_ism",
"torch.nn.Conv1d",
"numpy.arange... | [((547, 597), 'numpy.zeros', 'numpy.zeros', (['(n_seqs, 4, seq_len)'], {'dtype': '"""float32"""'}), "((n_seqs, 4, seq_len), dtype='float32')\n", (558, 597), False, 'import numpy\n'), ((747, 819), 'yuzu.precompute', 'precompute', (['model'], {'seq_len': 'X.shape[2]', 'n_choices': 'X.shape[1]', 'alpha': 'alpha'}), '(model, seq_len=X.shape[2], n_choices=X.shape[1], alpha=alpha)\n', (757, 819), False, 'from yuzu import precompute\n'), ((837, 871), 'yuzu.yuzu_ism', 'yuzu_ism', (['model', 'X', 'precomputation'], {}), '(model, X, precomputation)\n', (845, 871), False, 'from yuzu import yuzu_ism\n'), ((886, 905), 'yuzu.naive_ism.naive_ism', 'naive_ism', (['model', 'X'], {}), '(model, X)\n', (895, 905), False, 'from yuzu.naive_ism import naive_ism\n'), ((908, 959), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['naive_isms', 'yuzu_isms', '(3)'], {}), '(naive_isms, yuzu_isms, 3)\n', (933, 959), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1077, 1144), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (1090, 1144), False, 'from nose.tools import assert_raises\n'), ((1257, 1324), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (1270, 1324), False, 'from nose.tools import assert_raises\n'), ((1439, 1506), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (1452, 1506), False, 'from nose.tools import assert_raises\n'), ((1619, 1686), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (1632, 1686), 
False, 'from nose.tools import assert_raises\n'), ((1820, 1887), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (1833, 1887), False, 'from nose.tools import assert_raises\n'), ((1999, 2066), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (2012, 2066), False, 'from nose.tools import assert_raises\n'), ((2262, 2329), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (2275, 2329), False, 'from nose.tools import assert_raises\n'), ((2524, 2591), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (2537, 2591), False, 'from nose.tools import assert_raises\n'), ((2810, 2877), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (2823, 2877), False, 'from nose.tools import assert_raises\n'), ((3128, 3195), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (3141, 3195), False, 'from nose.tools import assert_raises\n'), ((3483, 3550), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (3496, 3550), False, 'from nose.tools import assert_raises\n'), ((3825, 3892), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), 
'(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (3838, 3892), False, 'from nose.tools import assert_raises\n'), ((4204, 4271), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (4217, 4271), False, 'from nose.tools import assert_raises\n'), ((4494, 4561), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (4507, 4561), False, 'from nose.tools import assert_raises\n'), ((4776, 4843), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (4789, 4843), False, 'from nose.tools import assert_raises\n'), ((5082, 5149), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (5095, 5149), False, 'from nose.tools import assert_raises\n'), ((5401, 5468), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (5414, 5468), False, 'from nose.tools import assert_raises\n'), ((5744, 5811), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (5757, 5811), False, 'from nose.tools import assert_raises\n'), ((6053, 6120), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (6066, 6120), False, 'from nose.tools import assert_raises\n'), ((6386, 6453), 'nose.tools.assert_raises', 'assert_raises', 
(['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (6399, 6453), False, 'from nose.tools import assert_raises\n'), ((6732, 6799), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (6745, 6799), False, 'from nose.tools import assert_raises\n'), ((7102, 7169), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (7115, 7169), False, 'from nose.tools import assert_raises\n'), ((7467, 7534), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (7480, 7534), False, 'from nose.tools import assert_raises\n'), ((7863, 7930), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (7876, 7930), False, 'from nose.tools import assert_raises\n'), ((8283, 8350), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (8296, 8350), False, 'from nose.tools import assert_raises\n'), ((8716, 8783), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (8729, 8783), False, 'from nose.tools import assert_raises\n'), ((9173, 9240), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (9186, 9240), False, 'from nose.tools import 
assert_raises\n'), ((9430, 9497), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (9443, 9497), False, 'from nose.tools import assert_raises\n'), ((9687, 9754), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (9700, 9754), False, 'from nose.tools import assert_raises\n'), ((9968, 10035), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (9981, 10035), False, 'from nose.tools import assert_raises\n'), ((10270, 10337), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (10283, 10337), False, 'from nose.tools import assert_raises\n'), ((10609, 10676), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (10622, 10676), False, 'from nose.tools import assert_raises\n'), ((10935, 11002), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (10948, 11002), False, 'from nose.tools import assert_raises\n'), ((11298, 11365), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (11311, 11365), False, 'from nose.tools import assert_raises\n'), ((11583, 11650), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), 
'(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (11596, 11650), False, 'from nose.tools import assert_raises\n'), ((11859, 11926), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (11872, 11926), False, 'from nose.tools import assert_raises\n'), ((12159, 12226), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (12172, 12226), False, 'from nose.tools import assert_raises\n'), ((12472, 12539), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (12485, 12539), False, 'from nose.tools import assert_raises\n'), ((12809, 12876), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (12822, 12876), False, 'from nose.tools import assert_raises\n'), ((13112, 13179), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (13125, 13179), False, 'from nose.tools import assert_raises\n'), ((13439, 13506), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (13452, 13506), False, 'from nose.tools import assert_raises\n'), ((13779, 13846), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (13792, 13846), False, 'from nose.tools import assert_raises\n'), ((14143, 14210), 
'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (14156, 14210), False, 'from nose.tools import assert_raises\n'), ((14491, 14558), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (14504, 14558), False, 'from nose.tools import assert_raises\n'), ((14870, 14937), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (14883, 14937), False, 'from nose.tools import assert_raises\n'), ((15273, 15340), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (15286, 15340), False, 'from nose.tools import assert_raises\n'), ((15689, 15756), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (15702, 15756), False, 'from nose.tools import assert_raises\n'), ((16129, 16196), 'nose.tools.assert_raises', 'assert_raises', (['AssertionError', 'evaluate_model', 'model', 'X'], {'alpha': '(0.95)'}), '(AssertionError, evaluate_model, model, X, alpha=0.95)\n', (16142, 16196), False, 'from nose.tools import assert_raises\n'), ((2127, 2174), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (2142, 2174), False, 'import torch\n'), ((2178, 2193), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2191, 2193), False, 'import torch\n'), ((2383, 2430), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, 
padding=3)\n', (2398, 2430), False, 'import torch\n'), ((2434, 2455), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (2452, 2455), False, 'import torch\n'), ((2650, 2697), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (2665, 2697), False, 'import torch\n'), ((2701, 2716), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (2714, 2716), False, 'import torch\n'), ((2720, 2741), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (2738, 2741), False, 'import torch\n'), ((2936, 2983), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (2951, 2983), False, 'import torch\n'), ((2987, 3008), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (3005, 3008), False, 'import torch\n'), ((3012, 3059), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (3027, 3059), False, 'import torch\n'), ((3264, 3311), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (3279, 3311), False, 'import torch\n'), ((3315, 3338), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (3335, 3338), False, 'import torch\n'), ((3342, 3363), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (3360, 3363), False, 'import torch\n'), ((3367, 3414), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (3382, 3414), False, 'import torch\n'), ((3614, 3661), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (3629, 3661), False, 'import torch\n'), ((3665, 3680), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', 
(3678, 3680), False, 'import torch\n'), ((3684, 3705), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (3702, 3705), False, 'import torch\n'), ((3709, 3756), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (3724, 3756), False, 'import torch\n'), ((3966, 4013), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (3981, 4013), False, 'import torch\n'), ((4017, 4032), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (4030, 4032), False, 'import torch\n'), ((4036, 4059), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (4056, 4059), False, 'import torch\n'), ((4063, 4084), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (4081, 4084), False, 'import torch\n'), ((4088, 4135), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (4103, 4135), False, 'import torch\n'), ((4328, 4375), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (4343, 4375), False, 'import torch\n'), ((4379, 4400), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (4397, 4400), False, 'import torch\n'), ((4404, 4425), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (4422, 4425), False, 'import torch\n'), ((4618, 4665), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (4633, 4665), False, 'import torch\n'), ((4682, 4709), 'torch.nn.Linear', 'torch.nn.Linear', (['(150 * 8)', '(5)'], {}), '(150 * 8, 5)\n', (4697, 4709), False, 'import torch\n'), ((4905, 4952), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, 
padding=3)\n', (4920, 4952), False, 'import torch\n'), ((4956, 4971), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (4969, 4971), False, 'import torch\n'), ((4988, 5015), 'torch.nn.Linear', 'torch.nn.Linear', (['(150 * 8)', '(5)'], {}), '(150 * 8, 5)\n', (5003, 5015), False, 'import torch\n'), ((5216, 5263), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (5231, 5263), False, 'import torch\n'), ((5267, 5290), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (5287, 5290), False, 'import torch\n'), ((5307, 5334), 'torch.nn.Linear', 'torch.nn.Linear', (['(150 * 8)', '(5)'], {}), '(150 * 8, 5)\n', (5322, 5334), False, 'import torch\n'), ((5540, 5587), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (5555, 5587), False, 'import torch\n'), ((5591, 5606), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (5604, 5606), False, 'import torch\n'), ((5610, 5633), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (5630, 5633), False, 'import torch\n'), ((5650, 5677), 'torch.nn.Linear', 'torch.nn.Linear', (['(150 * 8)', '(5)'], {}), '(150 * 8, 5)\n', (5665, 5677), False, 'import torch\n'), ((5871, 5918), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (5886, 5918), False, 'import torch\n'), ((5922, 5943), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (5940, 5943), False, 'import torch\n'), ((5960, 5986), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 8)', '(5)'], {}), '(75 * 8, 5)\n', (5975, 5986), False, 'import torch\n'), ((6185, 6232), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (6200, 6232), False, 'import torch\n'), ((6236, 6257), 
'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (6254, 6257), False, 'import torch\n'), ((6261, 6276), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (6274, 6276), False, 'import torch\n'), ((6293, 6319), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 8)', '(5)'], {}), '(75 * 8, 5)\n', (6308, 6319), False, 'import torch\n'), ((6523, 6570), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (6538, 6570), False, 'import torch\n'), ((6574, 6595), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (6592, 6595), False, 'import torch\n'), ((6599, 6622), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (6619, 6622), False, 'import torch\n'), ((6639, 6665), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 8)', '(5)'], {}), '(75 * 8, 5)\n', (6654, 6665), False, 'import torch\n'), ((6874, 6921), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (6889, 6921), False, 'import torch\n'), ((6925, 6946), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (6943, 6946), False, 'import torch\n'), ((6950, 6973), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (6970, 6973), False, 'import torch\n'), ((6977, 6992), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (6990, 6992), False, 'import torch\n'), ((7009, 7035), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 8)', '(5)'], {}), '(75 * 8, 5)\n', (7024, 7035), False, 'import torch\n'), ((7234, 7281), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (7249, 7281), False, 'import torch\n'), ((7285, 7306), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (7303, 7306), False, 'import torch\n'), ((7310, 7357), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], 
{'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (7325, 7357), False, 'import torch\n'), ((7374, 7400), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 6)', '(5)'], {}), '(75 * 6, 5)\n', (7389, 7400), False, 'import torch\n'), ((7605, 7652), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (7620, 7652), False, 'import torch\n'), ((7656, 7677), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (7674, 7677), False, 'import torch\n'), ((7681, 7728), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (7696, 7728), False, 'import torch\n'), ((7745, 7771), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 6)', '(5)'], {}), '(75 * 6, 5)\n', (7760, 7771), False, 'import torch\n'), ((7773, 7794), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (7788, 7794), False, 'import torch\n'), ((8006, 8053), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (8021, 8053), False, 'import torch\n'), ((8057, 8078), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (8075, 8078), False, 'import torch\n'), ((8082, 8129), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (8097, 8129), False, 'import torch\n'), ((8146, 8172), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 6)', '(5)'], {}), '(75 * 6, 5)\n', (8161, 8172), False, 'import torch\n'), ((8174, 8189), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (8187, 8189), False, 'import torch\n'), ((8193, 8214), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (8208, 8214), False, 'import torch\n'), ((8431, 8478), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 
'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (8446, 8478), False, 'import torch\n'), ((8482, 8503), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (8500, 8503), False, 'import torch\n'), ((8507, 8554), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (8522, 8554), False, 'import torch\n'), ((8571, 8597), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 6)', '(5)'], {}), '(75 * 6, 5)\n', (8586, 8597), False, 'import torch\n'), ((8599, 8622), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(5)'], {}), '(5)\n', (8619, 8622), False, 'import torch\n'), ((8626, 8647), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (8641, 8647), False, 'import torch\n'), ((8869, 8916), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(4, 8, kernel_size=7, padding=3)\n', (8884, 8916), False, 'import torch\n'), ((8920, 8941), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (8938, 8941), False, 'import torch\n'), ((8945, 8992), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)', 'padding': '(3)'}), '(8, 6, kernel_size=7, padding=3)\n', (8960, 8992), False, 'import torch\n'), ((9009, 9035), 'torch.nn.Linear', 'torch.nn.Linear', (['(75 * 6)', '(5)'], {}), '(75 * 6, 5)\n', (9024, 9035), False, 'import torch\n'), ((9037, 9060), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(5)'], {}), '(5)\n', (9057, 9060), False, 'import torch\n'), ((9064, 9085), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (9079, 9085), False, 'import torch\n'), ((9089, 9104), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (9102, 9104), False, 'import torch\n'), ((9306, 9342), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (9321, 9342), False, 'import torch\n'), ((9346, 9361), 'torch.nn.ReLU', 
'torch.nn.ReLU', ([], {}), '()\n', (9359, 9361), False, 'import torch\n'), ((9557, 9593), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (9572, 9593), False, 'import torch\n'), ((9597, 9618), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (9615, 9618), False, 'import torch\n'), ((9819, 9855), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (9834, 9855), False, 'import torch\n'), ((9859, 9874), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (9872, 9874), False, 'import torch\n'), ((9878, 9899), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (9896, 9899), False, 'import torch\n'), ((10100, 10136), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (10115, 10136), False, 'import torch\n'), ((10140, 10161), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (10158, 10161), False, 'import torch\n'), ((10165, 10201), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (10180, 10201), False, 'import torch\n'), ((10412, 10448), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (10427, 10448), False, 'import torch\n'), ((10452, 10475), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (10472, 10475), False, 'import torch\n'), ((10479, 10500), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (10497, 10500), False, 'import torch\n'), ((10504, 10540), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (10519, 10540), False, 'import torch\n'), ((10746, 10782), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (10761, 10782), False, 'import torch\n'), ((10786, 10801), 
'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (10799, 10801), False, 'import torch\n'), ((10805, 10826), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (10823, 10826), False, 'import torch\n'), ((10830, 10866), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (10845, 10866), False, 'import torch\n'), ((11082, 11118), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (11097, 11118), False, 'import torch\n'), ((11122, 11137), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (11135, 11137), False, 'import torch\n'), ((11141, 11164), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (11161, 11164), False, 'import torch\n'), ((11168, 11189), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (11186, 11189), False, 'import torch\n'), ((11193, 11229), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (11208, 11229), False, 'import torch\n'), ((11428, 11464), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (11443, 11464), False, 'import torch\n'), ((11468, 11489), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(3)'], {}), '(3)\n', (11486, 11489), False, 'import torch\n'), ((11493, 11514), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (11511, 11514), False, 'import torch\n'), ((11713, 11749), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (11728, 11749), False, 'import torch\n'), ((11766, 11790), 'torch.nn.Linear', 'torch.nn.Linear', (['(1152)', '(5)'], {}), '(1152, 5)\n', (11781, 11790), False, 'import torch\n'), ((11994, 12030), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (12009, 12030), False, 'import torch\n'), ((12034, 12049), 
'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (12047, 12049), False, 'import torch\n'), ((12066, 12090), 'torch.nn.Linear', 'torch.nn.Linear', (['(1152)', '(5)'], {}), '(1152, 5)\n', (12081, 12090), False, 'import torch\n'), ((12299, 12335), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (12314, 12335), False, 'import torch\n'), ((12339, 12362), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (12359, 12362), False, 'import torch\n'), ((12379, 12403), 'torch.nn.Linear', 'torch.nn.Linear', (['(1152)', '(5)'], {}), '(1152, 5)\n', (12394, 12403), False, 'import torch\n'), ((12617, 12653), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (12632, 12653), False, 'import torch\n'), ((12657, 12672), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (12670, 12672), False, 'import torch\n'), ((12676, 12699), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (12696, 12699), False, 'import torch\n'), ((12716, 12740), 'torch.nn.Linear', 'torch.nn.Linear', (['(1152)', '(5)'], {}), '(1152, 5)\n', (12731, 12740), False, 'import torch\n'), ((12942, 12978), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (12957, 12978), False, 'import torch\n'), ((12982, 13003), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (13000, 13003), False, 'import torch\n'), ((13020, 13043), 'torch.nn.Linear', 'torch.nn.Linear', (['(576)', '(5)'], {}), '(576, 5)\n', (13035, 13043), False, 'import torch\n'), ((13250, 13286), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (13265, 13286), False, 'import torch\n'), ((13290, 13311), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (13308, 13311), False, 'import torch\n'), ((13315, 13330), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', 
(13328, 13330), False, 'import torch\n'), ((13347, 13370), 'torch.nn.Linear', 'torch.nn.Linear', (['(576)', '(5)'], {}), '(576, 5)\n', (13362, 13370), False, 'import torch\n'), ((13582, 13618), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (13597, 13618), False, 'import torch\n'), ((13622, 13643), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (13640, 13643), False, 'import torch\n'), ((13647, 13670), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (13667, 13670), False, 'import torch\n'), ((13687, 13710), 'torch.nn.Linear', 'torch.nn.Linear', (['(576)', '(5)'], {}), '(576, 5)\n', (13702, 13710), False, 'import torch\n'), ((13927, 13963), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (13942, 13963), False, 'import torch\n'), ((13967, 13988), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (13985, 13988), False, 'import torch\n'), ((13992, 14015), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (14012, 14015), False, 'import torch\n'), ((14019, 14034), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (14032, 14034), False, 'import torch\n'), ((14051, 14074), 'torch.nn.Linear', 'torch.nn.Linear', (['(576)', '(5)'], {}), '(576, 5)\n', (14066, 14074), False, 'import torch\n'), ((14281, 14317), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (14296, 14317), False, 'import torch\n'), ((14321, 14342), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (14339, 14342), False, 'import torch\n'), ((14346, 14382), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (14361, 14382), False, 'import torch\n'), ((14399, 14422), 'torch.nn.Linear', 'torch.nn.Linear', (['(396)', '(5)'], {}), '(396, 5)\n', (14414, 14422), False, 'import torch\n'), 
((14635, 14671), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (14650, 14671), False, 'import torch\n'), ((14675, 14696), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (14693, 14696), False, 'import torch\n'), ((14700, 14736), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (14715, 14736), False, 'import torch\n'), ((14753, 14776), 'torch.nn.Linear', 'torch.nn.Linear', (['(396)', '(5)'], {}), '(396, 5)\n', (14768, 14776), False, 'import torch\n'), ((14780, 14801), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (14795, 14801), False, 'import torch\n'), ((15019, 15055), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (15034, 15055), False, 'import torch\n'), ((15059, 15080), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (15077, 15080), False, 'import torch\n'), ((15084, 15120), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (15099, 15120), False, 'import torch\n'), ((15137, 15160), 'torch.nn.Linear', 'torch.nn.Linear', (['(396)', '(5)'], {}), '(396, 5)\n', (15152, 15160), False, 'import torch\n'), ((15164, 15179), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (15177, 15179), False, 'import torch\n'), ((15183, 15204), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (15198, 15204), False, 'import torch\n'), ((15427, 15463), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (15442, 15463), False, 'import torch\n'), ((15467, 15488), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (15485, 15488), False, 'import torch\n'), ((15492, 15528), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (15507, 15528), 
False, 'import torch\n'), ((15545, 15568), 'torch.nn.Linear', 'torch.nn.Linear', (['(396)', '(5)'], {}), '(396, 5)\n', (15560, 15568), False, 'import torch\n'), ((15572, 15595), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(5)'], {}), '(5)\n', (15592, 15595), False, 'import torch\n'), ((15599, 15620), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (15614, 15620), False, 'import torch\n'), ((15848, 15884), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(4)', '(8)'], {'kernel_size': '(7)'}), '(4, 8, kernel_size=7)\n', (15863, 15884), False, 'import torch\n'), ((15888, 15909), 'torch.nn.MaxPool1d', 'torch.nn.MaxPool1d', (['(2)'], {}), '(2)\n', (15906, 15909), False, 'import torch\n'), ((15913, 15949), 'torch.nn.Conv1d', 'torch.nn.Conv1d', (['(8)', '(6)'], {'kernel_size': '(7)'}), '(8, 6, kernel_size=7)\n', (15928, 15949), False, 'import torch\n'), ((15966, 15989), 'torch.nn.Linear', 'torch.nn.Linear', (['(396)', '(5)'], {}), '(396, 5)\n', (15981, 15989), False, 'import torch\n'), ((15993, 16016), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', (['(5)'], {}), '(5)\n', (16013, 16016), False, 'import torch\n'), ((16020, 16041), 'torch.nn.Linear', 'torch.nn.Linear', (['(5)', '(3)'], {}), '(5, 3)\n', (16035, 16041), False, 'import torch\n'), ((16045, 16060), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (16058, 16060), False, 'import torch\n'), ((637, 658), 'numpy.arange', 'numpy.arange', (['seq_len'], {}), '(seq_len)\n', (649, 658), False, 'import numpy\n'), ((474, 501), 'numpy.random.RandomState', 'numpy.random.RandomState', (['(0)'], {}), '(0)\n', (498, 501), False, 'import numpy\n')] |
import numpy as np
from omegaconf import OmegaConf
def calculate_initial_lr(cfg: OmegaConf) -> float:
    """
    Compute the scaled initial learning rate proposed by the SimCLR paper.

    With the linear rule the base rate grows proportionally to the batch
    size (lr * batches / 256); otherwise the square-root rule is used,
    which the SimCLR paper reports works better for small mini-batches.

    :param cfg: Hydra's config.
    :return: Initial learning rate whose type is float.
    """
    base_lr = cfg["optimizer"]["lr"]
    num_batches = cfg["experiment"]["batches"]
    if cfg["optimizer"]["linear_schedule"]:
        return base_lr * num_batches / 256.
    return base_lr * np.sqrt(num_batches)
def calculate_warmup_lr(cfg: OmegaConf, warmup_steps: int, current_step: int) -> float:
    """
    Return the learning rate for the current step of the warmup period.

    The rate ramps up linearly from 0 to the scaled initial learning rate
    over ``warmup_steps`` steps; when no warmup is configured the initial
    rate is returned directly.

    :param cfg: Hydra's config file.
    :param warmup_steps: The number of steps for warmup.
    :param current_step: the current step.
    :return: learning rate value.
    """
    target_lr = calculate_initial_lr(cfg)
    if warmup_steps <= 0.:
        return target_lr
    return current_step / warmup_steps * target_lr
| [
"numpy.sqrt"
] | [((544, 581), 'numpy.sqrt', 'np.sqrt', (["cfg['experiment']['batches']"], {}), "(cfg['experiment']['batches'])\n", (551, 581), True, 'import numpy as np\n')] |
import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from PIL import Image
import cv2
import urllib.request
import numpy as np
def load_model(model):
    """
    Load a saved, trained Keras model from disk.

    Reads the architecture from ``<model>.json`` and the trained weights
    from ``<model>.h5``.

    :param model: path prefix of the saved model files (without extension).
    :return: the reconstructed Keras model with weights loaded.
    """
    # Context manager guarantees the JSON file is closed even if
    # reading or deserialization raises.
    with open(f'{model}.json', 'r') as json_file:
        loaded_model = model_from_json(json_file.read())
    # Load the trained weights into the reconstructed architecture.
    loaded_model.load_weights(f"{model}.h5")
    return loaded_model
# Load the trained classifier once at import time so every call to
# ct_scan_diagnosis() reuses the same in-memory model instance.
model = load_model('model_2')
def ct_scan_diagnosis(image):
    """
    Detects the infection in the chest CT scan image.

    :param image: path (or file object) of the CT scan image.
    :return: diagnosis result (str), either 'COVID-19' or 'Normal'.
    """
    # Normalise the input to a 3-channel RGB image at the resolution the
    # network expects.
    img = Image.open(image).convert('RGB')
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
    img = img.resize((224, 224), Image.LANCZOS)
    # Scale pixel values to [0, 1] and add the leading batch dimension.
    x = img_to_array(img) / 255.
    x = x.reshape((1,) + x.shape)
    # Class index 0 is the COVID-19 class; anything else is a normal scan.
    result = model.predict(x)
    return 'COVID-19' if np.argmax(result) == 0 else 'Normal'
if __name__ == '__main__':
    # Manual smoke test against a sample scan from the uploads folder.
    diagnosis = ct_scan_diagnosis('./uploads/normal_22.jpeg')
    print(diagnosis)
| [
"tensorflow.keras.models.model_from_json",
"tensorflow.keras.preprocessing.image.img_to_array",
"PIL.Image.open",
"numpy.argmax"
] | [((575, 609), 'tensorflow.keras.models.model_from_json', 'model_from_json', (['loaded_model_json'], {}), '(loaded_model_json)\n', (590, 609), False, 'from tensorflow.keras.models import model_from_json\n'), ((1111, 1128), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (1121, 1128), False, 'from PIL import Image\n'), ((1431, 1448), 'numpy.argmax', 'np.argmax', (['result'], {}), '(result)\n', (1440, 1448), True, 'import numpy as np\n'), ((1271, 1288), 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (1283, 1288), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n')] |
#!/usr/bin/python
#
# This file is part of PyRQA.
# Copyright 2015 <NAME>, <NAME>.
"""
Distance metrics.
"""
import math
import numpy as np
from pyrqa.abstract_classes import AbstractMetric
class TaxicabMetric(AbstractMetric):
    """
    Taxicab metric (L1)
    """
    name = 'taxicab_metric'

    @classmethod
    def get_distance_time_series(cls, time_series_x, time_series_y, embedding_dimension, time_delay, index_x, index_y):
        """ See AbstractMetric """
        # Sum of absolute component differences over the embedded vectors.
        return sum(math.fabs(time_series_x[index_x + dim * time_delay] -
                             time_series_y[index_y + dim * time_delay])
                   for dim in np.arange(embedding_dimension))

    @classmethod
    def get_distance_vectors(cls, vectors_x, vectors_y, embedding_dimension, index_x, index_y):
        """ See AbstractMetric """
        return sum(math.fabs(vectors_x[index_x * embedding_dimension + dim] -
                             vectors_y[index_y * embedding_dimension + dim])
                   for dim in np.arange(embedding_dimension))
class EuclideanMetric(AbstractMetric):
    """
    Euclidean metric (L2)
    """
    name = 'euclidean_metric'

    @classmethod
    def get_distance_time_series(cls, time_series_x, time_series_y, embedding_dimension, time_delay, index_x, index_y):
        """ See AbstractMetric """
        # Square root of the summed squared component differences.
        squared = sum(math.pow(time_series_x[index_x + dim * time_delay] -
                               time_series_y[index_y + dim * time_delay], 2)
                      for dim in np.arange(embedding_dimension))
        return math.sqrt(squared)

    @classmethod
    def get_distance_vectors(cls, vectors_x, vectors_y, embedding_dimension, index_x, index_y):
        """ See AbstractMetric """
        squared = sum(math.pow(vectors_x[index_x * embedding_dimension + dim] -
                               vectors_y[index_y * embedding_dimension + dim], 2)
                      for dim in np.arange(embedding_dimension))
        return math.sqrt(squared)
class MaximumMetric(AbstractMetric):
    """
    Maximum metric (L_inf)
    """
    name = 'maximum_metric'

    @classmethod
    def get_distance_time_series(cls, time_series_x, time_series_y, embedding_dimension, time_delay, index_x, index_y):
        """ See AbstractMetric """
        # Largest absolute component difference; the float32 minimum is
        # kept as the result for an empty embedding, as before.
        return max((math.fabs(time_series_x[index_x + dim * time_delay] -
                              time_series_y[index_y + dim * time_delay])
                    for dim in np.arange(embedding_dimension)),
                   default=np.finfo(np.float32).min)

    @classmethod
    def get_distance_vectors(cls, vectors_x, vectors_y, embedding_dimension, index_x, index_y):
        """ See AbstractMetric """
        return max((math.fabs(vectors_x[index_x * embedding_dimension + dim] -
                              vectors_y[index_y * embedding_dimension + dim])
                    for dim in np.arange(embedding_dimension)),
                   default=np.finfo(np.float32).min)
| [
"math.pow",
"math.sqrt",
"math.fabs",
"numpy.finfo",
"numpy.arange"
] | [((514, 544), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (523, 544), True, 'import numpy as np\n'), ((942, 972), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (951, 972), True, 'import numpy as np\n'), ((1513, 1543), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (1522, 1543), True, 'import numpy as np\n'), ((1745, 1764), 'math.sqrt', 'math.sqrt', (['distance'], {}), '(distance)\n', (1754, 1764), False, 'import math\n'), ((1954, 1984), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (1963, 1984), True, 'import numpy as np\n'), ((2192, 2211), 'math.sqrt', 'math.sqrt', (['distance'], {}), '(distance)\n', (2201, 2211), False, 'import math\n'), ((2560, 2590), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (2569, 2590), True, 'import numpy as np\n'), ((3077, 3107), 'numpy.arange', 'np.arange', (['embedding_dimension'], {}), '(embedding_dimension)\n', (3086, 3107), True, 'import numpy as np\n'), ((671, 727), 'math.fabs', 'math.fabs', (['(time_series_x[temp_x] - time_series_y[temp_y])'], {}), '(time_series_x[temp_x] - time_series_y[temp_y])\n', (680, 727), False, 'import math\n'), ((1113, 1161), 'math.fabs', 'math.fabs', (['(vectors_x[temp_x] - vectors_y[temp_y])'], {}), '(vectors_x[temp_x] - vectors_y[temp_y])\n', (1122, 1161), False, 'import math\n'), ((1670, 1728), 'math.pow', 'math.pow', (['(time_series_x[temp_x] - time_series_y[temp_y])', '(2)'], {}), '(time_series_x[temp_x] - time_series_y[temp_y], 2)\n', (1678, 1728), False, 'import math\n'), ((2125, 2175), 'math.pow', 'math.pow', (['(vectors_x[temp_x] - vectors_y[temp_y])', '(2)'], {}), '(vectors_x[temp_x] - vectors_y[temp_y], 2)\n', (2133, 2175), False, 'import math\n'), ((2514, 2534), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (2522, 2534), True, 'import numpy as np\n'), ((2717, 
2773), 'math.fabs', 'math.fabs', (['(time_series_x[temp_x] - time_series_y[temp_y])'], {}), '(time_series_x[temp_x] - time_series_y[temp_y])\n', (2726, 2773), False, 'import math\n'), ((3033, 3053), 'numpy.finfo', 'np.finfo', (['np.float32'], {}), '(np.float32)\n', (3041, 3053), True, 'import numpy as np\n'), ((3244, 3292), 'math.fabs', 'math.fabs', (['(vectors_x[temp_x] - vectors_y[temp_y])'], {}), '(vectors_x[temp_x] - vectors_y[temp_y])\n', (3253, 3292), False, 'import math\n')] |
#!/usr/bin/env python3
""" Base class(es) for text classifiers.
The file defines some of the common classes/functions as well as the
interface (including a command line interface).
"""
import sys, os, time, csv, re, itertools, random, json
import gzip
from hashlib import md5
import numpy as np
from collections import Counter, OrderedDict
import logging
from logging import debug, info, warning, basicConfig
basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
from sklearn.preprocessing import StandardScaler
_TOKENIZER = re.compile(r"\w+|[^ \t\n\r\f\v\w]+").findall
_MAX_LEN = 1024*1024 # default maximum text length
_MIN_LEN = 0 # default minimum text length
def read_csv(path, header=None, sep='\t'):
""" A generator for reading CSV files.
Format is restricted to proper (quoted and escaped) CSV files
with a column for label and another for the text. The file may
contain other fields, but only the class label and the text is
used.
Args:
header: None or e sequence with two elements, specifying
the headers that correspond to label and
the text respectively. If None, header is
assumed to contain no headers.
"""
if path.endswith('.gz'):
import gzip
fp = gzip.open(path, 'rt')
elif path.endswith('.xz'):
import lzma
fp = lzma.open(path, 'rt')
elif path.endswith('.bz2'):
import bz2
fp = bz2.open(path, 'rt')
else:
fp = open(path, 'rt')
if header is None:
csvfp = csv.DictReader(fp, fieldnames=('label', 'text'),
delimiter=sep)
label_h, text_h = 'label', 'text'
else:
label_h, text_h = header
csvfp = csv.DictReader(fp, delimiter=sep)
for row in csvfp:
yield row[label_h], row[text_h]
fp.close()
def get_ngrams(s, ngmin, ngmax, separator="",
bos="<", eos=">", suffix="", flatten=True):
""" For the given sequence s. Return all ngrams in range ngmin-ngmax.
spearator is useful for readability
bos/eos symbols are added to indicate beginning and end of seqence
suffix is an arbitrary string useful for distinguishing
in case differernt types of ngrams
if flatten is false, a list of lists is returned where the
first element contains the ngrams of size ngmin
and the last contains the ngrams of size ngmax
"""
# return a single dummy feature if there are no applicable ngrams
# probably resulting in a mojority-class classifier
if ngmax == 0 or (ngmax - ngmin < 0) :
return ['__dummy__']
ngrams = [[] for x in range(1, ngmax + 1)]
s = [bos] + s + [eos]
for i, ch in enumerate(s):
for ngsize in range(ngmin, ngmax + 1):
if (i + ngsize) <= len(s):
ngrams[ngsize - 1].append(
separator.join(s[i:i+ngsize]) + suffix)
if flatten:
ngrams = [ng for nglist in ngrams for ng in nglist]
return ngrams
class TextCData(object):
def __init__(self, path=None,
num_data=None,
cat_data=None,
tokenizer=_TOKENIZER,
negative_class=None,
labels=[],
maxlen=_MAX_LEN, minlen=_MIN_LEN,
text_label="text",
class_label="label",
sep='\t'):
# _id is a dirty hack to identify the objec quickly
self._id = random.getrandbits(64) ^ int(10000*time.time())
self.maxlen = maxlen
self.text_label = text_label
self.class_label = class_label
self.delimiter = sep
self.minlen = minlen
self.texts = []
self.labels = []
self.num_data = num_data
self.cat_data = cat_data
self.num_features = None
self.cat_features = None
self.label_names = OrderedDict()
self.negative_class = negative_class
if negative_class:
self.label_names[negative_class] = 0
for l in labels:
if l not in self.labels:
self.label_names[l] = len(self.label_names)
self.tokenizer = tokenizer
if path:
self.load(path, num_data, cat_data)
def __eq__(self, other):
if isinstance(other, TextCData):
return self._id == other._id
else:
return False
def symbolic_labels(self, index=None):
label_names = np.array(list(self.label_names.keys()))
if index is None:
index = self.labels
return label_names[index]
def __len__(self):
return len(self.labels)
def update(self, texts, labels=None, labelstr=None, num_feats=None):
""" Update the data with given texts, labels and optionall
numeric features.
TODO: [cleanup] code is replicated in load()
"""
assert (labels is not None) or (labelstr is not None)
if self.num_features is not None:
assert num_feats is not None
self.num_features = np.vstack((self.num_features, num_feats))
self.texts.extend(texts)
if labelstr is None:
self.labels.extend(labels)
else:
for l in labelstr:
if l not in self.label_names:
self.label_names[l] = len(self.label_names)
self.labels.extend(
[self.label_names.get(l) for l in labelstr])
def load(self, path, num_data=None, cat_data=None):
labels_str = []
linen = 0
for l, t in read_csv(path,
header=(self.class_label, self.text_label),
sep=self.delimiter):
linen += 1
if len(t) < self.minlen or len(t) > self.maxlen:
warning("Skipping: line {}...".format(linen))
continue
labels_str.append(l)
self.texts.append(t)
if l not in self.label_names:
self.label_names[l] = len(self.label_names)
self.labels.extend(
[self.label_names.get(l) for l in labels_str])
if num_data is not None:
self.num_features = []
open_file = open
if num_data.endswith('.gz'):
open_file = gzip.open
with open_file(num_data, 'rt') as fp:
for line in fp:
if line.startswith('### '): continue
self.num_features.append([float(x)
for x in line.strip().split()])
assert len(self.labels) == len(self.num_features)
scaler = StandardScaler()
self.num_features = scaler.fit_transform(
np.array(self.num_features))
debug("Loaded extra features {} from {}.".format(
self.num_features.shape, self.num_data))
def stats(self, histogram=None, most_common=0):
labels_int = list(self.label_names.values())
labels_str = list(self.label_names.keys())
label_dist = Counter(self.labels)
fmt = '{:30s} {:>6d}' + ''.join(['{:10.2f}{:10.2f}{:>7d}{:>7d}']*2)
wlen_dist_all = []
clen_dist_all = []
for li in labels_int:
clen_dist = np.array([len(x) for i, x in enumerate(self.texts)\
if self.labels[i] == li])
wlen_dist = np.array([len(self.tokenizer(x)) \
for i, x in enumerate(self.texts) if self.labels[i] == li])
clen_dist_all.extend(clen_dist)
wlen_dist_all.extend(wlen_dist)
print(fmt.format('{}({}):'.format(labels_str[li], li),
label_dist[li],
clen_dist.mean(), clen_dist.std(),
clen_dist.min(), clen_dist.max(),
wlen_dist.mean(), wlen_dist.std(),
wlen_dist.min(), wlen_dist.max()))
clen_dist_all = np.array(clen_dist_all)
wlen_dist_all = np.array(wlen_dist_all)
print(fmt.format('Total:', sum(label_dist.values()),
clen_dist_all.mean(), clen_dist_all.std(),
clen_dist_all.min(), clen_dist_all.max(),
wlen_dist_all.mean(), wlen_dist_all.std(),
wlen_dist_all.min(), wlen_dist_all.max()))
if most_common:
tok_counter = [Counter() for _ in labels_int]
ch_counter = [Counter() for _ in labels_int]
for i, txt in enumerate(self.texts):
tok_counter[self.labels[i]].update(self.tokenizer(txt.lower()))
# only char bigrams - generally useful for detecting odd things
# at te beginning or end of documents.
ch_counter[self.labels[i]].update(get_ngrams(list(txt.lower()), 2,2))
for li in labels_int:
lname = list(self.label_names.keys())[li]
print(lname, 'tokens',
[x[0] for x in tok_counter[li].most_common(most_common)])
print(lname, 'chars',
[x[0] for x in ch_counter[li].most_common(most_common)])
if histogram:
import matplotlib.pyplot as plt
_, plts = plt.subplots(nrows=1,ncols=2)
plts[0].hist(clen_dist_all, bins=100)
plts[0].set_title("Char")
plts[1].hist(wlen_dist_all, bins=100)
plts[1].set_title("Word")
if isinstance(histogram, str):
fig = plt.gcf()
fig.savefig(histogram)
else:
plt.show()
def copy(self):
return self.subset(range(len(self)))
def subset(self, index):
subset = TextCData()
for attr in vars(self):
if not attr.startswith('_'):
setattr(subset, attr, getattr(self, attr))
subset.texts = [self.texts[i] for i in index]
subset.labels = [self.labels[i] for i in index]
if self.num_features is not None:
subset.num_features = self.num_features[index]
return subset
def random_iter(param_space, max_iter=1000):
""" A generator that returns random drwas from a parameter space.
param_space is a sequence (name, type, range)
where type is either 'numeric' or 'categorical',
and range is a triple (start, stop, step) for
numeric parameters, and another sequence of
parameter values to explore.
the function keeps the set of returned parameter values, and
if an already returned parameter set is drawn max_iter times,
it terminates.
"""
seen = set()
rejected = 0
while True:
params = []
for param, type_, seq in param_space:
if 'numeric'.startswith(type_):
try:
start, stop, step = seq
p_range = np.arange(start, stop + step, step).tolist()
except:
start, stop = seq
p_range = np.arange(start, stop + 1, 1).tolist()
rval = random.choice(p_range)
params.append((param, rval))
elif 'categorical'.startswith(type_):
params.append((param, random.choice(seq)))
param_hash = md5(str(params).encode()).digest()
if param_hash not in seen:
seen.add(param_hash)
rejected = 0
yield dict(params)
else:
rejected += 1
if rejected == max_iter:
info("More than {} iterations with already drawn parameters. "
"The search space is probably exhausted".format(max_iter))
return
def grid_iter(param_space):
p_vals = []
for param, type_, seq in param_space:
if 'numeric'.startswith(type_):
try:
start, stop, step = seq
p_range = np.arange(start, stop + step, step).tolist()
except:
start, stop = seq
p_range = np.arange(start, stop + 1, 1).tolist()
p_vals.append(p_range)
elif 'categorical'.startswith(type_):
p_vals.append(seq)
for params in itertools.product(*p_vals):
yield dict(zip((p[0] for p in param_space), params))
def _str_to_param_space(paramstr):
paramstr = str(paramstr)
try:
with open(paramster, 'r') as fp:
params = eval(fp.read().strip())
except:
try:
params = eval(paramstr)
except:
params = '(' + paramstr + ')'
return params
def read_logs(filename):
with open(filename, 'r') as fp:
for line in fp:
if (len(line) > 1 and line[0] != '#'):
log_data = json.loads(line)
tuned_params = log_data['params']
model_params = log_data['model_params']
scores = log_data['scores']
yield tuned_params, scores, model_params
class TextC(object):
PARAMS = {
'name': 'textc',
'baseline': 'random',
'adapt_thresh': 0.0,
}
""" Base text classifier class.
"""
def __init__(self, arg_parser=True, **params):
self._set_defaults()
if arg_parser:
self.arg_parser = self._setup_arg_parser()
self._trained = True # Always for baselines
self.set_params(**params)
def _set_defaults(self):
for k,v in self.PARAMS.items():
setattr(self, k, v)
def get_params(self):
return {k:getattr(self, k) for k in self.PARAMS \
if not k.startswith('_')}
def set_params_str(self, s):
val_dict = dict()
for pval in s.split(','):
p, val = pval.split('=')
try:
val_dict[p] = eval(val)
except:
val_dict[p] = val
self.set_params(**val_dict)
def set_params(self, **kwargs):
for k, v in kwargs.items():
if k not in self.PARAMS:
warning("Ignoring unknown parameter {}.".format(k))
else:
old_v = getattr(self, k)
if v != k:
setattr(self, k, v)
self._trained = False
def fit(self, train=None):
""" Fit the model.
This should be overridden in the real text classifer classes.
"""
self._trained = True
def _predict_k_fold(self, train, k=10, decision_func=False):
#TODO: pass the parameter k
splits = StratifiedKFold(n_splits=k, shuffle=True)
folds = list(splits.split(train.texts, train.labels))
predictions = [None for _ in range(len(train.labels))]
if decision_func:
decision_val = [None for _ in range(len(train.labels))]
for ti, vi in folds:
if decision_func:
pred, dec, _, _ = self.predict(test=train.subset(vi),
train=train.subset(ti), decision_func=True)
else:
pred = self.predict(test=train.subset(vi),
train=train.subset(ti))
for i, j in enumerate(vi):
predictions[j] = pred[i]
if decision_func:
decision_val[j] = dec[i]
if decision_func:
return predictions, decision_val
return predictions
def _predict(self, test, train=None, decision_func=False):
""" Return predictions for the testset. Implements baselines.
Here we only return the numeric indeices for the labels. To
obtain symbolic names, use predict().
This should be overridden in the real text classifer classes.
"""
if train and not self._trained:
self.fit(train)
label_set = list(train.label_names.values())
test_len = len(test.texts)
if self.baseline == 'random':
predictions = np.random.choice(label_set, size=test_len)
elif self.baseline == 'majority':
predictions = test_len * [Counter(train.labels).most_common(1)[0][0]]
elif self.baseline == 'random_sample':
prob = Counter(train.labels)
prob = np.array([prob[l] for l in label_set]) / sum(prob.values())
predictions = np.random.choice(label_set,
size=test_len,
p=prob)
if decision_func:
return predictions, None
return predictions
def predict(self, test=None, train=None, label_names=False,
decision_func=False,
score=False, conf_mat=False):
if test is None:
predictions = self._predict_k_fold(train, decision_func=decision_func)
else:
if self.adapt_thresh != 0.0:
info("Adaptive predictions enabled")
if train is None: train = self._training_data
assert train is not None
pred, dec_val = self._predict(test, train, decision_func=True)
texts_add, labels_add, num_add = [], [], []
for i, v in enumerate(dec_val):
if len(dec_val.shape) == 1: # binary
pick = abs(v) > self.adapt_thresh
else:
pick = len(np.argwhere(v > self.adapt_thresh).flatten()) == 1
if pick:
texts_add.append(test.texts[i])
labels_add.append(pred[i])
if test.num_features is not None:
num_add.append(test.num_features[i])
if not num_add: num_add = None
train_aug = train.copy()
train_aug.update(texts_add, labels_add, num_feats=num_add)
info("Retraining with {} new trainng instances".format(len(texts_add)))
predictions = self._predict(test, train_aug, decision_func=decision_func)
else:
predictions = self._predict(test, train, decision_func=decision_func)
decision_val = None
if decision_func:
predictions, decision_val = predictions
if label_names and self._training_data.label_names:
label_names = list(self._training_data.label_names)
predictions = [label_names[i] for i in predictions]
sc = None
if score:
sc = self._score(test.symbolic_labels(), predictions,
negative_class=train.negative_class)
cf = None
if conf_mat:
from sklearn.metrics import confusion_matrix
cf = confusion_matrix(test.symbolic_labels(), predictions)
return predictions, decision_val, sc, cf
def _score(self, gold, pred,
scores={'precision', 'recall', 'f1-score'},
negative_class=None,
average=None):
""" Return the score for the testset.
"""
from sklearn.metrics import precision_recall_fscore_support as prfs
if average is None:
average = 'macro'
if negative_class:
average = 'binary'
scores = [sc \
if ':' in sc or \
sc not in {'precision', 'recall', 'f1-score'}\
else ':'.join((sc, average))\
for sc in scores]
scores = {k:None for k in scores}
for sc_avg in list(scores):
if ':' in sc_avg:
sc, avg = sc_avg.split(':')
else:
sc = sc_avg
avg = None
if scores[sc_avg] is not None:
continue
if sc not in {'precision', 'recall', 'f1-score', 'accuracy'}:
warning("Skipping unknown score `{}'.".format(sc))
continue
if sc in {'precision', 'recall', 'f1-score'}:
if avg not in {'binary', 'micro', 'macro'}:
warning("Skipping `{}': unknown avgeraging method."
.format(sc_avg))
continue
p, r, f, _ = prfs(gold, pred, average=avg)
scores[':'.join(('precision', avg))] = p
scores[':'.join(('recall', avg))] = r
scores[':'.join(('f1-score', avg))] = f
if sc == 'accuracy':
from sklearn.metrics import accuracy_score
scores['accuracy'] = accuracy_score(gold, pred)
return {k:v for k,v in scores.items()}
def score(self, test=None, train=None,
scores={'precision', 'recall', 'f1-score'},
average=None):
""" Return the score for the testset.
"""
from sklearn.metrics import precision_recall_fscore_support as prfs
pred, _, _, _ = self.predict(test, train)
if test:
gold = test.labels
else:
gold = train.labels
return self._score(gold, pred, scores=scores,
negative_class=train.negative_class,
average=average)
def ttt(self, method):
if method == 'grid':
param_iter = grid_iter(params)
def tune(self, param_space,
train, dev=None,
method=None,
round_digits=None,
max_iter=-1,
optimize=None,
scores=None,
k=None, split_r=0.2, n_splits=1,
save=sys.stdout, skip_params=None):
""" Fit and evaluate a model repeatedly,
the best one based on params.
Args:
param_space: a dict-like object whose keys are the
parameter names and values are tuples of
(type, range) or (type, list). 'type' is one of
'numeric', 'categorical'. For numeric
parameters the range is defined as [begin, end]
or [begin, end, step] is required. The former
is interpreted as range of integers in the
range [begin, end]. For 'categorical'
parameters, a list of values is required.
train: TextCData object used for training. If no test
set is given, it is used both for training
and testing
dev: same as 'train', used as development set
method: search method: 'grid', 'random' or a iterable
that returns values from the option range.
max_iter: maximum number of fit/predict/eval iterations
k: Use k-fold CV.
split_r: ratio of the held-out set, ignored if test set or
argument `k' is given
n_splits: number of splits, sklearn StratifiedShuffleSplit is
used for n-splits
save: file-like object save the results after each iteration
skip_params: A sequence of dictionaries containing individual
parameter values to skip. Useful for resuming a search.
optimize: optimize using given metric.
scores: the scores to report/log.
"""
if method is None: method = 'grid'
if optimize is None: optimize = 'f1-score:macro'
if scores is None: scores = ['precision', 'recall', 'f1-score']
param_space = [p for p in _str_to_param_space(param_space) \
if p[0] in self.get_params()]
# if not param_space:
# info('Tunable parameter list is empty')
# return
if method == 'grid':
param_iter = grid_iter(param_space)
elif method == 'random':
param_iter = random_iter(param_space)
else:
param_iter = method(params)
if dev is not None:
tune_str = "development data".format()
else:
if k:
splits = StratifiedKFold(n_splits=k, shuffle=True)
tune_str = "{}-fold CV".format(k)
else:
splits = StratifiedShuffleSplit(
n_splits=n_splits, test_size=split_r)
tune_str = "{} splits of {} ratio".format(n_splits, split_r)
trn_splits, val_splits = [], []
for ti, vi in splits.split(train.texts, train.labels):
trn_splits.append(train.subset(ti))
val_splits.append(train.subset(vi))
def param_to_str(p_dict):
p_list = sorted(tuple(p_dict.items()))
p_fmt = "{}={} " * len(p_list)
return p_fmt.format(*[x for t in p_list for x in t])
if skip_params:
skip_params = set([md5(param_to_str(p).encode()).digest() \
for p in skip_params])
else:
skip_params = set()
best_mean = 0.0
best_sc = None
best_param = None
for param in param_iter:
scores = []
p_str = param_to_str(param)
p_hash = md5(p_str.encode()).digest()
if p_hash in skip_params:
info('Skipping: {}'.format(p_str))
continue
info('Tuning with {}: {}'.format(tune_str, p_str))
self.set_params(**param)
if dev is not None:
sc = self.score(dev, train=train)
scores.append(sc)
else:
for i in range(len(trn_splits)):
sc = self.score(val_splits[i], train=trn_splits[i])
scores.append(sc)
sc_names = scores[0].keys()
scores = {k:[sc[k] for sc in scores] for k in sc_names}
sc_mean = np.array(scores[optimize]).mean()
if sc_mean > best_mean:
best_mean = sc_mean
best_sc = scores
best_param = param
if save:
json.dump({'params': param,
'model_params': self.get_params(),
'scores': scores},
save, ensure_ascii=False)
print('', file=save, flush=True)
max_iter -= 1
if max_iter == 0:
break
stop_file = '.stop-tune' + str(os.getpid())
if os.path.isfile(stop_file):
os.remove(stop_file)
break
return best_param, best_sc
def _setup_arg_parser(self):
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('--input', '-i', help="Path to the training data")
ap.add_argument('--test', '-t', help="Path to the testing data")
ap.add_argument('--unlabeled-input', '-u',
help="Path to unlabeled training data")
ap.add_argument('--unlabeled-num-input', '-U',
help="Path to numeric features for the unlabeled data")
ap.add_argument('--input-numeric', '-N',
help="Path to (optional) additional numeric features")
ap.add_argument('--test-numeric', '-M',
help="Path to (optional) additional numeric features")
ap.add_argument('--class-label', '-L', default='label',
help="Label of the column corrsponding to the class.")
ap.add_argument('--text-label', '-T', default='text',
help="Label of the column corrsponding to the text.")
ap.add_argument('--delimiter', '-D', default='\t',
help="Delimiter used in input files")
ap.add_argument('--output', '-o', default='-',
help="Output file. `-' means stdout.")
ap.add_argument('--negative-class',
help="The negative class label.")
ap.set_defaults(command='tune')
subp = ap.add_subparsers(help="Command")
tunep = subp.add_parser('tune')
tunep.set_defaults(command='tune')
tunep.add_argument('params',
help=('A string or a filename defining parameter space '
'to be searched.'
'String must be interpretable by python eval() '
'as a sequence whose members are triples of '
'(name, type, values). '
'If "type" is "numeric", values should specify a range '
'(start, stop, step), otherwise ("categorical"), '
'a sequence of values. '
'Example: (("C", "real", (0.5, 1.5, 0.1)), '
'("lowercase", "cat", ("word", "char", "both"))). '
'If "params" is a readable file, the string is read '
'from the file.'
))
tunep.add_argument('--search-method', '-s', choices=('grid', 'random'),
default='grid', help="Method used for hyper parmeter search")
tunep.add_argument('--optimize',
help=('A string of the form "score" or "score:averaging" '
'Currently supported scores are '
'accuracy, precision, recall, f1-score, '
'and supproted averaging methods are '
'micro, macro and binary. '
'If averaging is not specified it is set to '
'macro or binary depending on the classification task.'
))
tunep.add_argument('--max-iter', '-m', type=int, default=-1,
help=('Maximum number of hyperparameter combinations '
'to compare. Default (-1) means until '
'the search space is exhausted'))
tunep.add_argument('--k-folds', '-k', type=int, default=None,
help=('Use k-fold cross validation. '
'Ignored if -t is given.'))
tunep.add_argument('--test-ratio', '-r', type=float, default=0.2,
help=('Ratio of held-out data. '
'Ignored if --test option is given'))
tunep.add_argument('--n-splits', '-n', type=int, default=1,
help=('Number of splits. Ignored if -t or -k is given'))
tunep.add_argument('--save', '-S', metavar='LOGFILE',
help=('Save intermediate parameter values and scores '
'to given log file. '
'Use - for standard output'))
tunep.add_argument('--resume-from', '-R', metavar='LOGFILE',
help=('Resume tuning, skipping the parameters that '
'are logged in the log file.'))
predp = subp.add_parser('predict')
predp.set_defaults(command='predict')
predp.add_argument('params',
help=('A string that can be interpreted by python eval() '
'as a sequence whose members are pairs '
'of, parameter=value'))
predp.add_argument('--score', action='store_true',
help='Also print out the scores.')
predp.add_argument('--only-score', action='store_true',
help='Print out only the scores.')
predp.add_argument('--conf-matrix', action='store_true',
help='Also print out the confusion matrix.')
predp.add_argument('--output-decision-value', action='store_true',
help=('Also return decision function values'))
testp = subp.add_parser('score')
testp.set_defaults(command='score')
testp.add_argument('params',
help=('A string that can be interpreted by python eval() '
'as a sequence whose members are pairs '
'of, parameter=value'))
#TODO: this should not be here
statsp = subp.add_parser('stats')
statsp.set_defaults(command='stats')
statsp.add_argument('--histogram', '-H',
const=True, metavar='FILE', nargs='?',
help="Plot a histogram, optionally to FILE.")
statsp.add_argument('--most-common', type=int, default=0,
help="Also print most-common tokens")
return ap
if __name__ == "__main__":
m = TextC()
opt = m.arg_parser.parse_args()
trn = TextCData(opt.input, num_data=opt.input_numeric,
class_label=opt.class_label,
text_label=opt.text_label, sep=opt.delimiter)
tst = None
if opt.test:
tst = TextCData(opt.test, num_data=opt.test_numeric,
labels=trn.label_names,
class_label=opt.class_label,
text_label=opt.text_label, sep=opt.delimiter)
if opt.command == 'stats':
trn.stats(histogram=opt.histogram, most_common=opt.most_common)
elif opt.command == 'score':
print(m.score(tst, train=trn,
scores=['accuracy', 'precision:micro', 'precision:macro']))
elif opt.command == 'predict':
pred = m.predict(tst, train=trn)
if opt.output == '-':
fp = sys.stdout
else:
fp = open(opt.output, 'w')
for p in pred:
print(p, file=fp)
if fp != sys.stdout:
fp.close()
elif opt.command == 'tune':
skip = None
if opt.resume_from:
skip = [x for (x, _, _) in read_logs(opt.resume_from)]
savefp = None
if opt.save:
if opt.save == '-':
savefp = sys.stdout
else:
if skip and opt.resume_from == opt.save:
savefp = open(opt.save, 'a')
else:
savefp = open(opt.save, 'w')
best_param, best_sc = m.tune(opt.params,
trn, dev=tst,
method=opt.search_method,
max_iter=opt.max_iter,
k=opt.k_folds,
split_r=opt.test_ratio,
n_splits=opt.n_splits,
save=savefp,
optimize=opt.optimize,
skip_params=skip)
print('best params:', best_param)
print('best score:', best_sc)
if savefp and savefp != sys.stdout:
savefp.close()
| [
"sklearn.model_selection.StratifiedShuffleSplit",
"csv.DictReader",
"gzip.open",
"re.compile",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"random.getrandbits",
"logging.info",
"numpy.arange",
"os.remove",
"argparse.ArgumentParser",
"itertools.product",
"numpy.vstack",
"os.ge... | [((411, 477), 'logging.basicConfig', 'basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(message)s"""'}), "(level=logging.DEBUG, format='%(asctime)s %(message)s')\n", (422, 477), False, 'from logging import debug, info, warning, basicConfig\n'), ((618, 660), 're.compile', 're.compile', (['"""\\\\w+|[^ \\\\t\\\\n\\\\r\\\\f\\\\v\\\\w]+"""'], {}), "('\\\\w+|[^ \\\\t\\\\n\\\\r\\\\f\\\\v\\\\w]+')\n", (628, 660), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((12285, 12311), 'itertools.product', 'itertools.product', (['*p_vals'], {}), '(*p_vals)\n', (12302, 12311), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((1379, 1400), 'gzip.open', 'gzip.open', (['path', '"""rt"""'], {}), "(path, 'rt')\n", (1388, 1400), False, 'import gzip\n'), ((1653, 1716), 'csv.DictReader', 'csv.DictReader', (['fp'], {'fieldnames': "('label', 'text')", 'delimiter': 'sep'}), "(fp, fieldnames=('label', 'text'), delimiter=sep)\n", (1667, 1716), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((1834, 1867), 'csv.DictReader', 'csv.DictReader', (['fp'], {'delimiter': 'sep'}), '(fp, delimiter=sep)\n', (1848, 1867), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((3987, 4000), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3998, 4000), False, 'from collections import Counter, OrderedDict\n'), ((7144, 7164), 'collections.Counter', 'Counter', (['self.labels'], {}), '(self.labels)\n', (7151, 7164), False, 'from collections import Counter, OrderedDict\n'), ((8014, 8037), 'numpy.array', 'np.array', (['clen_dist_all'], {}), '(clen_dist_all)\n', (8022, 8037), True, 'import numpy as np\n'), ((8062, 8085), 'numpy.array', 'np.array', (['wlen_dist_all'], {}), '(wlen_dist_all)\n', (8070, 8085), True, 'import numpy as np\n'), ((14634, 14675), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k', 'shuffle': '(True)'}), '(n_splits=k, shuffle=True)\n', 
(14649, 14675), False, 'from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\n'), ((26576, 26601), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (26599, 26601), False, 'import argparse\n'), ((1466, 1487), 'lzma.open', 'lzma.open', (['path', '"""rt"""'], {}), "(path, 'rt')\n", (1475, 1487), False, 'import lzma\n'), ((3568, 3590), 'random.getrandbits', 'random.getrandbits', (['(64)'], {}), '(64)\n', (3586, 3590), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((5161, 5202), 'numpy.vstack', 'np.vstack', (['(self.num_features, num_feats)'], {}), '((self.num_features, num_feats))\n', (5170, 5202), True, 'import numpy as np\n'), ((6719, 6735), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6733, 6735), False, 'from sklearn.preprocessing import StandardScaler\n'), ((9302, 9332), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)'}), '(nrows=1, ncols=2)\n', (9314, 9332), True, 'import matplotlib.pyplot as plt\n'), ((16046, 16088), 'numpy.random.choice', 'np.random.choice', (['label_set'], {'size': 'test_len'}), '(label_set, size=test_len)\n', (16062, 16088), True, 'import numpy as np\n'), ((26383, 26408), 'os.path.isfile', 'os.path.isfile', (['stop_file'], {}), '(stop_file)\n', (26397, 26408), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((1553, 1573), 'bz2.open', 'bz2.open', (['path', '"""rt"""'], {}), "(path, 'rt')\n", (1561, 1573), False, 'import bz2\n'), ((6810, 6837), 'numpy.array', 'np.array', (['self.num_features'], {}), '(self.num_features)\n', (6818, 6837), True, 'import numpy as np\n'), ((8450, 8459), 'collections.Counter', 'Counter', ([], {}), '()\n', (8457, 8459), False, 'from collections import Counter, OrderedDict\n'), ((8507, 8516), 'collections.Counter', 'Counter', ([], {}), '()\n', (8514, 8516), False, 'from collections import Counter, OrderedDict\n'), ((9573, 9582), 'matplotlib.pyplot.gcf', 'plt.gcf', 
([], {}), '()\n', (9580, 9582), True, 'import matplotlib.pyplot as plt\n'), ((9656, 9666), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9664, 9666), True, 'import matplotlib.pyplot as plt\n'), ((11163, 11185), 'random.choice', 'random.choice', (['p_range'], {}), '(p_range)\n', (11176, 11185), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((12835, 12851), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (12845, 12851), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((16908, 16944), 'logging.info', 'info', (['"""Adaptive predictions enabled"""'], {}), "('Adaptive predictions enabled')\n", (16912, 16944), False, 'from logging import debug, info, warning, basicConfig\n'), ((20224, 20253), 'sklearn.metrics.precision_recall_fscore_support', 'prfs', (['gold', 'pred'], {'average': 'avg'}), '(gold, pred, average=avg)\n', (20228, 20253), True, 'from sklearn.metrics import precision_recall_fscore_support as prfs\n'), ((20550, 20576), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['gold', 'pred'], {}), '(gold, pred)\n', (20564, 20576), False, 'from sklearn.metrics import accuracy_score\n'), ((24008, 24049), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k', 'shuffle': '(True)'}), '(n_splits=k, shuffle=True)\n', (24023, 24049), False, 'from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\n'), ((24143, 24203), 'sklearn.model_selection.StratifiedShuffleSplit', 'StratifiedShuffleSplit', ([], {'n_splits': 'n_splits', 'test_size': 'split_r'}), '(n_splits=n_splits, test_size=split_r)\n', (24165, 24203), False, 'from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold\n'), ((26426, 26446), 'os.remove', 'os.remove', (['stop_file'], {}), '(stop_file)\n', (26435, 26446), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((3603, 3614), 'time.time', 'time.time', ([], {}), '()\n', (3612, 3614), False, 'import sys, os, time, csv, 
re, itertools, random, json\n'), ((16279, 16300), 'collections.Counter', 'Counter', (['train.labels'], {}), '(train.labels)\n', (16286, 16300), False, 'from collections import Counter, OrderedDict\n'), ((16408, 16458), 'numpy.random.choice', 'np.random.choice', (['label_set'], {'size': 'test_len', 'p': 'prob'}), '(label_set, size=test_len, p=prob)\n', (16424, 16458), True, 'import numpy as np\n'), ((25788, 25814), 'numpy.array', 'np.array', (['scores[optimize]'], {}), '(scores[optimize])\n', (25796, 25814), True, 'import numpy as np\n'), ((26355, 26366), 'os.getpid', 'os.getpid', ([], {}), '()\n', (26364, 26366), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((11990, 12025), 'numpy.arange', 'np.arange', (['start', '(stop + step)', 'step'], {}), '(start, stop + step, step)\n', (11999, 12025), True, 'import numpy as np\n'), ((16321, 16359), 'numpy.array', 'np.array', (['[prob[l] for l in label_set]'], {}), '([prob[l] for l in label_set])\n', (16329, 16359), True, 'import numpy as np\n'), ((10964, 10999), 'numpy.arange', 'np.arange', (['start', '(stop + step)', 'step'], {}), '(start, stop + step, step)\n', (10973, 10999), True, 'import numpy as np\n'), ((11319, 11337), 'random.choice', 'random.choice', (['seq'], {}), '(seq)\n', (11332, 11337), False, 'import sys, os, time, csv, re, itertools, random, json\n'), ((12115, 12144), 'numpy.arange', 'np.arange', (['start', '(stop + 1)', '(1)'], {}), '(start, stop + 1, 1)\n', (12124, 12144), True, 'import numpy as np\n'), ((11101, 11130), 'numpy.arange', 'np.arange', (['start', '(stop + 1)', '(1)'], {}), '(start, stop + 1, 1)\n', (11110, 11130), True, 'import numpy as np\n'), ((16169, 16190), 'collections.Counter', 'Counter', (['train.labels'], {}), '(train.labels)\n', (16176, 16190), False, 'from collections import Counter, OrderedDict\n'), ((17412, 17446), 'numpy.argwhere', 'np.argwhere', (['(v > self.adapt_thresh)'], {}), '(v > self.adapt_thresh)\n', (17423, 17446), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import pytest
from gmpy2 import bit_mask
from rulelist.datastructure.data import Data
from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, \
CategoricalFreeStatistic
@pytest.fixture
def constant_parameters():
    """Yield the shared constructor arguments used to build a Data object."""
    n_cutpoints = 5
    discretization = "static"
    target_kind = "categorical"
    min_support = 0
    # Two attributes: a numeric ramp and a binary label split at index 50.
    input_frame = pd.DataFrame(data={
        "attribute1": np.arange(100),
        "attribute2": np.where(np.arange(100) < 50, "below50", "above49"),
    })
    yield input_frame, n_cutpoints, discretization, target_kind, min_support
yield input_input_data, input_n_cutpoints, input_discretization, input_target_data,input_minsupp
@pytest.fixture
def generate_inputvalues_one_target(constant_parameters):
    """Yield a Data object with one categorical target and a full coverage bitmask."""
    input_frame, n_cutpoints, discretization, target_kind, min_support = constant_parameters
    # Single target column: "below50" for the first half, "above49" afterwards.
    target_frame = pd.DataFrame(data={
        "target1": np.where(np.arange(100) < 50, "below50", "above49"),
    })
    data_object = Data(input_frame, n_cutpoints, discretization,
                       target_frame, target_kind, min_support)
    full_coverage = bit_mask(data_object.number_instances)
    yield data_object, full_coverage
yield data_class, input_bitarray_for_statistic
@pytest.fixture
def generate_inputvalues_two_targets(constant_parameters):
    """Yield a Data object with two categorical targets and a full coverage bitmask."""
    input_frame, n_cutpoints, discretization, target_kind, min_support = constant_parameters
    # Two target columns with splits at index 50 and index 25 respectively.
    target_frame = pd.DataFrame(data={
        "target1": np.where(np.arange(100) < 50, "below50", "above49"),
        "target2": np.where(np.arange(100) < 25, "below25", "above25"),
    })
    data_object = Data(input_frame, n_cutpoints, discretization,
                       target_frame, target_kind, min_support)
    full_coverage = bit_mask(data_object.number_instances)
    yield data_object, full_coverage
yield data_class, input_bitarray_for_statistic
class TestCategoricalFixedStatistic:
    """Checks for CategoricalFixedStatistic over one- and two-target data."""

    def test_2targets(self, generate_inputvalues_two_targets):
        data_object, coverage = generate_inputvalues_two_targets
        stat = CategoricalFixedStatistic(data_object)
        stat.replace_stats(data_object, coverage)
        # All 100 instances are covered by the full bitmask.
        assert stat.usage == 100
        assert stat.number_targets == 2
        assert stat.usage_per_class == {
            "target1": {"below50": 50, "above49": 50},
            "target2": {"below25": 25, "above25": 75},
        }
        assert stat.number_classes == {"target1": 2, "target2": 2}
        assert stat.prob_per_classes == {
            "target1": {"below50": 0.5, "above49": 0.5},
            "target2": {"below25": 0.25, "above25": 0.75},
        }

    def test_1targets(self, generate_inputvalues_one_target):
        data_object, coverage = generate_inputvalues_one_target
        stat = CategoricalFixedStatistic(data_object)
        stat.replace_stats(data_object, coverage)
        assert stat.usage == 100
        assert stat.number_targets == 1
        assert stat.usage_per_class == {"target1": {"below50": 50, "above49": 50}}
        assert stat.number_classes == {"target1": 2}
        assert stat.prob_per_classes == {"target1": {"below50": 0.5, "above49": 0.5}}
class TestCategoricalFreeStatistic:
    """Checks for CategoricalFreeStatistic (no per-class probabilities)."""

    def test_2targets(self, generate_inputvalues_two_targets):
        data_object, coverage = generate_inputvalues_two_targets
        stat = CategoricalFreeStatistic(data_object)
        stat.replace_stats(data_object, coverage)
        # All 100 instances are covered by the full bitmask.
        assert stat.usage == 100
        assert stat.number_targets == 2
        assert stat.usage_per_class == {
            "target1": {"below50": 50, "above49": 50},
            "target2": {"below25": 25, "above25": 75},
        }
        assert stat.number_classes == {"target1": 2, "target2": 2}

    def test_1targets(self, generate_inputvalues_one_target):
        data_object, coverage = generate_inputvalues_one_target
        stat = CategoricalFreeStatistic(data_object)
        stat.replace_stats(data_object, coverage)
        assert stat.usage == 100
        assert stat.number_targets == 1
        assert stat.usage_per_class == {"target1": {"below50": 50, "above49": 50}}
        assert stat.number_classes == {"target1": 2}
| [
"rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFreeStatistic",
"rulelist.datastructure.data.Data",
"rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFixedStatistic",
"gmpy2.bit_mask",
"pandas.DataFrame",
"numpy.arange"
] | [((593, 621), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dictinput'}), '(data=dictinput)\n', (605, 621), True, 'import pandas as pd\n'), ((1050, 1079), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dictoutput'}), '(data=dictoutput)\n', (1062, 1079), True, 'import pandas as pd\n'), ((1097, 1217), 'rulelist.datastructure.data.Data', 'Data', (['input_input_data', 'input_n_cutpoints', 'input_discretization', 'input_output_data', 'input_target_data', 'input_minsupp'], {}), '(input_input_data, input_n_cutpoints, input_discretization,\n input_output_data, input_target_data, input_minsupp)\n', (1101, 1217), False, 'from rulelist.datastructure.data import Data\n'), ((1271, 1308), 'gmpy2.bit_mask', 'bit_mask', (['data_class.number_instances'], {}), '(data_class.number_instances)\n', (1279, 1308), False, 'from gmpy2 import bit_mask\n'), ((1786, 1815), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dictoutput'}), '(data=dictoutput)\n', (1798, 1815), True, 'import pandas as pd\n'), ((1833, 1953), 'rulelist.datastructure.data.Data', 'Data', (['input_input_data', 'input_n_cutpoints', 'input_discretization', 'input_output_data', 'input_target_data', 'input_minsupp'], {}), '(input_input_data, input_n_cutpoints, input_discretization,\n input_output_data, input_target_data, input_minsupp)\n', (1837, 1953), False, 'from rulelist.datastructure.data import Data\n'), ((2007, 2044), 'gmpy2.bit_mask', 'bit_mask', (['data_class.number_instances'], {}), '(data_class.number_instances)\n', (2015, 2044), False, 'from gmpy2 import bit_mask\n'), ((455, 469), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (464, 469), True, 'import numpy as np\n'), ((2301, 2338), 'rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFixedStatistic', 'CategoricalFixedStatistic', (['data_class'], {}), '(data_class)\n', (2326, 2338), False, 'from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, CategoricalFreeStatistic\n'), 
((3343, 3380), 'rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFixedStatistic', 'CategoricalFixedStatistic', (['data_class'], {}), '(data_class)\n', (3368, 3380), False, 'from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, CategoricalFreeStatistic\n'), ((4257, 4293), 'rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFreeStatistic', 'CategoricalFreeStatistic', (['data_class'], {}), '(data_class)\n', (4281, 4293), False, 'from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, CategoricalFreeStatistic\n'), ((5071, 5107), 'rulelist.rulelistmodel.categoricalmodel.categoricalstatistic.CategoricalFreeStatistic', 'CategoricalFreeStatistic', (['data_class'], {}), '(data_class)\n', (5095, 5107), False, 'from rulelist.rulelistmodel.categoricalmodel.categoricalstatistic import CategoricalFixedStatistic, CategoricalFreeStatistic\n')] |
#!/usr/bin/env python
"""
Copyright (C) 2019 <NAME> Ltd
Copyright (C) 2019 <NAME>, ETH Zurich
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
import sys
import glob
import numpy as np
import pandas as pd
from collections import OrderedDict
class AggregatePredictionsApplication(object):
    """Aggregates per-fold thresholded predictions into one majority-vote file.

    Scans ``outer_*/inner_*`` cross-validation folders for
    ``test_predictions.thresholded.tsv`` files and writes a single TSV where
    each patient's SURVIVAL_STATUS is the majority vote over all folds.
    """

    def __init__(self):
        pass

    def get_target_file(self, file_path):
        """Read a tab-separated prediction file.

        Returns the non-index column names and an OrderedDict that maps the
        first column (patient id) to the second column (prediction).
        """
        from pandas import read_csv
        table = read_csv(file_path, sep="\t")
        column_names = table.columns[1:]
        return column_names, OrderedDict(table.values)

    def run(self, search_directory, target_file_path):
        """Collect predictions from every fold and write the aggregate TSV."""
        per_fold_predictions = []
        for outer_dir in sorted(glob.glob(os.path.join(search_directory, "outer_*"))):
            if not os.path.isdir(outer_dir):
                continue
            for inner_dir in sorted(glob.glob(os.path.join(outer_dir, "inner_*"))):
                if not os.path.isdir(inner_dir):
                    continue
                prediction_file = os.path.join(inner_dir, "test_predictions.thresholded.tsv")
                _, fold_predictions = self.get_target_file(prediction_file)
                per_fold_predictions.append(fold_predictions)

        patient_ids, majority_votes = [], []
        for patient in per_fold_predictions[0].keys():
            fold_values = [fold[patient] for fold in per_fold_predictions]
            patient_ids.append(patient)
            # Majority vote: positive when more than half the folds say 1.
            majority_votes.append(1 if np.mean(fold_values) > 0.5 else 0)

        result = pd.DataFrame(majority_votes, columns=["SURVIVAL_STATUS"], index=patient_ids)
        result.index.name = "PATIENTID"
        result.to_csv(target_file_path, sep="\t")
if __name__ == "__main__":
    # Usage: python <script> <search_directory> <target_output_file>
    search_dir, output_path = sys.argv[1], sys.argv[2]
    AggregatePredictionsApplication().run(search_dir, output_path)
| [
"numpy.mean",
"collections.OrderedDict",
"pandas.read_csv",
"os.path.join",
"os.path.isdir",
"pandas.DataFrame"
] | [((1478, 1507), 'pandas.read_csv', 'read_csv', (['file_path'], {'sep': '"""\t"""'}), "(file_path, sep='\\t')\n", (1486, 1507), False, 'from pandas import read_csv\n'), ((2583, 2643), 'pandas.DataFrame', 'pd.DataFrame', (['all_preds'], {'columns': 'columns', 'index': 'all_patients'}), '(all_preds, columns=columns, index=all_patients)\n', (2595, 2643), True, 'import pandas as pd\n'), ((1592, 1611), 'collections.OrderedDict', 'OrderedDict', (['values'], {}), '(values)\n', (1603, 1611), False, 'from collections import OrderedDict\n'), ((1798, 1819), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (1811, 1819), False, 'import os\n'), ((1738, 1779), 'os.path.join', 'os.path.join', (['search_directory', '"""outer_*"""'], {}), "(search_directory, 'outer_*')\n", (1750, 1779), False, 'import os\n'), ((1929, 1953), 'os.path.isdir', 'os.path.isdir', (['subfolder'], {}), '(subfolder)\n', (1942, 1953), False, 'import os\n'), ((2463, 2478), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (2470, 2478), True, 'import numpy as np\n'), ((1871, 1902), 'os.path.join', 'os.path.join', (['folder', '"""inner_*"""'], {}), "(folder, 'inner_*')\n", (1883, 1902), False, 'import os\n'), ((1993, 2052), 'os.path.join', 'os.path.join', (['subfolder', '"""test_predictions.thresholded.tsv"""'], {}), "(subfolder, 'test_predictions.thresholded.tsv')\n", (2005, 2052), False, 'import os\n')] |
import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
from exploration import OUActionNoise
from rpm import Buffer, update_target
from ddpg import DDPG
from shield import Shield
from lundar_landing import LunarLanderContinuous
# from conjugate_prior import NormalNormalKnownVar
# Fuel is infinite, so an agent can learn to fly and then land on its first attempt.
# Action is two real values vector from -1 to +1. First controls main engine, -1..0 off, 0..+1 throttle from 50% to 100% power.
# Engine can't work with less than 50% power.
# Second value -1.0..-0.5 fire left engine, +0.5..+1.0 fire right engine, -0.5..0.5 off.
class SafeLunarEnv(gym.Wrapper):
    """LunarLander wrapper that shields actions and penalizes extreme thrust.

    Each action is first filtered through a :class:`Shield`; after stepping
    the wrapped environment, an extra -50 reward is applied whenever the
    (shielded) action magnitude exceeds 0.7 on either engine axis.
    """

    def __init__(self, env, shield=None):
        """Wrap `env`. If `shield` is None a fresh Shield is created.

        Fix: the previous signature used `shield=Shield()`, a mutable default
        evaluated once at definition time, so every wrapper instance silently
        shared the same Shield object.
        """
        super().__init__(env)
        self.env = env
        self.shield = shield if shield is not None else Shield()
        self.steps_to_explosion = 20
        self.observation_space = env.observation_space
        self.action_space = env.action_space

    def step(self, action):
        """Shield the action, step the env, and add the extreme-thrust penalty."""
        action = self.shield.shield_action(action)
        next_state, reward, done, info = self.env.step(action)
        # Penalize large engine commands. Fix: the original first clause
        # `np.abs(action[1]) < -0.7` was always False (abs is non-negative);
        # `np.abs(...) > 0.7` already covers both signs of the side engine.
        if np.abs(action[1]) > 0.7 or np.abs(action[0]) > 0.7:
            reward = reward - 50
        return next_state, reward, done, info

    def reset(self):
        """Reset the wrapped environment and return its first observation."""
        return self.env.reset()
# class UserFeedbackShield:
# def __init__(self):
# # https://stats.stackexchange.com/questions/237037/bayesian-updating-with-new-data
# # https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
# self.shield_distribution_main_engine = NormalNormalKnownVar(
# 1, prior_mean=1, prior_var=0.01)
# self.shield_distribution_left_engine = NormalNormalKnownVar(
# 1, prior_mean=-1, prior_var=0.01)
# self.shield_distribution_right_engine = NormalNormalKnownVar(
# 1, prior_mean=1, prior_var=0.01)
# self.oracle_main_engine = self.shield_distribution_right_engine = NormalNormalKnownVar(
# 1, prior_mean=1, prior_var=0.001)
# self.oracle_left_engine = self.shield_distribution_right_engine = NormalNormalKnownVar(
# 1, prior_mean=-1, prior_var=0.001)
# self.oracle_right_engine = self.shield_distribution_right_engine = NormalNormalKnownVar(
# 1, prior_mean=1, prior_var=0.001)
# def get_current_shield(self):
# return Shield(thresholds_main_engine=self.
# shield_distribution_main_engine.sample(),
# thresholds_left_engine=self.
# shield_distribution_left_engine.sample(),
# thresholds_right_engine=self.
# shield_distribution_right_engine.sample())
# def update_oracle_with_last_action(self, last_action, mode='all'):
# modes = ['left', 'left_right', 'all']
# assert mode in modes
# if np.abs(last_action[1]) < -0.8:
# self.oracle_left_engine = NormalNormalKnownVar(
# 0.01,
# prior_mean=(self.oracle_left_engine.mean + 0.05),
# prior_var=0.01)
# self.update_shield_left_from_oracle()
# if np.abs(last_action[1]) > 0.8 and (mode == 'left_right'
# or mode == 'all'):
# self.oracle_left_engine = NormalNormalKnownVar(
# 0.01,
# prior_mean=(self.oracle_right_engine.mean - 0.05),
# prior_var=0.01)
# self.update_shield_right_from_oracle()
# if np.abs(last_action[0]) > 0.9 and mode == 'all':
# self.oracle_left_engine = NormalNormalKnownVar(
# 0.01,
# prior_mean=(self.oracle_main_engine.mean - 0.05),
# prior_var=0.01)
# self.update_shield_main_from_oracle()
# def update_shield_left_from_oracle(self):
# self.shield_distribution_left_engine = self.shield_distribution_left_engine.update(
# [self.oracle_left_engine.sample()])
# def update_shield_right_from_oracle(self):
# self.shield_distribution_right_engine = self.shield_distribution_right_engine.update(
# [self.oracle_right_engine.sample()])
# def update_shield_main_from_oracle(self):
# self.shield_distribution_main_engine = self.shield_distribution_main_engine.update(
# [self.oracle_main_engine.sample()])
# def create_oracle
def demo(self=None):
    """Demonstrate Bayesian updating of a known-variance normal model.

    Fix: this function was left indented as a method of the fully
    commented-out ``UserFeedbackShield`` class above, which made the module
    raise IndentationError on import; it is now a module-level function.
    The unused ``self`` parameter is kept (with a default) for backward
    compatibility with any existing callers.
    """
    import numpy as np
    from matplotlib import pyplot as plt
    from conjugate_prior import NormalNormalKnownVar

    model = NormalNormalKnownVar(1)
    model.plot(-5, 5)
    plt.show()

    new_model = model
    for _ in range(10):
        # Shift the proposal mean by +0.05 each round, then fold one sample
        # from it back into the running posterior.
        new_model = NormalNormalKnownVar(0.01,
                                         prior_mean=(new_model.mean +
                                                     0.05),
                                         prior_var=0.01)
        model = model.update([new_model.sample()])
        model.plot(-5, 5)
        print(model.sample())
    plt.show()
if __name__ == '__main__':
    # Environment setup. Box2D is required for LunarLanderContinuous:
    #   conda install swig     # needed to build Box2D in the pip install
    #   pip install box2d-py   # a repackaged version of pybox2d
    problem = "LunarLanderContinuous-v2"
    env = LunarLanderContinuous()
    env = SafeLunarEnv(env)

    num_states = env.observation_space.shape[0]
    print("Size of State Space -> {}".format(num_states))
    num_actions = env.action_space.shape[0]
    print("Size of Action Space -> {}".format(num_actions))

    upper_bound = env.action_space.high[0]
    lower_bound = env.action_space.low[0]

    print("Max Value of Action -> {}".format(upper_bound))
    print("Min Value of Action -> {}".format(lower_bound))

    # Fix: removed a stray `import ipdb; ipdb.set_trace()` debugger
    # breakpoint that halted the script unconditionally on startup.

    ddpg_agent = DDPG(problem_name=problem,
                     num_states=num_states,
                     num_actions=num_actions,
                     lower_bound=lower_bound,
                     upper_bound=upper_bound,
                     total_episodes=5000)

    # To store reward history of each episode
    ep_reward_list = []
    # To store average reward history of last few episodes
    avg_reward_list = []

    for ep in range(ddpg_agent.total_episodes):
        prev_state = env.reset()
        episodic_reward = 0
        render_episodes = 1000
        render = not (ep % render_episodes)  # render every 1000th episode

        while True:
            tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
            action = ddpg_agent.policy(tf_prev_state, ddpg_agent.ou_noise)

            # Receive state and reward from the (shielded) environment.
            state, reward, done, info = env.step(action[0])
            if render: env.render()

            ddpg_agent.buffer.record((prev_state, action[0], reward, state))
            episodic_reward += reward

            ddpg_agent.buffer.learn(ddpg_agent.target_actor,
                                    ddpg_agent.target_critic,
                                    ddpg_agent.actor_model,
                                    ddpg_agent.critic_model,
                                    ddpg_agent.actor_optimizer,
                                    ddpg_agent.critic_optimizer)
            # Soft-update both target networks towards the online networks.
            update_target(ddpg_agent.target_actor.variables,
                          ddpg_agent.actor_model.variables, ddpg_agent.tau)
            update_target(ddpg_agent.target_critic.variables,
                          ddpg_agent.critic_model.variables, ddpg_agent.tau)

            # End this episode when `done` is True
            if done:
                break

            prev_state = state

        ep_reward_list.append(episodic_reward)
        # Mean of last 40 episodes
        avg_reward = np.mean(ep_reward_list[-40:])
        print("Episode * {} * Avg Reward is ==> {}".format(ep, avg_reward))
        avg_reward_list.append(avg_reward)

    # Plotting graph: Episodes versus Avg. Rewards
    plt.plot(avg_reward_list)
    plt.xlabel("Episode")
    plt.ylabel("Avg. Epsiodic Reward")
    plt.show()
| [
"numpy.mean",
"numpy.abs",
"rpm.update_target",
"matplotlib.pyplot.ylabel",
"lundar_landing.LunarLanderContinuous",
"ipdb.set_trace",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"ddpg.DDPG",
"conjugate_prior.NormalNormalKnownVar",
"shield.Shield",
"tensorflow.convert_to_tensor",
"m... | [((6259, 6282), 'lundar_landing.LunarLanderContinuous', 'LunarLanderContinuous', ([], {}), '()\n', (6280, 6282), False, 'from lundar_landing import LunarLanderContinuous\n'), ((6380, 6396), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (6394, 6396), False, 'import ipdb\n'), ((6786, 6935), 'ddpg.DDPG', 'DDPG', ([], {'problem_name': 'problem', 'num_states': 'num_states', 'num_actions': 'num_actions', 'lower_bound': 'lower_bound', 'upper_bound': 'upper_bound', 'total_episodes': '(5000)'}), '(problem_name=problem, num_states=num_states, num_actions=num_actions,\n lower_bound=lower_bound, upper_bound=upper_bound, total_episodes=5000)\n', (6790, 6935), False, 'from ddpg import DDPG\n'), ((9110, 9135), 'matplotlib.pyplot.plot', 'plt.plot', (['avg_reward_list'], {}), '(avg_reward_list)\n', (9118, 9135), True, 'from matplotlib import pyplot as plt\n'), ((9140, 9161), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Episode"""'], {}), "('Episode')\n", (9150, 9161), True, 'from matplotlib import pyplot as plt\n'), ((9166, 9200), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Avg. Epsiodic Reward"""'], {}), "('Avg. 
Epsiodic Reward')\n", (9176, 9200), True, 'from matplotlib import pyplot as plt\n'), ((9205, 9215), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9213, 9215), True, 'from matplotlib import pyplot as plt\n'), ((768, 776), 'shield.Shield', 'Shield', ([], {}), '()\n', (774, 776), False, 'from shield import Shield\n'), ((5451, 5474), 'conjugate_prior.NormalNormalKnownVar', 'NormalNormalKnownVar', (['(1)'], {}), '(1)\n', (5471, 5474), False, 'from conjugate_prior import NormalNormalKnownVar\n'), ((5509, 5519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5517, 5519), True, 'from matplotlib import pyplot as plt\n'), ((5948, 5958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5956, 5958), True, 'from matplotlib import pyplot as plt\n'), ((8900, 8929), 'numpy.mean', 'np.mean', (['ep_reward_list[-40:]'], {}), '(ep_reward_list[-40:])\n', (8907, 8929), True, 'import numpy as np\n'), ((5599, 5675), 'conjugate_prior.NormalNormalKnownVar', 'NormalNormalKnownVar', (['(0.01)'], {'prior_mean': '(new_model.mean + 0.05)', 'prior_var': '(0.01)'}), '(0.01, prior_mean=new_model.mean + 0.05, prior_var=0.01)\n', (5619, 5675), False, 'from conjugate_prior import NormalNormalKnownVar\n'), ((8404, 8507), 'rpm.update_target', 'update_target', (['ddpg_agent.target_actor.variables', 'ddpg_agent.actor_model.variables', 'ddpg_agent.tau'], {}), '(ddpg_agent.target_actor.variables, ddpg_agent.actor_model.\n variables, ddpg_agent.tau)\n', (8417, 8507), False, 'from rpm import Buffer, update_target\n'), ((8541, 8646), 'rpm.update_target', 'update_target', (['ddpg_agent.target_critic.variables', 'ddpg_agent.critic_model.variables', 'ddpg_agent.tau'], {}), '(ddpg_agent.target_critic.variables, ddpg_agent.critic_model.\n variables, ddpg_agent.tau)\n', (8554, 8646), False, 'from rpm import Buffer, update_target\n'), ((1330, 1347), 'numpy.abs', 'np.abs', (['action[1]'], {}), '(action[1])\n', (1336, 1347), True, 'import numpy as np\n'), ((1358, 1375), 'numpy.abs', 
'np.abs', (['action[1]'], {}), '(action[1])\n', (1364, 1375), True, 'import numpy as np\n'), ((1385, 1402), 'numpy.abs', 'np.abs', (['action[0]'], {}), '(action[0])\n', (1391, 1402), True, 'import numpy as np\n'), ((7608, 7640), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['prev_state'], {}), '(prev_state)\n', (7628, 7640), True, 'import tensorflow as tf\n')] |
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import argparse
import time
import numpy as np
from models.attention_model import AttentionModelBddDetection, AttentionModelMultiBddDetection
from models.feature_model import FeatureModelBddDetection
from models.classifier import ClassificationHead
from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel
from ats.utils.regularizers import MultinomialEntropy
from ats.utils.logging import AttentionSaverMultiBddDetection, AttentionSaverMultiParallelBddDetection, AttentionSaverMultiBatchBddDetection
from dataset.bdd_detection_dataset import BddDetection
from dataset.multiBddDetectionDataset import MultiBddDetection
from train import trainMultiResBatches, evaluateMultiResBatches, train, evaluate, trainMultiRes, evaluateMultiRes, save_checkpoint, load_checkpoint
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def main(opts: argparse.Namespace) -> None:
    """Train an attention-sampling (ATS) model on the BDD detection dataset.

    Builds the dataset/model variant selected by the flags on `opts`
    (`multiResBatch`, `map_parallel`, `parallel_models`, `norm_resample`,
    `fixed_patches`, `area_norm`), then runs the train/evaluate loop for
    `opts.epochs` epochs, checkpointing every epoch.
    """
    # Make sure the output and load directories exist; a bare load_dir name
    # (no '/') is treated as relative to the output directory.
    if not os.path.exists(opts.output_dir):
        os.mkdir(opts.output_dir)
    if '/' not in opts.load_dir:
        opts.load_dir = os.path.join(opts.output_dir, opts.load_dir)
    print(opts.load_dir)
    if not os.path.exists(opts.load_dir):
        os.mkdir(opts.load_dir)

    # Dataset selection: multi-scale dataset unless per-batch multi-resolution
    # training is requested, in which case the single-scale dataset is used.
    if not opts.multiResBatch:
        train_dataset = MultiBddDetection('dataset/bdd_detection', split="train", scales = opts.scales)
        test_dataset = MultiBddDetection('dataset/bdd_detection', split='val', scales = opts.scales)
    else:
        train_dataset = BddDetection('dataset/bdd_detection', split="train")
        test_dataset = BddDetection('dataset/bdd_detection', split="val")
    print(len(train_dataset), len(test_dataset))
    train_loader = DataLoader(train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)
    test_loader = DataLoader(test_dataset, shuffle=False, batch_size=opts.batch_size, num_workers=opts.num_workers)

    # Model construction. The ATS variant is chosen from the flag combination;
    # each branch also picks the matching attention-map logger.
    if not opts.multiResBatch:
        attention_model = AttentionModelBddDetection(squeeze_channels=True, softmax_smoothing=1e-4)
        feature_model = FeatureModelBddDetection(in_channels=3, strides=[1, 2, 2, 2], filters=[32, 32, 32, 32])
        classification_head = ClassificationHead(in_channels=32, num_classes=len(train_dataset.CLASSES))

        ats_model = None
        logger = None
        if opts.map_parallel:
            print("Run parallel model.")
            print("n patches for high res, and another n for low res.")
            if opts.parallel_models:
                # One attention model per scale, each moved to the device.
                print("Multiple attention models for multiple scales.")
                attention_models = [AttentionModelBddDetection(squeeze_channels=True, softmax_smoothing=1e-4).to(opts.device) for _ in opts.scales]
                if not opts.norm_resample:
                    if opts.fixed_patches is None:
                        ats_model = MultiAtsParallelATSModel(attention_models, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales)
                    else:
                        # NOTE(review): opts.fixed_patches is overwritten with a
                        # hard-coded per-scale patch budget here.
                        opts.fixed_patches = [32, 8, 2]
                        ats_model = FixedNParallelATSModel(attention_models, feature_model, classification_head, opts.fixed_patches, patch_size=opts.patch_size, scales=opts.scales)
                else:
                    # Normalize the probability of samples among all scales
                    ats_model = MultiAtsParallelATSModel(attention_models, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales, norm_resample=True, norm_atts_weight=opts.norm_atts_weight)
            else:
                # A single attention model shared across all scales.
                print("Single attention models for multiple scales.")
                ats_model = MultiParallelATSModel(attention_model, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales)
            ats_model = ats_model.to(opts.device)
            logger = AttentionSaverMultiParallelBddDetection(opts.output_dir, ats_model, test_dataset, opts)
        else:
            # Non-parallel path: attention maps of all scales are merged
            # before the softmax, optionally with area normalization.
            print("Run unparallel model.")
            attention_model = AttentionModelMultiBddDetection(squeeze_channels=True, softmax_smoothing=1e-4)
            if opts.area_norm:
                print("Merge before softmax with area normalization.")
                ats_model = MultiATSModel(attention_model, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales, area_norm=True)
            else:
                print("Merge before softmax without area normalization.")
                ats_model = MultiATSModel(attention_model, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales, area_norm=False)
            ats_model = ats_model.to(opts.device)
            logger = AttentionSaverMultiBddDetection(opts.output_dir, ats_model, test_dataset, opts)
    else:
        # Per-batch multi-resolution training uses the plain single-scale ATS model.
        attention_model = AttentionModelBddDetection(squeeze_channels=True, softmax_smoothing=1e-4)
        feature_model = FeatureModelBddDetection(in_channels=3, strides=[1, 2, 2, 2], filters=[32, 32, 32, 32])
        classification_head = ClassificationHead(in_channels=32, num_classes=len(train_dataset.CLASSES))
        ats_model = ATSModel(attention_model, feature_model, classification_head, n_patches=opts.n_patches, patch_size=opts.patch_size, replace=True)
        ats_model = ats_model.to(opts.device)
        logger = AttentionSaverMultiBatchBddDetection(opts.output_dir, ats_model, test_dataset, opts)
    # ats_model = ats_model.to(opts.device)

    # Optimizer: weight decay only on the first part of each attention model.
    if not opts.parallel_models:
        optimizer = optim.Adam([{'params': ats_model.attention_model.part1.parameters(), 'weight_decay': 1e-5},
                    {'params': ats_model.attention_model.part2.parameters()},
                    {'params': ats_model.feature_model.parameters()},
                    {'params': ats_model.classifier.parameters()},
                    {'params': ats_model.sampler.parameters()},
                    {'params': ats_model.expectation.parameters()}
                    ], lr=opts.lr)
    else:
        # Parallel models: collect parameters from every per-scale attention
        # model; with fixed patch budgets, use the per-scale sampler list.
        if opts.fixed_patches is None:
            optimizer = optim.Adam([{'params': ats.part1.parameters(), 'weight_decay': 1e-5} for ats in ats_model.attention_models] + [{'params': ats.part2.parameters()} for ats in ats_model.attention_models] + [{'params': ats_model.feature_model.parameters()}, {'params': ats_model.classifier.parameters()}, {'params': ats_model.sampler.parameters()},{'params': ats_model.expectation.parameters()}], lr=opts.lr)
        else:
            optimizer = optim.Adam([{'params': ats.part1.parameters(), 'weight_decay': 1e-5} for ats in ats_model.attention_models] + [{'params': ats.part2.parameters()} for ats in ats_model.attention_models] + [{'params': ats_model.feature_model.parameters()}, {'params': ats_model.classifier.parameters()}, {'params': ats_model.expectation.parameters()}] + [{'params': sampler.parameters()} for sampler in ats_model.sampler_list], lr=opts.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.decrease_lr_at, gamma=0.1)

    # Inverse-frequency class weights for the cross-entropy loss.
    class_weights = train_dataset.class_frequencies
    class_weights = torch.from_numpy((1. / len(class_weights)) / class_weights).to(opts.device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    entropy_loss_func = MultinomialEntropy(opts.regularizer_strength)

    start_epoch = 0
    opts.checkpoint_path = os.path.join(opts.output_dir, "checkpoint")
    if not os.path.exists(opts.checkpoint_path):
        os.mkdir(opts.checkpoint_path)
    # Optionally resume model/optimizer state from opts.load_dir.
    if opts.resume:
        # start_epoch = opts.load_epoch + 1
        ats_model, optimizer, start_epoch = load_checkpoint(ats_model, optimizer, os.path.join(opts.load_dir , "checkpoint{:02d}.pth".format(opts.load_epoch)))
        start_epoch += 1
        print("load %s successfully."%(os.path.join(opts.load_dir , "checkpoint{:02d}.pth".format(opts.load_epoch))))
    else:
        print("nothing to load.")

    for epoch in range(start_epoch, opts.epochs):
        print("Start epoch %d"%epoch)
        # Training phase is skipped entirely in visualize-only mode.
        if not opts.visualize:
            if opts.multiResBatch:
                train_loss, train_metrics = trainMultiResBatches(ats_model, optimizer, train_loader, criterion, entropy_loss_func, opts)
            else:
                train_loss, train_metrics = trainMultiRes(ats_model, optimizer, train_loader, criterion, entropy_loss_func, opts)
            # if epoch % 2 == 0:
            save_checkpoint(ats_model, optimizer, os.path.join(opts.checkpoint_path, "checkpoint{:02d}.pth".format(epoch)), epoch)
            print("Save "+os.path.join(opts.checkpoint_path, "checkpoint{:02d}.pth".format(epoch))+" successfully.")
            if not opts.multiResBatch:
                print("Epoch {}, train loss: {:.3f}, train metrics: {:.3f}".format(epoch, train_loss, train_metrics["accuracy"]))
            else:
                # multiResBatch reports one (loss, metrics) pair per scale;
                # also log the average across scales.
                scale_avg = [[], []]
                for i, s in enumerate(opts.scales):
                    print("Epoch {}, scale {}, train loss: {:.3f}, train metrics: {:.3f}".format(epoch, s, train_loss[i], train_metrics[i]["accuracy"]))
                    scale_avg[0].append(train_loss[i])
                    scale_avg[1].append(train_metrics[i]['accuracy'])
                avg_train_loss = np.round(np.mean(scale_avg[0]), 4)
                avg_train_metrics = np.mean(scale_avg[1])
                print("Epoch {}, avg train loss: {:.3f}, train metrics: {:.3f}".format(epoch, avg_train_loss, avg_train_metrics))

        # Evaluation (and attention-map logging) under no_grad.
        # NOTE(review): `logger(...)` references train_loss/train_metrics,
        # which are undefined when opts.visualize is set — confirm intended.
        with torch.no_grad():
            if opts.multiResBatch:
                test_loss, test_metrics = evaluateMultiResBatches(ats_model, test_loader, criterion, entropy_loss_func, opts)
            else:
                test_loss, test_metrics = evaluateMultiRes(ats_model, test_loader, criterion, entropy_loss_func, opts)
            logger(epoch, (train_loss, test_loss), (train_metrics, test_metrics))
            if not opts.multiResBatch:
                print("Epoch {}, test loss: {:.3f}, test metrics: {:.3f}".format(epoch, test_loss, test_metrics["accuracy"]))
            else:
                scale_avg = [[], []]
                for i, s in enumerate(opts.scales):
                    print("Epoch {}, scale {} test loss: {:.3f}, test metrics: {:.3f}".format(epoch, s, test_loss[i], test_metrics[i]["accuracy"]))
                    scale_avg[0].append(test_loss[i])
                    scale_avg[1].append(test_metrics[i]["accuracy"])
                avg_test_loss = np.round(np.mean(scale_avg[0]), 4)
                avg_test_metrics = np.mean(scale_avg[1])
                print("Epoch {}, avg test loss: {:.3f}, test metrics: {:.3f}".format(epoch, avg_test_loss, avg_test_metrics))
        scheduler.step()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--regularizer_strength", type=float, default=0.05,
                        help="How strong should the regularization be for the attention")
    parser.add_argument("--softmax_smoothing", type=float, default=1e-4,
                        help="Smoothing for calculating the attention map")
    parser.add_argument("--lr", type=float, default=0.001, help="Set the optimizer's learning rate")
    parser.add_argument("--n_patches", type=int, default=5, help="How many patches to sample")
    parser.add_argument("--patch_size", type=int, default=100, help="Patch size of a square patch")
    # NOTE(review): type=list does not parse a command-line value such as
    # "1,0.5,0.25" (argparse would split the string into characters); only
    # the default is usable as-is. Kept unchanged for CLI compatibility.
    parser.add_argument("--scales", type=list, default=[1, 0.5, 0.25], help="Multi scales")
    parser.add_argument("--batch_size", type=int, default=32, help="Choose the batch size for SGD")
    parser.add_argument("--epochs", type=int, default=500, help="How many epochs to train for")
    parser.add_argument("--decrease_lr_at", type=float, default=250, help="Decrease the learning rate in this epoch")
    parser.add_argument("--clipnorm", type=float, default=1, help="Clip the norm of the gradients")
    parser.add_argument("--output_dir", type=str, help="An output directory", default='output/bdd_detection')
    # parser.add_argument("--checkpoint_path", type=str, help="An output checkpoint directory", default='output/bdd_detection/checkpoint')
    # NOTE(review): type=bool is a known argparse pitfall -- any non-empty
    # string (including "False") is parsed as True. Kept for command-line
    # backward compatibility; consider action='store_true' in a breaking
    # release.
    parser.add_argument("--map_parallel", type=bool, default=False)
    parser.add_argument("--parallel_models", type=bool, default=False)
    # Fixed: stray trailing commas after the next two calls wrapped the
    # add_argument() results in throwaway 1-tuples.
    parser.add_argument("--norm_resample", type=bool, default=False)
    parser.add_argument("--fixed_patches", type=bool, default=None)
    parser.add_argument("--norm_atts_weight", type=bool, default=False)
    parser.add_argument("--area_norm", type=bool, default=False)
    parser.add_argument("--resume", type=bool, default=False)
    parser.add_argument("--multiResBatch", type=bool, default=False, help="Flag to train multiresolution in separate batches")
    parser.add_argument("--visualize", type=bool, default=False)
    parser.add_argument("--load_dir", type=str, default="output/bdd_detection/checkpoint")
    parser.add_argument("--load_epoch", type=int, default=0)
    parser.add_argument('--run_name', type=str, default='run')
    parser.add_argument('--num_workers', type=int, default=12, help='Number of workers to use for data loading')
    opts = parser.parse_args()
    # Suffix the run name with a timestamp so repeated runs do not collide.
    opts.run_name = f"{opts.run_name}_{time.strftime('%Y%m%dT%H%M%S')}"
    opts.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    main(opts)
"dataset.multiBddDetectionDataset.MultiBddDetection",
"ats.utils.logging.AttentionSaverMultiBatchBddDetection",
"torch.nn.CrossEntropyLoss",
"ats.utils.regularizers.MultinomialEntropy",
"torch.cuda.is_available",
"models.attention_model.AttentionModelBddDetection",
"os.path.exists",
"dataset.bdd_detec... | [((970, 1025), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (993, 1025), False, 'import warnings\n'), ((1772, 1873), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'opts.batch_size', 'shuffle': '(True)', 'num_workers': 'opts.num_workers'}), '(train_dataset, batch_size=opts.batch_size, shuffle=True,\n num_workers=opts.num_workers)\n', (1782, 1873), False, 'from torch.utils.data import DataLoader\n'), ((1888, 1989), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset'], {'shuffle': '(False)', 'batch_size': 'opts.batch_size', 'num_workers': 'opts.num_workers'}), '(test_dataset, shuffle=False, batch_size=opts.batch_size,\n num_workers=opts.num_workers)\n', (1898, 1989), False, 'from torch.utils.data import DataLoader\n'), ((7022, 7110), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer'], {'step_size': 'opts.decrease_lr_at', 'gamma': '(0.1)'}), '(optimizer, step_size=opts.decrease_lr_at,\n gamma=0.1)\n', (7053, 7110), False, 'import torch\n'), ((7278, 7319), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'weight': 'class_weights'}), '(weight=class_weights)\n', (7297, 7319), True, 'import torch.nn as nn\n'), ((7344, 7389), 'ats.utils.regularizers.MultinomialEntropy', 'MultinomialEntropy', (['opts.regularizer_strength'], {}), '(opts.regularizer_strength)\n', (7362, 7389), False, 'from ats.utils.regularizers import MultinomialEntropy\n'), ((7438, 7481), 'os.path.join', 'os.path.join', (['opts.output_dir', '"""checkpoint"""'], {}), "(opts.output_dir, 'checkpoint')\n", (7450, 7481), False, 'import os\n'), ((10631, 10656), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10654, 10656), False, 'import argparse\n'), ((1053, 1084), 'os.path.exists', 'os.path.exists', (['opts.output_dir'], {}), '(opts.output_dir)\n', (1067, 1084), False, 'import os\n'), 
((1092, 1117), 'os.mkdir', 'os.mkdir', (['opts.output_dir'], {}), '(opts.output_dir)\n', (1100, 1117), False, 'import os\n'), ((1173, 1217), 'os.path.join', 'os.path.join', (['opts.output_dir', 'opts.load_dir'], {}), '(opts.output_dir, opts.load_dir)\n', (1185, 1217), False, 'import os\n'), ((1254, 1283), 'os.path.exists', 'os.path.exists', (['opts.load_dir'], {}), '(opts.load_dir)\n', (1268, 1283), False, 'import os\n'), ((1291, 1314), 'os.mkdir', 'os.mkdir', (['opts.load_dir'], {}), '(opts.load_dir)\n', (1299, 1314), False, 'import os\n'), ((1368, 1445), 'dataset.multiBddDetectionDataset.MultiBddDetection', 'MultiBddDetection', (['"""dataset/bdd_detection"""'], {'split': '"""train"""', 'scales': 'opts.scales'}), "('dataset/bdd_detection', split='train', scales=opts.scales)\n", (1385, 1445), False, 'from dataset.multiBddDetectionDataset import MultiBddDetection\n'), ((1469, 1544), 'dataset.multiBddDetectionDataset.MultiBddDetection', 'MultiBddDetection', (['"""dataset/bdd_detection"""'], {'split': '"""val"""', 'scales': 'opts.scales'}), "('dataset/bdd_detection', split='val', scales=opts.scales)\n", (1486, 1544), False, 'from dataset.multiBddDetectionDataset import MultiBddDetection\n'), ((1579, 1631), 'dataset.bdd_detection_dataset.BddDetection', 'BddDetection', (['"""dataset/bdd_detection"""'], {'split': '"""train"""'}), "('dataset/bdd_detection', split='train')\n", (1591, 1631), False, 'from dataset.bdd_detection_dataset import BddDetection\n'), ((1653, 1703), 'dataset.bdd_detection_dataset.BddDetection', 'BddDetection', (['"""dataset/bdd_detection"""'], {'split': '"""val"""'}), "('dataset/bdd_detection', split='val')\n", (1665, 1703), False, 'from dataset.bdd_detection_dataset import BddDetection\n'), ((2042, 2117), 'models.attention_model.AttentionModelBddDetection', 'AttentionModelBddDetection', ([], {'squeeze_channels': '(True)', 'softmax_smoothing': '(0.0001)'}), '(squeeze_channels=True, softmax_smoothing=0.0001)\n', (2068, 2117), False, 'from 
models.attention_model import AttentionModelBddDetection, AttentionModelMultiBddDetection\n'), ((2138, 2230), 'models.feature_model.FeatureModelBddDetection', 'FeatureModelBddDetection', ([], {'in_channels': '(3)', 'strides': '[1, 2, 2, 2]', 'filters': '[32, 32, 32, 32]'}), '(in_channels=3, strides=[1, 2, 2, 2], filters=[32, \n 32, 32, 32])\n', (2162, 2230), False, 'from models.feature_model import FeatureModelBddDetection\n'), ((4891, 4966), 'models.attention_model.AttentionModelBddDetection', 'AttentionModelBddDetection', ([], {'squeeze_channels': '(True)', 'softmax_smoothing': '(0.0001)'}), '(squeeze_channels=True, softmax_smoothing=0.0001)\n', (4917, 4966), False, 'from models.attention_model import AttentionModelBddDetection, AttentionModelMultiBddDetection\n'), ((4989, 5081), 'models.feature_model.FeatureModelBddDetection', 'FeatureModelBddDetection', ([], {'in_channels': '(3)', 'strides': '[1, 2, 2, 2]', 'filters': '[32, 32, 32, 32]'}), '(in_channels=3, strides=[1, 2, 2, 2], filters=[32, \n 32, 32, 32])\n', (5013, 5081), False, 'from models.feature_model import FeatureModelBddDetection\n'), ((5203, 5337), 'ats.core.ats_layer.ATSModel', 'ATSModel', (['attention_model', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'replace': '(True)'}), '(attention_model, feature_model, classification_head, n_patches=\n opts.n_patches, patch_size=opts.patch_size, replace=True)\n', (5211, 5337), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((5396, 5484), 'ats.utils.logging.AttentionSaverMultiBatchBddDetection', 'AttentionSaverMultiBatchBddDetection', (['opts.output_dir', 'ats_model', 'test_dataset', 'opts'], {}), '(opts.output_dir, ats_model,\n test_dataset, opts)\n', (5432, 5484), False, 'from ats.utils.logging import AttentionSaverMultiBddDetection, AttentionSaverMultiParallelBddDetection, 
AttentionSaverMultiBatchBddDetection\n'), ((7493, 7529), 'os.path.exists', 'os.path.exists', (['opts.checkpoint_path'], {}), '(opts.checkpoint_path)\n', (7507, 7529), False, 'import os\n'), ((7538, 7568), 'os.mkdir', 'os.mkdir', (['opts.checkpoint_path'], {}), '(opts.checkpoint_path)\n', (7546, 7568), False, 'import os\n'), ((3913, 4004), 'ats.utils.logging.AttentionSaverMultiParallelBddDetection', 'AttentionSaverMultiParallelBddDetection', (['opts.output_dir', 'ats_model', 'test_dataset', 'opts'], {}), '(opts.output_dir, ats_model,\n test_dataset, opts)\n', (3952, 4004), False, 'from ats.utils.logging import AttentionSaverMultiBddDetection, AttentionSaverMultiParallelBddDetection, AttentionSaverMultiBatchBddDetection\n'), ((4083, 4168), 'models.attention_model.AttentionModelMultiBddDetection', 'AttentionModelMultiBddDetection', ([], {'squeeze_channels': '(True)', 'softmax_smoothing': '(0.0001)'}), '(squeeze_channels=True, softmax_smoothing=0.0001\n )\n', (4114, 4168), False, 'from models.attention_model import AttentionModelBddDetection, AttentionModelMultiBddDetection\n'), ((4775, 4854), 'ats.utils.logging.AttentionSaverMultiBddDetection', 'AttentionSaverMultiBddDetection', (['opts.output_dir', 'ats_model', 'test_dataset', 'opts'], {}), '(opts.output_dir, ats_model, test_dataset, opts)\n', (4806, 4854), False, 'from ats.utils.logging import AttentionSaverMultiBddDetection, AttentionSaverMultiParallelBddDetection, AttentionSaverMultiBatchBddDetection\n'), ((9458, 9473), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9471, 9473), False, 'import torch\n'), ((10422, 10443), 'numpy.mean', 'np.mean', (['scale_avg[1]'], {}), '(scale_avg[1])\n', (10429, 10443), True, 'import numpy as np\n'), ((13091, 13121), 'time.strftime', 'time.strftime', (['"""%Y%m%dT%H%M%S"""'], {}), "('%Y%m%dT%H%M%S')\n", (13104, 13121), False, 'import time\n'), ((13167, 13192), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13190, 13192), False, 'import torch\n'), 
((3696, 3848), 'ats.core.ats_layer.MultiParallelATSModel', 'MultiParallelATSModel', (['attention_model', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'scales': 'opts.scales'}), '(attention_model, feature_model, classification_head,\n n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.scales)\n', (3717, 3848), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((4282, 4447), 'ats.core.ats_layer.MultiATSModel', 'MultiATSModel', (['attention_model', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'scales': 'opts.scales', 'area_norm': '(True)'}), '(attention_model, feature_model, classification_head,\n n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.\n scales, area_norm=True)\n', (4295, 4447), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((4549, 4715), 'ats.core.ats_layer.MultiATSModel', 'MultiATSModel', (['attention_model', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'scales': 'opts.scales', 'area_norm': '(False)'}), '(attention_model, feature_model, classification_head,\n n_patches=opts.n_patches, patch_size=opts.patch_size, scales=opts.\n scales, area_norm=False)\n', (4562, 4715), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((8171, 8267), 'train.trainMultiResBatches', 'trainMultiResBatches', (['ats_model', 'optimizer', 'train_loader', 'criterion', 'entropy_loss_func', 'opts'], {}), '(ats_model, optimizer, train_loader, criterion,\n entropy_loss_func, opts)\n', (8191, 8267), False, 'from train import trainMultiResBatches, evaluateMultiResBatches, train, evaluate, trainMultiRes, 
evaluateMultiRes, save_checkpoint, load_checkpoint\n'), ((8320, 8409), 'train.trainMultiRes', 'trainMultiRes', (['ats_model', 'optimizer', 'train_loader', 'criterion', 'entropy_loss_func', 'opts'], {}), '(ats_model, optimizer, train_loader, criterion,\n entropy_loss_func, opts)\n', (8333, 8409), False, 'from train import trainMultiResBatches, evaluateMultiResBatches, train, evaluate, trainMultiRes, evaluateMultiRes, save_checkpoint, load_checkpoint\n'), ((9297, 9318), 'numpy.mean', 'np.mean', (['scale_avg[1]'], {}), '(scale_avg[1])\n', (9304, 9318), True, 'import numpy as np\n'), ((9546, 9633), 'train.evaluateMultiResBatches', 'evaluateMultiResBatches', (['ats_model', 'test_loader', 'criterion', 'entropy_loss_func', 'opts'], {}), '(ats_model, test_loader, criterion,\n entropy_loss_func, opts)\n', (9569, 9633), False, 'from train import trainMultiResBatches, evaluateMultiResBatches, train, evaluate, trainMultiRes, evaluateMultiRes, save_checkpoint, load_checkpoint\n'), ((9684, 9760), 'train.evaluateMultiRes', 'evaluateMultiRes', (['ats_model', 'test_loader', 'criterion', 'entropy_loss_func', 'opts'], {}), '(ats_model, test_loader, criterion, entropy_loss_func, opts)\n', (9700, 9760), False, 'from train import trainMultiResBatches, evaluateMultiResBatches, train, evaluate, trainMultiRes, evaluateMultiRes, save_checkpoint, load_checkpoint\n'), ((10367, 10388), 'numpy.mean', 'np.mean', (['scale_avg[0]'], {}), '(scale_avg[0])\n', (10374, 10388), True, 'import numpy as np\n'), ((3377, 3603), 'ats.core.ats_layer.MultiAtsParallelATSModel', 'MultiAtsParallelATSModel', (['attention_models', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'scales': 'opts.scales', 'norm_resample': '(True)', 'norm_atts_weight': 'opts.norm_atts_weight'}), '(attention_models, feature_model,\n classification_head, n_patches=opts.n_patches, patch_size=opts.\n patch_size, scales=opts.scales, norm_resample=True, norm_atts_weight=\n 
opts.norm_atts_weight)\n', (3401, 3603), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((9239, 9260), 'numpy.mean', 'np.mean', (['scale_avg[0]'], {}), '(scale_avg[0])\n', (9246, 9260), True, 'import numpy as np\n'), ((2869, 3030), 'ats.core.ats_layer.MultiAtsParallelATSModel', 'MultiAtsParallelATSModel', (['attention_models', 'feature_model', 'classification_head'], {'n_patches': 'opts.n_patches', 'patch_size': 'opts.patch_size', 'scales': 'opts.scales'}), '(attention_models, feature_model,\n classification_head, n_patches=opts.n_patches, patch_size=opts.\n patch_size, scales=opts.scales)\n', (2893, 3030), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((3118, 3266), 'ats.core.ats_layer.FixedNParallelATSModel', 'FixedNParallelATSModel', (['attention_models', 'feature_model', 'classification_head', 'opts.fixed_patches'], {'patch_size': 'opts.patch_size', 'scales': 'opts.scales'}), '(attention_models, feature_model, classification_head,\n opts.fixed_patches, patch_size=opts.patch_size, scales=opts.scales)\n', (3140, 3266), False, 'from ats.core.ats_layer import ATSModel, MultiATSModel, MultiParallelATSModel, MultiAtsParallelATSModel, FixedNParallelATSModel\n'), ((2645, 2720), 'models.attention_model.AttentionModelBddDetection', 'AttentionModelBddDetection', ([], {'squeeze_channels': '(True)', 'softmax_smoothing': '(0.0001)'}), '(squeeze_channels=True, softmax_smoothing=0.0001)\n', (2671, 2720), False, 'from models.attention_model import AttentionModelBddDetection, AttentionModelMultiBddDetection\n')] |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""Widget providing a set of tools to draw masks on a PlotWidget.
This widget is meant to work with :class:`silx.gui.plot.PlotWidget`.
- :class:`Mask`: Handle mask bitmap update and history
- :class:`MaskToolsWidget`: GUI for :class:`Mask`
- :class:`MaskToolsDockWidget`: DockWidget to integrate in :class:`PlotWindow`
"""
from __future__ import division
__authors__ = ["<NAME>"]
__license__ = "MIT"
__data__ = "08/06/2016"
import os
import sys
import numpy
import logging
from silx.image import shapes
from .Colors import cursorColorForColormap, rgba
from .. import icons, qt
from silx.third_party.EdfFile import EdfFile
from silx.third_party.TiffIO import TiffIO
try:
import fabio
except ImportError:
fabio = None
_logger = logging.getLogger(__name__)
class Mask(qt.QObject):
    """A mask field with update operations.

    Coords follows (row, column) convention and are in mask array coords.

    This is meant for internal use by :class:`MaskToolsWidget`.
    """

    sigChanged = qt.Signal()
    """Signal emitted when the mask has changed"""

    sigUndoable = qt.Signal(bool)
    """Signal emitted when undo becomes possible/impossible"""

    sigRedoable = qt.Signal(bool)
    """Signal emitted when redo becomes possible/impossible"""

    def __init__(self):
        self.historyDepth = 10
        """Maximum number of operation stored in history list for undo"""

        self._mask = numpy.array((), dtype=numpy.uint8)  # Store the mask

        # Init lists for undo/redo.
        # _history holds committed mask snapshots (oldest first);
        # _redo holds snapshots popped off by undo(), cleared on new commit.
        self._history = []
        self._redo = []

        super(Mask, self).__init__()

    def _notify(self):
        """Notify of mask change."""
        self.sigChanged.emit()

    def getMask(self, copy=True):
        """Get the current mask as a 2D array.

        :param bool copy: True (default) to get a copy of the mask.
                          If False, the returned array MUST not be modified.
        :return: The array of the mask with dimension of the 'active' image.
                 If there is no active image, an empty array is returned.
        :rtype: 2D numpy.ndarray of uint8
        """
        return numpy.array(self._mask, copy=copy)

    def setMask(self, mask, copy=True):
        """Set the mask to a new array.

        :param numpy.ndarray mask: The array to use for the mask.
        :type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.
                    Array of other types are converted.
        :param bool copy: True (the default) to copy the array,
                          False to use it as is if possible.
        """
        assert len(mask.shape) == 2
        # Normalize to C-contiguous uint8 so drawing operations can index it
        self._mask = numpy.array(mask, copy=copy, order='C', dtype=numpy.uint8)
        self._notify()

    def save(self, filename, kind):
        """Save current mask in a file

        :param str filename: The file where to save to mask
        :param str kind: The kind of file to save in 'edf', 'tif', 'npy',
            or 'msk' (if FabIO is installed)
        :raise Exception: Raised if the file writing fail
        """
        if kind == 'edf':
            edfFile = EdfFile(filename, access="w+")
            edfFile.WriteImage({}, self.getMask(copy=False), Append=0)

        elif kind == 'tif':
            tiffFile = TiffIO(filename, mode='w')
            tiffFile.writeImage(self.getMask(copy=False), software='silx')

        elif kind == 'npy':
            try:
                numpy.save(filename, self.getMask(copy=False))
            except IOError:
                raise RuntimeError("Mask file can't be written")

        elif kind == 'msk':
            # Fit2D mask format requires the optional fabio dependency
            if fabio is None:
                raise ImportError("Fit2d mask files can't be written: Fabio module is not available")
            try:
                data = self.getMask(copy=False)
                image = fabio.fabioimage.FabioImage(data=data)
                image = image.convert(fabio.fit2dmaskimage.Fit2dMaskImage)
                image.save(filename)
            except Exception:
                _logger.debug("Backtrace", exc_info=True)
                raise RuntimeError("Mask file can't be written")

        else:
            raise ValueError("Format '%s' is not supported" % kind)

    # History control

    def resetHistory(self):
        """Reset history"""
        # History restarts with the current state as its only entry
        self._history = [numpy.array(self._mask, copy=True)]
        self._redo = []
        self.sigUndoable.emit(False)
        self.sigRedoable.emit(False)

    def commit(self):
        """Append the current mask to history if changed"""
        # Commit when history is empty, a redo branch exists (it must be
        # discarded), or the mask actually differs from the last snapshot.
        if (not self._history or self._redo or
                not numpy.all(numpy.equal(self._mask, self._history[-1]))):
            if self._redo:
                self._redo = []  # Reset redo as a new action has been performed
                self.sigRedoable[bool].emit(False)
            # Bound the history length to historyDepth, dropping oldest first
            while len(self._history) >= self.historyDepth:
                self._history.pop(0)
            self._history.append(numpy.array(self._mask, copy=True))
            if len(self._history) == 2:
                # History just grew beyond the current state: undo possible
                self.sigUndoable.emit(True)

    def undo(self):
        """Restore previous mask if any"""
        if len(self._history) > 1:
            # Move current state onto the redo stack, restore previous one
            self._redo.append(self._history.pop())
            self._mask = numpy.array(self._history[-1], copy=True)
            self._notify()  # Do not store this change in history

            if len(self._redo) == 1:  # First redo
                self.sigRedoable.emit(True)
            if len(self._history) == 1:  # Last value in history
                self.sigUndoable.emit(False)

    def redo(self):
        """Restore previously undone modification if any"""
        if self._redo:
            # Pop the undone state back into place and re-append to history
            self._mask = self._redo.pop()
            self._history.append(numpy.array(self._mask, copy=True))
            self._notify()

            if not self._redo:  # No more redo
                self.sigRedoable.emit(False)
            if len(self._history) == 2:  # Something to undo
                self.sigUndoable.emit(True)

    # Whole mask operations

    def clear(self, level):
        """Set all values of the given mask level to 0.

        :param int level: Value of the mask to set to 0.
        """
        assert 0 < level < 256
        self._mask[self._mask == level] = 0
        self._notify()

    def reset(self, shape=None):
        """Reset the mask to zero and change its shape

        :param shape: Shape of the new mask or None to have an empty mask
        :type shape: 2-tuple of int
        """
        if shape is None:
            shape = 0, 0  # Empty 2D array
        assert len(shape) == 2

        # Changing shape invalidates the history (snapshots no longer match)
        shapeChanged = (shape != self._mask.shape)
        self._mask = numpy.zeros(shape, dtype=numpy.uint8)
        if shapeChanged:
            self.resetHistory()

        self._notify()

    def invert(self, level):
        """Invert mask of the given mask level.

        0 values become level and level values become 0.

        :param int level: The level to invert.
        """
        assert 0 < level < 256
        masked = self._mask == level
        self._mask[self._mask == 0] = level
        self._mask[masked] = 0
        self._notify()

    # Drawing operations

    def updateRectangle(self, level, row, col, height, width, mask=True):
        """Mask/Unmask a rectangle of the given mask level.

        :param int level: Mask level to update.
        :param int row: Starting row of the rectangle
        :param int col: Starting column of the rectangle
        :param int height:
        :param int width:
        :param bool mask: True to mask (default), False to unmask.
        """
        assert 0 < level < 256
        # Clamp the start corner at 0; slicing clamps the far edge implicitly
        selection = self._mask[max(0, row):row + height + 1,
                               max(0, col):col + width + 1]
        if mask:
            selection[:, :] = level
        else:
            selection[selection == level] = 0
        self._notify()

    def updatePolygon(self, level, vertices, mask=True):
        """Mask/Unmask a polygon of the given mask level.

        :param int level: Mask level to update.
        :param vertices: Nx2 array of polygon corners as (row, col)
        :param bool mask: True to mask (default), False to unmask.
        """
        fill = shapes.polygon_fill_mask(vertices, self._mask.shape)
        if mask:
            self._mask[fill != 0] = level
        else:
            # Only unmask pixels that belong to this level inside the polygon
            self._mask[numpy.logical_and(fill != 0,
                                          self._mask == level)] = 0
        self._notify()

    def updatePoints(self, level, rows, cols, mask=True):
        """Mask/Unmask points with given coordinates.

        :param int level: Mask level to update.
        :param rows: Rows of selected points
        :type rows: 1D numpy.ndarray
        :param cols: Columns of selected points
        :type cols: 1D numpy.ndarray
        :param bool mask: True to mask (default), False to unmask.
        """
        # Discard points falling outside the mask array
        valid = numpy.logical_and(
            numpy.logical_and(rows >= 0, cols >= 0),
            numpy.logical_and(rows < self._mask.shape[0],
                              cols < self._mask.shape[1]))
        rows, cols = rows[valid], cols[valid]

        if mask:
            self._mask[rows, cols] = level
        else:
            inMask = self._mask[rows, cols] == level
            self._mask[rows[inMask], cols[inMask]] = 0
        self._notify()

    def updateStencil(self, level, stencil, mask=True):
        """Mask/Unmask area from boolean mask.

        :param int level: Mask level to update.
        :param stencil: Boolean mask of mask values to update
        :type stencil: numpy.array of same dimension as the mask
        :param bool mask: True to mask (default), False to unmask.
        """
        rows, cols = numpy.nonzero(stencil)
        self.updatePoints(level, rows, cols, mask)

    def updateDisk(self, level, crow, ccol, radius, mask=True):
        """Mask/Unmask a disk of the given mask level.

        :param int level: Mask level to update.
        :param int crow: Disk center row.
        :param int ccol: Disk center column.
        :param float radius: Radius of the disk in mask array unit
        :param bool mask: True to mask (default), False to unmask.
        """
        rows, cols = shapes.circle_fill(crow, ccol, radius)
        self.updatePoints(level, rows, cols, mask)

    def updateLine(self, level, row0, col0, row1, col1, width, mask=True):
        """Mask/Unmask a line of the given mask level.

        :param int level: Mask level to update.
        :param int row0: Row of the starting point.
        :param int col0: Column of the starting point.
        :param int row1: Row of the end point.
        :param int col1: Column of the end point.
        :param int width: Width of the line in mask array unit.
        :param bool mask: True to mask (default), False to unmask.
        """
        rows, cols = shapes.draw_line(row0, col0, row1, col1, width)
        self.updatePoints(level, rows, cols, mask)
class MaskToolsWidget(qt.QWidget):
"""Widget with tools for drawing mask on an image in a PlotWidget."""
_maxLevelNumber = 255
def __init__(self, parent=None, plot=None):
# register if the user as force a color for the corresponding mask level
self._defaultColors = numpy.ones((self._maxLevelNumber + 1), dtype=numpy.bool)
# overlays colors set by the user
self._overlayColors = numpy.zeros((self._maxLevelNumber + 1, 3), dtype=numpy.float32)
self._plot = plot
self._maskName = '__MASK_TOOLS_%d' % id(self) # Legend of the mask
self._colormap = {
'name': None,
'normalization': 'linear',
'autoscale': False,
'vmin': 0, 'vmax': self._maxLevelNumber,
'colors': None}
self._defaultOverlayColor = rgba('gray') # Color of the mask
self._setMaskColors(1, 0.5)
self._origin = (0., 0.) # Mask origin in plot
self._scale = (1., 1.) # Mask scale in plot
self._z = 1 # Mask layer in plot
self._data = numpy.zeros((0, 0), dtype=numpy.uint8) # Store image
self._mask = Mask()
self._mask.sigChanged.connect(self._updatePlotMask)
self._drawingMode = None # Store current drawing mode
self._lastPencilPos = None
self._multipleMasks = 'exclusive'
super(MaskToolsWidget, self).__init__(parent)
self._initWidgets()
self._maskFileDir = qt.QDir.home().absolutePath()
self.plot.sigInteractiveModeChanged.connect(
self._interactiveModeChanged)
def getSelectionMask(self, copy=True):
"""Get the current mask as a 2D array.
:param bool copy: True (default) to get a copy of the mask.
If False, the returned array MUST not be modified.
:return: The array of the mask with dimension of the 'active' image.
If there is no active image, an empty array is returned.
:rtype: 2D numpy.ndarray of uint8
"""
return self._mask.getMask(copy=copy)
def setSelectionMask(self, mask, copy=True):
"""Set the mask to a new array.
:param numpy.ndarray mask: The array to use for the mask.
:type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.
Array of other types are converted.
:param bool copy: True (the default) to copy the array,
False to use it as is if possible.
:return: None if failed, shape of mask as 2-tuple if successful.
The mask can be cropped or padded to fit active image,
the returned shape is that of the active image.
"""
mask = numpy.array(mask, copy=False, dtype=numpy.uint8)
if len(mask.shape) != 2:
_logger.error('Not an image, shape: %d', len(mask.shape))
return None
if self._data.shape == (0, 0) or mask.shape == self._data.shape:
self._mask.setMask(mask, copy=copy)
self._mask.commit()
return mask.shape
else:
_logger.warning('Mask has not the same size as current image.'
' Mask will be cropped or padded to fit image'
' dimensions. %s != %s',
str(mask.shape), str(self._data.shape))
resizedMask = numpy.zeros(self._data.shape, dtype=numpy.uint8)
height = min(self._data.shape[0], mask.shape[0])
width = min(self._data.shape[1], mask.shape[1])
resizedMask[:height, :width] = mask[:height, :width]
self._mask.setMask(resizedMask, copy=False)
self._mask.commit()
return resizedMask.shape
def multipleMasks(self):
"""Return the current mode of multiple masks support.
See :meth:`setMultipleMasks`
"""
return self._multipleMasks
def setMultipleMasks(self, mode):
"""Set the mode of multiple masks support.
Available modes:
- 'single': Edit a single level of mask
- 'exclusive': Supports to 256 levels of non overlapping masks
:param str mode: The mode to use
"""
assert mode in ('exclusive', 'single')
if mode != self._multipleMasks:
self._multipleMasks = mode
self.levelWidget.setVisible(self._multipleMasks != 'single')
self.clearAllBtn.setVisible(self._multipleMasks != 'single')
@property
def maskFileDir(self):
"""The directory from which to load/save mask from/to files."""
if not os.path.isdir(self._maskFileDir):
self._maskFileDir = qt.QDir.home().absolutePath()
return self._maskFileDir
    @maskFileDir.setter
    def maskFileDir(self, maskFileDir):
        # Store as plain str so any string-like input is accepted
        self._maskFileDir = str(maskFileDir)
    @property
    def plot(self):
        """The :class:`.PlotWindow` this widget is attached to."""
        return self._plot
def setDirection(self, direction=qt.QBoxLayout.LeftToRight):
"""Set the direction of the layout of the widget
:param direction: QBoxLayout direction
"""
self.layout().setDirection(direction)
def _initWidgets(self):
"""Create widgets"""
layout = qt.QBoxLayout(qt.QBoxLayout.LeftToRight)
layout.addWidget(self._initMaskGroupBox())
layout.addWidget(self._initDrawGroupBox())
layout.addWidget(self._initThresholdGroupBox())
layout.addStretch(1)
self.setLayout(layout)
@staticmethod
def _hboxWidget(*widgets, **kwargs):
"""Place widgets in widget with horizontal layout
:param widgets: Widgets to position horizontally
:param bool stretch: True for trailing stretch (default),
False for no trailing stretch
:return: A QWidget with a QHBoxLayout
"""
stretch = kwargs.get('stretch', True)
layout = qt.QHBoxLayout()
layout.setContentsMargins(0, 0, 0, 0)
for widget in widgets:
layout.addWidget(widget)
if stretch:
layout.addStretch(1)
widget = qt.QWidget()
widget.setLayout(layout)
return widget
def _initTransparencyWidget(self):
    """ Init the mask transparency widget """
    transparencyWidget = qt.QWidget(self)
    grid = qt.QGridLayout()
    grid.setContentsMargins(0, 0, 0, 0)
    # Slider range 3-10 maps to alpha = value / maximum (see _updateColors)
    self.transparencySlider = qt.QSlider(qt.Qt.Horizontal, parent=transparencyWidget)
    self.transparencySlider.setRange(3, 10)
    self.transparencySlider.setValue(8)
    self.transparencySlider.setToolTip(
        'Set the transparency of the mask display')
    self.transparencySlider.valueChanged.connect(self._updateColors)
    grid.addWidget(qt.QLabel('Display:', parent=transparencyWidget), 0, 0)
    grid.addWidget(self.transparencySlider, 0, 1, 1, 3)
    # End labels below the slider
    grid.addWidget(qt.QLabel('<small><b>Transparent</b></small>', parent=transparencyWidget), 1, 1)
    grid.addWidget(qt.QLabel('<small><b>Opaque</b></small>', parent=transparencyWidget), 1, 3)
    transparencyWidget.setLayout(grid)
    return transparencyWidget
def _initMaskGroupBox(self):
    """Init general mask operation widgets"""
    # Mask level
    self.levelSpinBox = qt.QSpinBox()
    self.levelSpinBox.setRange(1, self._maxLevelNumber)
    self.levelSpinBox.setToolTip(
        'Choose which mask level is edited.\n'
        'A mask can have up to 255 non-overlapping levels.')
    # Changing the edited level re-highlights it in the colormap
    self.levelSpinBox.valueChanged[int].connect(self._updateColors)
    self.levelWidget = self._hboxWidget(qt.QLabel('Mask level:'),
                                        self.levelSpinBox)
    # Transparency
    self.transparencyWidget = self._initTransparencyWidget()

    # Buttons group
    invertBtn = qt.QPushButton('Invert')
    invertBtn.setShortcut(qt.Qt.CTRL + qt.Qt.Key_I)
    invertBtn.setToolTip('Invert current mask <b>%s</b>' %
                         invertBtn.shortcut().toString())
    invertBtn.clicked.connect(self._handleInvertMask)

    clearBtn = qt.QPushButton('Clear')
    clearBtn.setShortcut(qt.QKeySequence.Delete)
    clearBtn.setToolTip('Clear current mask <b>%s</b>' %
                        clearBtn.shortcut().toString())
    clearBtn.clicked.connect(self._handleClearMask)

    invertClearWidget = self._hboxWidget(
        invertBtn, clearBtn, stretch=False)

    undoBtn = qt.QPushButton('Undo')
    undoBtn.setShortcut(qt.QKeySequence.Undo)
    undoBtn.setToolTip('Undo last mask change <b>%s</b>' %
                       undoBtn.shortcut().toString())
    # Enabled state follows the mask history (sigUndoable/sigRedoable)
    self._mask.sigUndoable.connect(undoBtn.setEnabled)
    undoBtn.clicked.connect(self._mask.undo)

    redoBtn = qt.QPushButton('Redo')
    redoBtn.setShortcut(qt.QKeySequence.Redo)
    redoBtn.setToolTip('Redo last undone mask change <b>%s</b>' %
                       redoBtn.shortcut().toString())
    self._mask.sigRedoable.connect(redoBtn.setEnabled)
    redoBtn.clicked.connect(self._mask.redo)

    undoRedoWidget = self._hboxWidget(undoBtn, redoBtn, stretch=False)

    self.clearAllBtn = qt.QPushButton('Clear all')
    self.clearAllBtn.setToolTip('Clear all mask levels')
    self.clearAllBtn.clicked.connect(self.resetSelectionMask)

    loadBtn = qt.QPushButton('Load...')
    loadBtn.clicked.connect(self._loadMask)

    saveBtn = qt.QPushButton('Save...')
    saveBtn.clicked.connect(self._saveMask)

    self.loadSaveWidget = self._hboxWidget(loadBtn, saveBtn, stretch=False)

    layout = qt.QVBoxLayout()
    layout.addWidget(self.levelWidget)
    layout.addWidget(self.transparencyWidget)
    layout.addWidget(invertClearWidget)
    layout.addWidget(undoRedoWidget)
    layout.addWidget(self.clearAllBtn)
    layout.addWidget(self.loadSaveWidget)
    layout.addStretch(1)

    maskGroup = qt.QGroupBox('Mask')
    maskGroup.setLayout(layout)
    return maskGroup
def _initDrawGroupBox(self):
    """Init drawing tools widgets

    :return: The 'Draw tools' QGroupBox with browse/rect/polygon/pencil
        actions, mask/unmask radio buttons and the pencil settings.
    """
    layout = qt.QVBoxLayout()

    # Draw tools: one checkable, mutually exclusive action per tool
    self.browseAction = qt.QAction(
        icons.getQIcon('normal'), 'Browse', None)
    self.browseAction.setShortcut(qt.QKeySequence(qt.Qt.Key_B))
    self.browseAction.setToolTip(
        'Disables drawing tools, enables zooming interaction mode'
        ' <b>B</b>')
    self.browseAction.setCheckable(True)
    self.browseAction.triggered.connect(self._activeBrowseMode)
    self.addAction(self.browseAction)

    self.rectAction = qt.QAction(
        icons.getQIcon('shape-rectangle'), 'Rectangle selection', None)
    self.rectAction.setToolTip(
        'Rectangle selection tool: (Un)Mask a rectangular region <b>R</b>')
    self.rectAction.setShortcut(qt.QKeySequence(qt.Qt.Key_R))
    self.rectAction.setCheckable(True)
    self.rectAction.triggered.connect(self._activeRectMode)
    self.addAction(self.rectAction)

    self.polygonAction = qt.QAction(
        icons.getQIcon('shape-polygon'), 'Polygon selection', None)
    self.polygonAction.setShortcut(qt.QKeySequence(qt.Qt.Key_S))
    self.polygonAction.setToolTip(
        'Polygon selection tool: (Un)Mask a polygonal region <b>S</b><br>'
        'Left-click to place polygon corners<br>'
        'Right-click to place the last corner')
    self.polygonAction.setCheckable(True)
    self.polygonAction.triggered.connect(self._activePolygonMode)
    self.addAction(self.polygonAction)

    self.pencilAction = qt.QAction(
        icons.getQIcon('draw-pencil'), 'Pencil tool', None)
    self.pencilAction.setShortcut(qt.QKeySequence(qt.Qt.Key_P))
    self.pencilAction.setToolTip(
        'Pencil tool: (Un)Mask using a pencil <b>P</b>')
    self.pencilAction.setCheckable(True)
    self.pencilAction.triggered.connect(self._activePencilMode)
    # Bug fix: was `self.addAction(self.polygonAction)` (added twice),
    # so the pencil action was never added and its <P> shortcut on this
    # widget did not work.
    self.addAction(self.pencilAction)

    self.drawActionGroup = qt.QActionGroup(self)
    self.drawActionGroup.setExclusive(True)
    self.drawActionGroup.addAction(self.browseAction)
    self.drawActionGroup.addAction(self.rectAction)
    self.drawActionGroup.addAction(self.polygonAction)
    self.drawActionGroup.addAction(self.pencilAction)
    self.browseAction.setChecked(True)

    self.drawButtons = {}
    for action in self.drawActionGroup.actions():
        btn = qt.QToolButton()
        btn.setDefaultAction(action)
        self.drawButtons[action.text()] = btn
    container = self._hboxWidget(*self.drawButtons.values())
    layout.addWidget(container)

    # Mask/Unmask radio buttons
    maskRadioBtn = qt.QRadioButton('Mask')
    maskRadioBtn.setToolTip(
        'Drawing masks with current level. Press <b>Ctrl</b> to unmask')
    maskRadioBtn.setChecked(True)

    unmaskRadioBtn = qt.QRadioButton('Unmask')
    unmaskRadioBtn.setToolTip(
        'Drawing unmasks with current level. Press <b>Ctrl</b> to mask')

    # Ids 1/0 are read back by _isMasking via checkedId()
    self.maskStateGroup = qt.QButtonGroup()
    self.maskStateGroup.addButton(maskRadioBtn, 1)
    self.maskStateGroup.addButton(unmaskRadioBtn, 0)

    self.maskStateWidget = self._hboxWidget(maskRadioBtn, unmaskRadioBtn)
    layout.addWidget(self.maskStateWidget)

    # Connect mask state widget visibility with browse action
    self.maskStateWidget.setHidden(self.browseAction.isChecked())
    self.browseAction.toggled[bool].connect(
        self.maskStateWidget.setHidden)

    # Pencil settings
    self.pencilSetting = self._createPencilSettings(None)
    self.pencilSetting.setVisible(False)
    layout.addWidget(self.pencilSetting)

    layout.addStretch(1)

    drawGroup = qt.QGroupBox('Draw tools')
    drawGroup.setLayout(layout)
    return drawGroup
def _createPencilSettings(self, parent=None):
    # Builds the pencil size controls: a spin box and a slider kept in
    # sync through _pencilWidthChanged.
    pencilSetting = qt.QWidget(parent)

    self.pencilSpinBox = qt.QSpinBox(parent=pencilSetting)
    self.pencilSpinBox.setRange(1, 1024)
    pencilToolTip = """Set pencil drawing tool size in pixels of the image
    on which to make the mask."""
    self.pencilSpinBox.setToolTip(pencilToolTip)

    # Slider covers only the common 1-50 range; the spin box allows more
    self.pencilSlider = qt.QSlider(qt.Qt.Horizontal, parent=pencilSetting)
    self.pencilSlider.setRange(1, 50)
    self.pencilSlider.setToolTip(pencilToolTip)

    pencilLabel = qt.QLabel('Pencil size:', parent=pencilSetting)

    layout = qt.QGridLayout()
    layout.addWidget(pencilLabel, 0, 0)
    layout.addWidget(self.pencilSpinBox, 0, 1)
    layout.addWidget(self.pencilSlider, 1, 1)
    pencilSetting.setLayout(layout)

    self.pencilSpinBox.valueChanged.connect(self._pencilWidthChanged)
    self.pencilSlider.valueChanged.connect(self._pencilWidthChanged)

    return pencilSetting
def _initThresholdGroupBox(self):
    """Init thresholding widgets"""
    layout = qt.QVBoxLayout()

    # Thresholing: three checkable actions (below / between / above);
    # checking one unchecks the others (see _thresholdActionGroupTriggered)
    self.belowThresholdAction = qt.QAction(
        icons.getQIcon('plot-roi-below'), 'Mask below threshold', None)
    self.belowThresholdAction.setToolTip(
        'Mask image where values are below given threshold')
    self.belowThresholdAction.setCheckable(True)
    self.belowThresholdAction.triggered[bool].connect(
        self._belowThresholdActionTriggered)

    self.betweenThresholdAction = qt.QAction(
        icons.getQIcon('plot-roi-between'), 'Mask within range', None)
    self.betweenThresholdAction.setToolTip(
        'Mask image where values are within given range')
    self.betweenThresholdAction.setCheckable(True)
    self.betweenThresholdAction.triggered[bool].connect(
        self._betweenThresholdActionTriggered)

    self.aboveThresholdAction = qt.QAction(
        icons.getQIcon('plot-roi-above'), 'Mask above threshold', None)
    self.aboveThresholdAction.setToolTip(
        'Mask image where values are above given threshold')
    self.aboveThresholdAction.setCheckable(True)
    self.aboveThresholdAction.triggered[bool].connect(
        self._aboveThresholdActionTriggered)

    # Non-exclusive group: exclusivity is handled manually so that all
    # actions may also be unchecked at the same time
    self.thresholdActionGroup = qt.QActionGroup(self)
    self.thresholdActionGroup.setExclusive(False)
    self.thresholdActionGroup.addAction(self.belowThresholdAction)
    self.thresholdActionGroup.addAction(self.betweenThresholdAction)
    self.thresholdActionGroup.addAction(self.aboveThresholdAction)
    self.thresholdActionGroup.triggered.connect(
        self._thresholdActionGroupTriggered)

    self.loadColormapRangeAction = qt.QAction(
        icons.getQIcon('view-refresh'), 'Set min-max from colormap', None)
    self.loadColormapRangeAction.setToolTip(
        'Set min and max values from current colormap range')
    self.loadColormapRangeAction.setCheckable(False)
    self.loadColormapRangeAction.triggered.connect(
        self._loadRangeFromColormapTriggered)

    widgets = []
    for action in self.thresholdActionGroup.actions():
        btn = qt.QToolButton()
        btn.setDefaultAction(action)
        widgets.append(btn)

    spacer = qt.QWidget()
    spacer.setSizePolicy(qt.QSizePolicy.Expanding,
                         qt.QSizePolicy.Preferred)
    widgets.append(spacer)

    loadColormapRangeBtn = qt.QToolButton()
    loadColormapRangeBtn.setDefaultAction(self.loadColormapRangeAction)
    widgets.append(loadColormapRangeBtn)

    container = self._hboxWidget(*widgets, stretch=False)
    layout.addWidget(container)

    form = qt.QFormLayout()

    # Min/max bounds; enabled state depends on the selected action
    self.minLineEdit = qt.QLineEdit()
    self.minLineEdit.setText('0')
    self.minLineEdit.setValidator(qt.QDoubleValidator())
    self.minLineEdit.setEnabled(False)
    form.addRow('Min:', self.minLineEdit)

    self.maxLineEdit = qt.QLineEdit()
    self.maxLineEdit.setText('0')
    self.maxLineEdit.setValidator(qt.QDoubleValidator())
    self.maxLineEdit.setEnabled(False)
    form.addRow('Max:', self.maxLineEdit)

    self.applyMaskBtn = qt.QPushButton('Apply mask')
    self.applyMaskBtn.clicked.connect(self._maskBtnClicked)
    self.applyMaskBtn.setEnabled(False)
    form.addRow(self.applyMaskBtn)

    self.maskNanBtn = qt.QPushButton('Mask not finite values')
    self.maskNanBtn.setToolTip('Mask Not a Number and infinite values')
    self.maskNanBtn.clicked.connect(self._maskNotFiniteBtnClicked)
    form.addRow(self.maskNanBtn)

    thresholdWidget = qt.QWidget()
    thresholdWidget.setLayout(form)
    layout.addWidget(thresholdWidget)

    layout.addStretch(1)

    self.thresholdGroup = qt.QGroupBox('Threshold')
    self.thresholdGroup.setLayout(layout)
    return self.thresholdGroup
# Handle mask refresh on the plot
def _updatePlotMask(self):
    """Update mask image in plot"""
    mask = self.getSelectionMask(copy=False)
    if len(mask):
        # Draw the mask as an overlay image on top of the active image,
        # sharing its origin/scale and one z level above it
        self.plot.addImage(mask, legend=self._maskName,
                           colormap=self._colormap,
                           origin=self._origin,
                           scale=self._scale,
                           z=self._z,
                           replace=False, resetzoom=False)
    elif self.plot.getImage(self._maskName):
        # Empty mask: remove the overlay image if still displayed
        self.plot.remove(self._maskName, kind='image')
# track widget visibility and plot active image changes
def changeEvent(self, event):
    """Reset drawing action when disabling widget"""
    # Do not leave the plot in drawing mode when this widget gets disabled
    if (event.type() == qt.QEvent.EnabledChange and
            not self.isEnabled() and
            not self.browseAction.isChecked()):
        self.browseAction.trigger()  # Disable drawing tool
def showEvent(self, event):
    """Synchronize with the plot's active image when the widget is shown."""
    try:
        # Remove the after-care tracking installed by hideEvent, if any
        self.plot.sigActiveImageChanged.disconnect(
            self._activeImageChangedAfterCare)
    except (RuntimeError, TypeError):
        pass  # Was not connected
    self._activeImageChanged()  # Init mask + enable/disable widget
    self.plot.sigActiveImageChanged.connect(self._activeImageChanged)
def hideEvent(self, event):
    """Stop tracking the active image when the widget is hidden."""
    self.plot.sigActiveImageChanged.disconnect(self._activeImageChanged)
    if not self.browseAction.isChecked():
        self.browseAction.trigger()  # Disable drawing tool
    if len(self.getSelectionMask(copy=False)):
        # A mask exists: keep watching the active image so the mask can be
        # dropped/adjusted if the image changes while we are hidden
        self.plot.sigActiveImageChanged.connect(
            self._activeImageChangedAfterCare)
def _activeImageChangedAfterCare(self, *args):
    """Check synchro of active image and mask when mask widget is hidden.

    If active image has no more the same size as the mask, the mask is
    removed, otherwise it is adjusted to origin, scale and z.
    """
    activeImage = self.plot.getActiveImage()
    if activeImage is None or activeImage.getLegend() == self._maskName:
        # No active image or active image is the mask...
        self.plot.sigActiveImageChanged.disconnect(
            self._activeImageChangedAfterCare)
    else:
        # Update overlay color from the image colormap so the mask stays visible
        colormap = activeImage.getColormap()
        self._defaultOverlayColor = rgba(cursorColorForColormap(colormap['name']))
        self._setMaskColors(self.levelSpinBox.value(),
                            self.transparencySlider.value() /
                            self.transparencySlider.maximum())

        self._origin = activeImage.getOrigin()
        self._scale = activeImage.getScale()
        self._z = activeImage.getZValue() + 1
        self._data = activeImage.getData(copy=False)
        if self._data.shape != self.getSelectionMask(copy=False).shape:
            # Image has not the same size, remove mask and stop listening
            if self.plot.getImage(self._maskName):
                self.plot.remove(self._maskName, kind='image')

            self.plot.sigActiveImageChanged.disconnect(
                self._activeImageChangedAfterCare)
        else:
            # Refresh in case origin, scale, z changed
            self._updatePlotMask()
def _activeImageChanged(self, *args):
    """Update widget and mask according to active image changes"""
    activeImage = self.plot.getActiveImage()
    if activeImage is None or activeImage.getLegend() == self._maskName:
        # No active image or active image is the mask...
        self.setEnabled(False)

        self._data = numpy.zeros((0, 0), dtype=numpy.uint8)
        self._mask.reset()
        self._mask.commit()

    else:  # There is an active image
        self.setEnabled(True)

        # Pick an overlay color contrasting with the image colormap
        colormap = activeImage.getColormap()
        self._defaultOverlayColor = rgba(cursorColorForColormap(colormap['name']))
        self._setMaskColors(self.levelSpinBox.value(),
                            self.transparencySlider.value() /
                            self.transparencySlider.maximum())

        self._origin = activeImage.getOrigin()
        self._scale = activeImage.getScale()
        self._z = activeImage.getZValue() + 1
        self._data = activeImage.getData(copy=False)
        if self._data.shape != self.getSelectionMask(copy=False).shape:
            # New image size: start from an empty mask of matching shape
            self._mask.reset(self._data.shape)
            self._mask.commit()
        else:
            # Refresh in case origin, scale, z changed
            self._updatePlotMask()
    self._updateInteractiveMode()
# Handle whole mask operations
def load(self, filename):
    """Load a mask from an image file.

    Supported formats (selected from the file extension): numpy ``.npy``,
    ``.edf`` and Fit2D ``.msk`` (the latter requires the fabio module).

    :param str filename: File name from which to load the mask
    :raise Exception: An exception in case of failure
    :raise RuntimeWarning: In case the mask was applied but with some
        import changes to notice
    """
    _, extension = os.path.splitext(filename)
    extension = extension.lower()[1:]  # drop the leading dot
    if extension == "npy":
        try:
            mask = numpy.load(filename)
        except IOError:
            _logger.error("Can't load filename '%s'", filename)
            _logger.debug("Backtrace", exc_info=True)
            # Bug fix: the message must be %-formatted here; previously
            # `filename` was passed as an extra exception argument and the
            # '%s' placeholder was left unformatted in the message.
            raise RuntimeError('File "%s" is not a numpy file.' % filename)
    elif extension == "edf":
        try:
            mask = EdfFile(filename, access='r').GetData(0)
        except Exception as e:
            _logger.error("Can't load filename %s", filename)
            _logger.debug("Backtrace", exc_info=True)
            raise e
    elif extension == "msk":
        if fabio is None:
            raise ImportError("Fit2d mask files can't be read: Fabio module is not available")
        try:
            mask = fabio.open(filename).data
        except Exception as e:
            _logger.error("Can't load fit2d mask file")
            _logger.debug("Backtrace", exc_info=True)
            raise e
    else:
        msg = "Extension '%s' is not supported."
        raise RuntimeError(msg % extension)

    effectiveMaskShape = self.setSelectionMask(mask, copy=False)
    if effectiveMaskShape is None:
        return
    if mask.shape != effectiveMaskShape:
        # The mask was cropped/padded to fit the active image: warn caller
        msg = 'Mask was resized from %s to %s'
        msg = msg % (str(mask.shape), str(effectiveMaskShape))
        raise RuntimeWarning(msg)
def _loadMask(self):
    """Open load mask dialog"""
    dialog = qt.QFileDialog(self)
    dialog.setWindowTitle("Load Mask")
    dialog.setModal(1)
    filters = [
        'EDF (*.edf)',
        'TIFF (*.tif)',
        'NumPy binary file (*.npy)',
        # Fit2D mask is displayed anyway fabio is here or not
        # to show to the user that the option exists
        'Fit2D mask (*.msk)',
    ]
    dialog.setNameFilters(filters)
    dialog.setFileMode(qt.QFileDialog.ExistingFile)
    dialog.setDirectory(self.maskFileDir)
    if not dialog.exec_():
        dialog.close()
        return

    filename = dialog.selectedFiles()[0]
    dialog.close()

    # Remember the directory for the next load/save
    self.maskFileDir = os.path.dirname(filename)
    try:
        self.load(filename)
    except RuntimeWarning as e:
        # Mask was applied but adjusted (e.g. resized): inform the user
        message = e.args[0]
        msg = qt.QMessageBox(self)
        msg.setIcon(qt.QMessageBox.Warning)
        msg.setText("Mask loaded but an operation was applied.\n" + message)
        msg.exec_()
    except Exception as e:
        message = e.args[0]
        msg = qt.QMessageBox(self)
        msg.setIcon(qt.QMessageBox.Critical)
        msg.setText("Cannot load mask from file. " + message)
        msg.exec_()
def save(self, filename, kind):
    """Save current mask in a file

    :param str filename: The file where to save to mask
    :param str kind: The kind of file to save in 'edf', 'tif', 'npy'
    :raise Exception: Raised if the process fails
    """
    # Delegate serialization to the mask model
    self._mask.save(filename, kind)
def _saveMask(self):
    """Open Save mask dialog"""
    dialog = qt.QFileDialog(self)
    dialog.setWindowTitle("Save Mask")
    dialog.setModal(1)
    filters = [
        'EDF (*.edf)',
        'TIFF (*.tif)',
        'NumPy binary file (*.npy)',
        # Fit2D mask is displayed anyway fabio is here or not
        # to show to the user that the option exists
        'Fit2D mask (*.msk)',
    ]
    dialog.setNameFilters(filters)
    dialog.setFileMode(qt.QFileDialog.AnyFile)
    dialog.setAcceptMode(qt.QFileDialog.AcceptSave)
    dialog.setDirectory(self.maskFileDir)
    if not dialog.exec_():
        dialog.close()
        return

    # convert filter name to extension name with the .
    # e.g. 'EDF (*.edf)' -> last token '(*.edf)' -> '.edf'
    extension = dialog.selectedNameFilter().split()[-1][2:-1]
    filename = dialog.selectedFiles()[0]
    dialog.close()

    if not filename.lower().endswith(extension):
        filename += extension

    if os.path.exists(filename):
        # Overwrite: remove the existing file first
        try:
            os.remove(filename)
        except IOError:
            msg = qt.QMessageBox(self)
            msg.setIcon(qt.QMessageBox.Critical)
            msg.setText("Cannot save.\n"
                        "Input Output Error: %s" % (sys.exc_info()[1]))
            msg.exec_()
            return

    # Remember the directory for the next load/save
    self.maskFileDir = os.path.dirname(filename)
    try:
        self.save(filename, extension[1:])
    except Exception as e:
        msg = qt.QMessageBox(self)
        msg.setIcon(qt.QMessageBox.Critical)
        msg.setText("Cannot save file %s\n%s" % (filename, e.args[0]))
        msg.exec_()
def getCurrentMaskColor(self):
    """Returns the color of the current selected level.

    :rtype: A tuple or a python array
    """
    level = self.levelSpinBox.value()
    if not self._defaultColors[level]:
        # A custom color was explicitly set for this level
        return self._overlayColors[level].tolist()
    # Otherwise use the colormap-derived default overlay color
    return self._defaultOverlayColor
def _setMaskColors(self, level, alpha):
"""Set-up the mask colormap to highlight current mask level.
:param int level: The mask level to highlight
:param float alpha: Alpha level of mask in [0., 1.]
"""
assert 0 < level <= self._maxLevelNumber
colors = numpy.empty((self._maxLevelNumber + 1, 4), dtype=numpy.float32)
# Set color
colors[:, :3] = self._defaultOverlayColor[:3]
# check if some colors has been directly set by the user
mask = numpy.equal(self._defaultColors, False)
colors[mask, :3] = self._overlayColors[mask, :3]
# Set alpha
colors[:, -1] = alpha / 2.
# Set highlighted level color
colors[level, 3] = alpha
# Set no mask level
colors[0] = (0., 0., 0., 0.)
self._colormap['colors'] = colors
def resetMaskColors(self, level=None):
    """Reset the mask color at the given level to be defaultColors

    :param level:
        The index of the mask for which we want to reset the color.
        If none we will reset color for all masks.
    """
    if level is None:
        # Reset all levels to the default overlay color
        self._defaultColors[:] = True
    else:
        # Bug fix: the branches were previously swapped, so resetting a
        # specific level cleared the custom colors of ALL levels (and
        # level=None indexed the array with None).
        self._defaultColors[level] = True
    self._updateColors()
def setMaskColors(self, rgb, level=None):
    """Set the masks color

    :param rgb: The rgb color
    :param level:
        The index of the mask for which we want to change the color.
        If none set this color for all the masks
    """
    if level is not None:
        # Customize a single level
        self._overlayColors[level] = rgb
        self._defaultColors[level] = False
    else:
        # Apply the color to every level
        self._overlayColors[:] = rgb
        self._defaultColors[:] = False
    self._updateColors()
def getMaskColors(self):
    """masks colors getter"""
    # Returns the per-level overlay color array (not a copy)
    return self._overlayColors
def _updateColors(self, *args):
"""Rebuild mask colormap when selected level or transparency change"""
self._setMaskColors(self.levelSpinBox.value(),
self.transparencySlider.value() /
self.transparencySlider.maximum())
self._updatePlotMask()
self._updateInteractiveMode()
def _pencilWidthChanged(self, width):
    # Keep the spin box and the slider in sync without re-triggering
    # this slot (both connect their valueChanged to it)
    old = self.pencilSpinBox.blockSignals(True)
    try:
        self.pencilSpinBox.setValue(width)
    finally:
        self.pencilSpinBox.blockSignals(old)
    old = self.pencilSlider.blockSignals(True)
    try:
        self.pencilSlider.setValue(width)
    finally:
        self.pencilSlider.blockSignals(old)
    # Push the new width to the plot's pencil interaction
    self._updateInteractiveMode()
def _updateInteractiveMode(self):
"""Update the current mode to the same if some cached data have to be
updated. It is the case for the color for example.
"""
if self._drawingMode == 'rectangle':
self._activeRectMode()
elif self._drawingMode == 'polygon':
self._activePolygonMode()
elif self._drawingMode == 'pencil':
self._activePencilMode()
def _handleClearMask(self):
"""Handle clear button clicked: reset current level mask"""
self._mask.clear(self.levelSpinBox.value())
self._mask.commit()
def resetSelectionMask(self):
    """Reset the mask"""
    # Clear all levels, keeping the shape of the current data
    self._mask.reset(shape=self._data.shape)
    self._mask.commit()
def _handleInvertMask(self):
"""Invert the current mask level selection."""
self._mask.invert(self.levelSpinBox.value())
self._mask.commit()
# Handle drawing tools UI events
def _interactiveModeChanged(self, source):
"""Handle plot interactive mode changed:
If changed from elsewhere, disable drawing tool
"""
if source is not self:
# Do not trigger browseAction to avoid to call
# self.plot.setInteractiveMode
self.browseAction.setChecked(True)
self._releaseDrawingMode()
def _releaseDrawingMode(self):
"""Release the drawing mode if is was used"""
if self._drawingMode is None:
return
self.plot.sigPlotSignal.disconnect(self._plotDrawEvent)
self._drawingMode = None
def _activeBrowseMode(self):
    """Handle browse action mode triggered by user.

    Set plot interactive mode only when
    the user is triggering the browse action.
    """
    self._releaseDrawingMode()
    # Hand interaction back to zooming
    self.plot.setInteractiveMode('zoom', source=self)
    self._updateDrawingModeWidgets()
def _activeRectMode(self):
    """Handle rect action mode triggering"""
    self._releaseDrawingMode()
    self._drawingMode = 'rectangle'
    # Receive draw events from the plot while this mode is active
    self.plot.sigPlotSignal.connect(self._plotDrawEvent)
    color = self.getCurrentMaskColor()
    self.plot.setInteractiveMode(
        'draw', shape='rectangle', source=self, color=color)
    self._updateDrawingModeWidgets()
def _activePolygonMode(self):
    """Handle polygon action mode triggering"""
    self._releaseDrawingMode()
    self._drawingMode = 'polygon'
    # Receive draw events from the plot while this mode is active
    self.plot.sigPlotSignal.connect(self._plotDrawEvent)
    color = self.getCurrentMaskColor()
    self.plot.setInteractiveMode('draw', shape='polygon', source=self, color=color)
    self._updateDrawingModeWidgets()
def _activePencilMode(self):
    """Handle pencil action mode triggering"""
    self._releaseDrawingMode()
    self._drawingMode = 'pencil'
    # Receive draw events from the plot while this mode is active
    self.plot.sigPlotSignal.connect(self._plotDrawEvent)
    color = self.getCurrentMaskColor()
    # Pencil width in image pixels, taken from the settings spin box
    width = self.pencilSpinBox.value()
    self.plot.setInteractiveMode(
        'draw', shape='pencil', source=self, color=color, width=width)
    self._updateDrawingModeWidgets()
def _updateDrawingModeWidgets(self):
    # Pencil size controls are only relevant in pencil mode
    self.pencilSetting.setVisible(self._drawingMode == 'pencil')
# Handle plot drawing events
def _isMasking(self):
    """Returns true if the tool is used for masking, else it is used for
    unmasking.

    :rtype: bool"""
    # First draw event, use current modifiers for all draw sequence
    # Button group id 1 is the 'Mask' radio button (see _initDrawGroupBox)
    doMask = (self.maskStateGroup.checkedId() == 1)
    # Holding Ctrl temporarily inverts mask/unmask
    if qt.QApplication.keyboardModifiers() & qt.Qt.ControlModifier:
        doMask = not doMask
    return doMask
def _plotDrawEvent(self, event):
    """Handle draw events from the plot"""
    if (self._drawingMode is None or
            event['event'] not in ('drawingProgress', 'drawingFinished')):
        return

    if not len(self._data):
        return  # No active image: nothing to mask

    level = self.levelSpinBox.value()

    if (self._drawingMode == 'rectangle' and
            event['event'] == 'drawingFinished'):
        # Convert from plot to array coords
        doMask = self._isMasking()
        ox, oy = self._origin
        sx, sy = self._scale

        height = int(abs(event['height'] / sy))
        width = int(abs(event['width'] / sx))

        row = int((event['y'] - oy) / sy)
        if sy < 0:
            # Negative scale: plot coordinate is the far edge of the rect
            row -= height

        col = int((event['x'] - ox) / sx)
        if sx < 0:
            col -= width

        self._mask.updateRectangle(
            level,
            row=row,
            col=col,
            height=height,
            width=width,
            mask=doMask)
        self._mask.commit()

    elif (self._drawingMode == 'polygon' and
            event['event'] == 'drawingFinished'):
        doMask = self._isMasking()
        # Convert from plot to array coords
        vertices = (event['points'] - self._origin) / self._scale
        vertices = vertices.astype(numpy.int)[:, (1, 0)]  # (row, col)
        self._mask.updatePolygon(level, vertices, doMask)
        self._mask.commit()

    elif self._drawingMode == 'pencil':
        doMask = self._isMasking()
        # convert from plot to array coords
        col, row = (event['points'][-1] - self._origin) / self._scale
        col, row = int(col), int(row)
        brushSize = self.pencilSpinBox.value()

        # Skip duplicate events on the same pixel to avoid redundant work
        if self._lastPencilPos != (row, col):
            if self._lastPencilPos is not None:
                # Draw the line
                self._mask.updateLine(
                    level,
                    self._lastPencilPos[0], self._lastPencilPos[1],
                    row, col,
                    brushSize,
                    doMask)

            # Draw the very first, or last point
            self._mask.updateDisk(level, row, col, brushSize / 2., doMask)

        if event['event'] == 'drawingFinished':
            # End of stroke: commit to history and reset stroke state
            self._mask.commit()
            self._lastPencilPos = None
        else:
            self._lastPencilPos = row, col
# Handle threshold UI events
def _belowThresholdActionTriggered(self, triggered):
if triggered:
self.minLineEdit.setEnabled(True)
self.maxLineEdit.setEnabled(False)
self.applyMaskBtn.setEnabled(True)
def _betweenThresholdActionTriggered(self, triggered):
if triggered:
self.minLineEdit.setEnabled(True)
self.maxLineEdit.setEnabled(True)
self.applyMaskBtn.setEnabled(True)
def _aboveThresholdActionTriggered(self, triggered):
if triggered:
self.minLineEdit.setEnabled(False)
self.maxLineEdit.setEnabled(True)
self.applyMaskBtn.setEnabled(True)
def _thresholdActionGroupTriggered(self, triggeredAction):
    """Threshold action group listener."""
    # The group is non-exclusive so exclusivity is enforced by hand,
    # which also allows all actions to be unchecked at once
    if triggeredAction.isChecked():
        # Uncheck other actions
        for action in self.thresholdActionGroup.actions():
            if action is not triggeredAction and action.isChecked():
                action.setChecked(False)
    else:
        # Disable min/max edit
        self.minLineEdit.setEnabled(False)
        self.maxLineEdit.setEnabled(False)
        self.applyMaskBtn.setEnabled(False)
def _maskBtnClicked(self):
    # Apply the currently selected threshold mode to the current level.
    # Each branch is a no-op when the data or required bounds are missing.
    if self.belowThresholdAction.isChecked():
        if len(self._data) and self.minLineEdit.text():
            min_ = float(self.minLineEdit.text())
            self._mask.updateStencil(self.levelSpinBox.value(),
                                     self._data < min_)
            self._mask.commit()

    elif self.betweenThresholdAction.isChecked():
        if (len(self._data) and
                self.minLineEdit.text() and self.maxLineEdit.text()):
            min_ = float(self.minLineEdit.text())
            max_ = float(self.maxLineEdit.text())
            # Mask where min_ <= data <= max_ (bounds inclusive)
            self._mask.updateStencil(self.levelSpinBox.value(),
                                     numpy.logical_and(min_ <= self._data,
                                                       self._data <= max_))
            self._mask.commit()

    elif self.aboveThresholdAction.isChecked():
        if len(self._data) and self.maxLineEdit.text():
            max_ = float(self.maxLineEdit.text())
            self._mask.updateStencil(self.levelSpinBox.value(),
                                     self._data > max_)
            self._mask.commit()
def _maskNotFiniteBtnClicked(self):
"""Handle not finite mask button clicked: mask NaNs and inf"""
self._mask.updateStencil(
self.levelSpinBox.value(),
numpy.logical_not(numpy.isfinite(self._data)))
self._mask.commit()
def _loadRangeFromColormapTriggered(self):
    """Set range from active image colormap range"""
    activeImage = self.plot.getActiveImage()
    if (activeImage is not None and
            activeImage.getLegend() != self._maskName):
        # Update thresholds according to colormap
        colormap = activeImage.getColormap()
        if colormap['autoscale']:
            # Autoscale: use the actual data range (ignoring NaNs)
            min_ = numpy.nanmin(activeImage.getData(copy=False))
            max_ = numpy.nanmax(activeImage.getData(copy=False))
        else:
            # Fixed range: use the colormap bounds
            min_, max_ = colormap['vmin'], colormap['vmax']

        self.minLineEdit.setText(str(min_))
        self.maxLineEdit.setText(str(max_))
class MaskToolsDockWidget(qt.QDockWidget):
    """:class:`MaskToolsDockWidget` embedded in a QDockWidget.

    For integration in a :class:`PlotWindow`.

    :param parent: See :class:`QDockWidget`
    :param plot: The PlotWidget this widget is operating on
    :param str name: The title of this widget
    """

    def __init__(self, parent=None, plot=None, name='Mask'):
        super(MaskToolsDockWidget, self).__init__(parent)
        self.setWindowTitle(name)

        self.layout().setContentsMargins(0, 0, 0, 0)
        # The actual mask tools are delegated to an embedded MaskToolsWidget
        self.setWidget(MaskToolsWidget(plot=plot))
        # Re-orient the inner layout when the dock is moved or floated
        self.dockLocationChanged.connect(self._dockLocationChanged)
        self.topLevelChanged.connect(self._topLevelChanged)

    def getSelectionMask(self, copy=True):
        """Get the current mask as a 2D array.

        :param bool copy: True (default) to get a copy of the mask.
                          If False, the returned array MUST not be modified.
        :return: The array of the mask with dimension of the 'active' image.
                 If there is no active image, an empty array is returned.
        :rtype: 2D numpy.ndarray of uint8
        """
        # Delegate to the embedded MaskToolsWidget
        return self.widget().getSelectionMask(copy=copy)

    def setSelectionMask(self, mask, copy=True):
        """Set the mask to a new array.

        :param numpy.ndarray mask: The array to use for the mask.
        :type mask: numpy.ndarray of uint8 of dimension 2, C-contiguous.
                    Array of other types are converted.
        :param bool copy: True (the default) to copy the array,
                          False to use it as is if possible.
        :return: None if failed, shape of mask as 2-tuple if successful.
                 The mask can be cropped or padded to fit active image,
                 the returned shape is that of the active image.
        """
        # Delegate to the embedded MaskToolsWidget
        return self.widget().setSelectionMask(mask, copy=copy)

    def toggleViewAction(self):
        """Returns a checkable action that shows or closes this widget.

        See :class:`QMainWindow`.
        """
        action = super(MaskToolsDockWidget, self).toggleViewAction()
        action.setIcon(icons.getQIcon('image-mask'))
        action.setToolTip("Display/hide mask tools")
        return action

    def _dockLocationChanged(self, area):
        # Stack the tool groups vertically when docked on a side,
        # horizontally when docked top/bottom or floating
        if area in (qt.Qt.LeftDockWidgetArea, qt.Qt.RightDockWidgetArea):
            direction = qt.QBoxLayout.TopToBottom
        else:
            direction = qt.QBoxLayout.LeftToRight
        self.widget().setDirection(direction)

    def _topLevelChanged(self, topLevel):
        if topLevel:
            # Floating: use horizontal layout and shrink to minimum size
            self.widget().setDirection(qt.QBoxLayout.LeftToRight)
            self.resize(self.widget().minimumSize())
            self.adjustSize()

    def showEvent(self, event):
        """Make sure this widget is raised when it is shown
        (when it is first created as a tab in PlotWindow or when it is shown
        again after hiding).
        """
        self.raise_()
| [
"logging.getLogger",
"fabio.fabioimage.FabioImage",
"numpy.equal",
"silx.image.shapes.draw_line",
"numpy.array",
"sys.exc_info",
"numpy.isfinite",
"fabio.open",
"silx.third_party.EdfFile.EdfFile",
"os.remove",
"os.path.exists",
"os.path.isdir",
"numpy.empty",
"numpy.ones",
"os.path.split... | [((2042, 2069), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2059, 2069), False, 'import logging\n'), ((2717, 2751), 'numpy.array', 'numpy.array', (['()'], {'dtype': 'numpy.uint8'}), '((), dtype=numpy.uint8)\n', (2728, 2751), False, 'import numpy\n'), ((3435, 3469), 'numpy.array', 'numpy.array', (['self._mask'], {'copy': 'copy'}), '(self._mask, copy=copy)\n', (3446, 3469), False, 'import numpy\n'), ((3941, 3999), 'numpy.array', 'numpy.array', (['mask'], {'copy': 'copy', 'order': '"""C"""', 'dtype': 'numpy.uint8'}), "(mask, copy=copy, order='C', dtype=numpy.uint8)\n", (3952, 3999), False, 'import numpy\n'), ((7942, 7979), 'numpy.zeros', 'numpy.zeros', (['shape'], {'dtype': 'numpy.uint8'}), '(shape, dtype=numpy.uint8)\n', (7953, 7979), False, 'import numpy\n'), ((9491, 9543), 'silx.image.shapes.polygon_fill_mask', 'shapes.polygon_fill_mask', (['vertices', 'self._mask.shape'], {}), '(vertices, self._mask.shape)\n', (9515, 9543), False, 'from silx.image import shapes\n'), ((11004, 11026), 'numpy.nonzero', 'numpy.nonzero', (['stencil'], {}), '(stencil)\n', (11017, 11026), False, 'import numpy\n'), ((11501, 11539), 'silx.image.shapes.circle_fill', 'shapes.circle_fill', (['crow', 'ccol', 'radius'], {}), '(crow, ccol, radius)\n', (11519, 11539), False, 'from silx.image import shapes\n'), ((12139, 12186), 'silx.image.shapes.draw_line', 'shapes.draw_line', (['row0', 'col0', 'row1', 'col1', 'width'], {}), '(row0, col0, row1, col1, width)\n', (12155, 12186), False, 'from silx.image import shapes\n'), ((12536, 12590), 'numpy.ones', 'numpy.ones', (['(self._maxLevelNumber + 1)'], {'dtype': 'numpy.bool'}), '(self._maxLevelNumber + 1, dtype=numpy.bool)\n', (12546, 12590), False, 'import numpy\n'), ((12665, 12728), 'numpy.zeros', 'numpy.zeros', (['(self._maxLevelNumber + 1, 3)'], {'dtype': 'numpy.float32'}), '((self._maxLevelNumber + 1, 3), dtype=numpy.float32)\n', (12676, 12728), False, 'import numpy\n'), ((13316, 13354), 
'numpy.zeros', 'numpy.zeros', (['(0, 0)'], {'dtype': 'numpy.uint8'}), '((0, 0), dtype=numpy.uint8)\n', (13327, 13354), False, 'import numpy\n'), ((14974, 15022), 'numpy.array', 'numpy.array', (['mask'], {'copy': '(False)', 'dtype': 'numpy.uint8'}), '(mask, copy=False, dtype=numpy.uint8)\n', (14985, 15022), False, 'import numpy\n'), ((36554, 36580), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (36570, 36580), False, 'import os\n'), ((38863, 38888), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (38878, 38888), False, 'import os\n'), ((40767, 40791), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (40781, 40791), False, 'import os\n'), ((41170, 41195), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (41185, 41195), False, 'import os\n'), ((42149, 42212), 'numpy.empty', 'numpy.empty', (['(self._maxLevelNumber + 1, 4)'], {'dtype': 'numpy.float32'}), '((self._maxLevelNumber + 1, 4), dtype=numpy.float32)\n', (42160, 42212), False, 'import numpy\n'), ((42369, 42408), 'numpy.equal', 'numpy.equal', (['self._defaultColors', '(False)'], {}), '(self._defaultColors, False)\n', (42380, 42408), False, 'import numpy\n'), ((4397, 4427), 'silx.third_party.EdfFile.EdfFile', 'EdfFile', (['filename'], {'access': '"""w+"""'}), "(filename, access='w+')\n", (4404, 4427), False, 'from silx.third_party.EdfFile import EdfFile\n'), ((5597, 5631), 'numpy.array', 'numpy.array', (['self._mask'], {'copy': '(True)'}), '(self._mask, copy=True)\n', (5608, 5631), False, 'import numpy\n'), ((6521, 6562), 'numpy.array', 'numpy.array', (['self._history[-1]'], {'copy': '(True)'}), '(self._history[-1], copy=True)\n', (6532, 6562), False, 'import numpy\n'), ((10214, 10253), 'numpy.logical_and', 'numpy.logical_and', (['(rows >= 0)', '(cols >= 0)'], {}), '(rows >= 0, cols >= 0)\n', (10231, 10253), False, 'import numpy\n'), ((10267, 10340), 'numpy.logical_and', 'numpy.logical_and', (['(rows < 
self._mask.shape[0])', '(cols < self._mask.shape[1])'], {}), '(rows < self._mask.shape[0], cols < self._mask.shape[1])\n', (10284, 10340), False, 'import numpy\n'), ((15645, 15693), 'numpy.zeros', 'numpy.zeros', (['self._data.shape'], {'dtype': 'numpy.uint8'}), '(self._data.shape, dtype=numpy.uint8)\n', (15656, 15693), False, 'import numpy\n'), ((16873, 16905), 'os.path.isdir', 'os.path.isdir', (['self._maskFileDir'], {}), '(self._maskFileDir)\n', (16886, 16905), False, 'import os\n'), ((35138, 35176), 'numpy.zeros', 'numpy.zeros', (['(0, 0)'], {'dtype': 'numpy.uint8'}), '((0, 0), dtype=numpy.uint8)\n', (35149, 35176), False, 'import numpy\n'), ((4551, 4577), 'silx.third_party.TiffIO.TiffIO', 'TiffIO', (['filename'], {'mode': '"""w"""'}), "(filename, mode='w')\n", (4557, 4577), False, 'from silx.third_party.TiffIO import TiffIO\n'), ((6225, 6259), 'numpy.array', 'numpy.array', (['self._mask'], {'copy': '(True)'}), '(self._mask, copy=True)\n', (6236, 6259), False, 'import numpy\n'), ((7014, 7048), 'numpy.array', 'numpy.array', (['self._mask'], {'copy': '(True)'}), '(self._mask, copy=True)\n', (7025, 7048), False, 'import numpy\n'), ((9640, 9689), 'numpy.logical_and', 'numpy.logical_and', (['(fill != 0)', '(self._mask == level)'], {}), '(fill != 0, self._mask == level)\n', (9657, 9689), False, 'import numpy\n'), ((36695, 36715), 'numpy.load', 'numpy.load', (['filename'], {}), '(filename)\n', (36705, 36715), False, 'import numpy\n'), ((40826, 40845), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (40835, 40845), False, 'import os\n'), ((53536, 53562), 'numpy.isfinite', 'numpy.isfinite', (['self._data'], {}), '(self._data)\n', (53550, 53562), False, 'import numpy\n'), ((5891, 5933), 'numpy.equal', 'numpy.equal', (['self._mask', 'self._history[-1]'], {}), '(self._mask, self._history[-1])\n', (5902, 5933), False, 'import numpy\n'), ((52836, 52893), 'numpy.logical_and', 'numpy.logical_and', (['(min_ <= self._data)', '(self._data <= max_)'], {}), '(min_ <= 
self._data, self._data <= max_)\n', (52853, 52893), False, 'import numpy\n'), ((5105, 5143), 'fabio.fabioimage.FabioImage', 'fabio.fabioimage.FabioImage', ([], {'data': 'data'}), '(data=data)\n', (5132, 5143), False, 'import fabio\n'), ((37022, 37051), 'silx.third_party.EdfFile.EdfFile', 'EdfFile', (['filename'], {'access': '"""r"""'}), "(filename, access='r')\n", (37029, 37051), False, 'from silx.third_party.EdfFile import EdfFile\n'), ((37448, 37468), 'fabio.open', 'fabio.open', (['filename'], {}), '(filename)\n', (37458, 37468), False, 'import fabio\n'), ((41071, 41085), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (41083, 41085), False, 'import sys\n')] |
import os
import skimage
from skimage import io, util
from skimage.draw import circle
import numpy as np
import math
def circularcrop(img, border=200, threshold=20000, threshold1=100):
    """Trim a circular (retina) image and crop it down to its disk.

    The disk is located by thresholding row/column intensity sums, its
    estimated radius is shrunk by `border` pixels (to drop camera edge
    distortion), everything outside the smaller disk is zeroed, and the
    image is cropped to the disk's bounding box.

    parameters:
    img: retina image to be processed (H x W x 3; modified in place)
    border: width of the border that will be trimmed from the disk. This allows to get
    rid of camera edge distortion
    threshold: intensity-sum threshold used to detect the disk extent
    threshold1: per-pixel brightness threshold used to measure a clipped top edge
    """
    # Per-pixel brightness (sum over the 3 colour channels).
    s = np.sum(img, axis=2)
    # Boolean masks of the columns/rows bright enough to belong to the disk.
    cols = np.sum(s, axis=0) > threshold
    rows = np.sum(s, axis=1) > threshold
    height = rows.shape[0]
    width = cols.shape[0]
    # First bright column / last bright column bound the disk horizontally.
    x_min = np.argmax(cols[0:width])
    # NOTE(review): width/2 and height/2 are float indices under Python 3;
    # this code assumes Python 2 integer division — confirm before porting.
    x_max = width/2 + np.argmin(cols[width/2:width-1])
    y_min = np.argmax(rows[0:height/2])
    y_max = np.argmin(cols[height/2:height-1])  # NOTE(review): looks like this should scan `rows`, not `cols` — verify
    y_max = height/2 + y_max if y_max > 0 else height
    radius = (x_max - x_min)/2
    center_x = x_min + radius
    center_y = y_min + radius # the default case (if y_min != 0)
    if y_min == 0: # the upper side is cropped
        if height - y_max > 0: # lower border is not 0
            center_y = y_max - radius
        else:
            # Both top and bottom are clipped: recover the centre height
            # from the chord length of the clipped top edge.
            upper_line_width = np.sum(s[0,:] > threshold1) # threshold for single line
            center_y = math.sqrt( radius**2 - (upper_line_width/2)**2)
    # Shrink the disk by `border` pixels and zero everything outside it.
    radius1 = radius - border
    mask = np.zeros(img.shape[0:2])
    # NOTE(review): skimage.draw.circle expects a 2-D shape; passing the full
    # 3-tuple img.shape appears to rely on only the first two entries — confirm.
    rr, cc = circle(center_y, center_x, radius1, img.shape)
    mask[rr, cc] = 1
    # NOTE(review): in-place multiplication by a float mask on an integer
    # image raises a casting error on recent NumPy — confirm img dtype.
    img[:,:,0] *= mask
    img[:,:,1] *= mask
    img[:,:,2] *= mask
    # Bounding box of the shrunk disk (y clamped to the image extent).
    x_borders = (center_x - radius1, img.shape[1] - center_x - radius1)
    y_borders = (max(center_y - radius1,0), max(img.shape[0] - center_y - radius1, 0))
    imgres = util.crop(img, (y_borders, x_borders, (0,0)))
    maskT = util.crop(mask, (y_borders, x_borders))
    # NOTE(review): border_pixels is computed but never returned or used.
    border_pixels = np.sum(1 - maskT)
    return imgres, maskT, center_x, center_y, radius | [
"skimage.draw.circle",
"math.sqrt",
"numpy.argmax",
"numpy.sum",
"numpy.zeros",
"skimage.util.crop",
"numpy.argmin"
] | [((627, 646), 'numpy.sum', 'np.sum', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (633, 646), True, 'import numpy as np\n'), ((798, 822), 'numpy.argmax', 'np.argmax', (['cols[0:width]'], {}), '(cols[0:width])\n', (807, 822), True, 'import numpy as np\n'), ((890, 919), 'numpy.argmax', 'np.argmax', (['rows[0:height / 2]'], {}), '(rows[0:height / 2])\n', (899, 919), True, 'import numpy as np\n'), ((930, 968), 'numpy.argmin', 'np.argmin', (['cols[height / 2:height - 1]'], {}), '(cols[height / 2:height - 1])\n', (939, 968), True, 'import numpy as np\n'), ((1508, 1532), 'numpy.zeros', 'np.zeros', (['img.shape[0:2]'], {}), '(img.shape[0:2])\n', (1516, 1532), True, 'import numpy as np\n'), ((1546, 1592), 'skimage.draw.circle', 'circle', (['center_y', 'center_x', 'radius1', 'img.shape'], {}), '(center_y, center_x, radius1, img.shape)\n', (1552, 1592), False, 'from skimage.draw import circle\n'), ((1862, 1908), 'skimage.util.crop', 'util.crop', (['img', '(y_borders, x_borders, (0, 0))'], {}), '(img, (y_borders, x_borders, (0, 0)))\n', (1871, 1908), False, 'from skimage import io, util\n'), ((1921, 1960), 'skimage.util.crop', 'util.crop', (['mask', '(y_borders, x_borders)'], {}), '(mask, (y_borders, x_borders))\n', (1930, 1960), False, 'from skimage import io, util\n'), ((1982, 1999), 'numpy.sum', 'np.sum', (['(1 - maskT)'], {}), '(1 - maskT)\n', (1988, 1999), True, 'import numpy as np\n'), ((658, 675), 'numpy.sum', 'np.sum', (['s'], {'axis': '(0)'}), '(s, axis=0)\n', (664, 675), True, 'import numpy as np\n'), ((701, 718), 'numpy.sum', 'np.sum', (['s'], {'axis': '(1)'}), '(s, axis=1)\n', (707, 718), True, 'import numpy as np\n'), ((845, 881), 'numpy.argmin', 'np.argmin', (['cols[width / 2:width - 1]'], {}), '(cols[width / 2:width - 1])\n', (854, 881), True, 'import numpy as np\n'), ((1331, 1359), 'numpy.sum', 'np.sum', (['(s[0, :] > threshold1)'], {}), '(s[0, :] > threshold1)\n', (1337, 1359), True, 'import numpy as np\n'), ((1410, 1462), 'math.sqrt', 'math.sqrt', 
(['(radius ** 2 - (upper_line_width / 2) ** 2)'], {}), '(radius ** 2 - (upper_line_width / 2) ** 2)\n', (1419, 1462), False, 'import math\n')] |
#!/bin/env/python
# -*- encoding: utf-8 -*-
"""
GridWorld Environment
"""
from __future__ import division, print_function
import cv2
import numpy as np
from matplotlib import pyplot as plt
from markov_rlzoo import MDPEnv, MDPState
class GridWorld(MDPEnv):
    """Rectangular grid-world MDP built on markov_rlzoo.

    Each cell of the grid is an MDPState.  Non-terminal cells cost -1 per
    step and offer the four compass moves; terminal cells have reward 0
    and no actions.  A move that would leave the grid keeps the agent in
    place.
    """

    def __init__(self, shape: tuple = (4, 4), ends=None):
        """
        :param shape: (rows, cols) size of the grid
        :param ends: list of (row, col) coordinates of terminal cells;
            defaults to the two opposite corners [(0, 0), (3, 3)]
        """
        super().__init__()
        # BUG FIX: the default for `ends` used to be a mutable list literal
        # in the signature; resolve it here so it is never shared.
        if ends is None:
            ends = [(0, 0), (3, 3)]
        self.shape = shape
        self.ends = ends
        self.action_space = [self.north, self.south, self.east, self.west]

        non_terminal_value = -1  # per-step cost of a regular cell
        terminal_value = 0       # terminal cells are free

        self.grid = [[None for _ in range(shape[1])] for __ in range(shape[0])]
        self.states = []
        for h in range(shape[0]):
            for w in range(shape[1]):
                crd = (h, w)
                actions = []
                terminal = True if crd in ends else False
                reward = terminal_value if terminal else non_terminal_value
                if not terminal:
                    actions = self.action_space
                state = MDPState(reward, actions, terminal, self,
                                 action_args=crd)
                self.grid[h][w] = state
                self.states.append(state)

        for s in self.states:
            s.init_state()

        self.load_states(self.states)

    def north(self, env, crd):
        """Return the state one row up from crd, or crd's own state at the top edge.

        :param env: the GridWorld to move in
        :param crd: (row, col) current coordinate
        :return: the resulting MDPState
        """
        if crd[0] > 0:
            return env.grid[crd[0] - 1][crd[1]]
        else:
            return env.grid[crd[0]][crd[1]]

    def south(self, env, crd):
        """Return the state one row down from crd, or crd's own state at the bottom edge.

        :param env: the GridWorld to move in
        :param crd: (row, col) current coordinate
        :return: the resulting MDPState
        """
        if crd[0] < self.shape[0] - 1:
            return env.grid[crd[0] + 1][crd[1]]
        else:
            return env.grid[crd[0]][crd[1]]

    def east(self, env, crd):
        """Return the state one column right of crd, or crd's own state at the right edge.

        :param env: the GridWorld to move in
        :param crd: (row, col) current coordinate
        :return: the resulting MDPState
        """
        if crd[1] < self.shape[1] - 1:
            return env.grid[crd[0]][crd[1] + 1]
        else:
            return env.grid[crd[0]][crd[1]]

    def west(self, env, crd):
        """Return the state one column left of crd, or crd's own state at the left edge.

        :param env: the GridWorld to move in
        :param crd: (row, col) current coordinate
        :return: the resulting MDPState
        """
        if crd[1] > 0:
            return env.grid[crd[0]][crd[1] - 1]
        else:
            return env.grid[crd[0]][crd[1]]

    def print(self):
        """Pretty-print the grid of state values to stdout."""
        for h in range(self.shape[0]):
            print('+---------' * self.shape[1] + '+')
            row = ''
            for w in range(self.shape[1]):
                row += '| {}'.format(str(round(float(
                    self.grid[h][w].value), 2)).ljust(6))
            print(row + '|')
        print('+---------' * self.shape[1] + '+')

    def cv2_visualize(self, display_size=(500, 500), grayscale=False,
                      use_plt=True):
        """Render |state value| as an image.

        Color mode maps low cost to green and high cost to red (BGR).

        :param display_size: (width, height) of the rendered image
        :param grayscale: render a single-channel image instead of color
        :param use_plt: display via matplotlib instead of an OpenCV window
        """
        frame = np.zeros(self.shape)
        for h in range(self.shape[0]):
            for w in range(self.shape[1]):
                frame[h][w] = int(abs(self.grid[h][w].value))
        max_frame = np.max(frame)
        frame = frame * (255 / max_frame)  # normalize to the 0..255 range
        if grayscale:
            frame = np.uint8(frame)
            # BUG FIX: display_size was previously ignored on the grayscale
            # path, so the raw (rows x cols) frame was shown at grid size.
            frame = cv2.resize(frame, display_size)
            if use_plt:
                plt.imshow(frame)
                plt.show()
            else:
                cv2.imshow("frame", frame)
                cv2.waitKey(0)
        else:
            color_frame = np.zeros((self.shape[0], self.shape[1], 3))
            for h in range(self.shape[0]):
                for w in range(self.shape[1]):
                    color_frame[h][w] = (0, 255 - frame[h][w], frame[h][w])
            color_frame = np.uint8(color_frame)
            color_frame = cv2.resize(color_frame, display_size)
            if use_plt:
                plt.imshow(color_frame)
                plt.show()
            else:
                cv2.imshow('frame', color_frame)
                cv2.waitKey(0)
| [
"numpy.uint8",
"matplotlib.pyplot.imshow",
"markov_rlzoo.MDPState",
"numpy.max",
"cv2.imshow",
"numpy.zeros",
"cv2.resize",
"cv2.waitKey",
"matplotlib.pyplot.show"
] | [((3015, 3035), 'numpy.zeros', 'np.zeros', (['self.shape'], {}), '(self.shape)\n', (3023, 3035), True, 'import numpy as np\n'), ((3202, 3215), 'numpy.max', 'np.max', (['frame'], {}), '(frame)\n', (3208, 3215), True, 'import numpy as np\n'), ((3302, 3317), 'numpy.uint8', 'np.uint8', (['frame'], {}), '(frame)\n', (3310, 3317), True, 'import numpy as np\n'), ((3539, 3582), 'numpy.zeros', 'np.zeros', (['(self.shape[0], self.shape[1], 3)'], {}), '((self.shape[0], self.shape[1], 3))\n', (3547, 3582), True, 'import numpy as np\n'), ((3776, 3797), 'numpy.uint8', 'np.uint8', (['color_frame'], {}), '(color_frame)\n', (3784, 3797), True, 'import numpy as np\n'), ((3825, 3862), 'cv2.resize', 'cv2.resize', (['color_frame', 'display_size'], {}), '(color_frame, display_size)\n', (3835, 3862), False, 'import cv2\n'), ((1101, 1159), 'markov_rlzoo.MDPState', 'MDPState', (['reward', 'actions', 'terminal', 'self'], {'action_args': 'crd'}), '(reward, actions, terminal, self, action_args=crd)\n', (1109, 1159), False, 'from markov_rlzoo import MDPEnv, MDPState\n'), ((3359, 3376), 'matplotlib.pyplot.imshow', 'plt.imshow', (['frame'], {}), '(frame)\n', (3369, 3376), True, 'from matplotlib import pyplot as plt\n'), ((3393, 3403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3401, 3403), True, 'from matplotlib import pyplot as plt\n'), ((3439, 3465), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (3449, 3465), False, 'import cv2\n'), ((3482, 3496), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3493, 3496), False, 'import cv2\n'), ((3904, 3927), 'matplotlib.pyplot.imshow', 'plt.imshow', (['color_frame'], {}), '(color_frame)\n', (3914, 3927), True, 'from matplotlib import pyplot as plt\n'), ((3944, 3954), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3952, 3954), True, 'from matplotlib import pyplot as plt\n'), ((3990, 4022), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'color_frame'], {}), "('frame', color_frame)\n", 
(4000, 4022), False, 'import cv2\n'), ((4039, 4053), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (4050, 4053), False, 'import cv2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import random
import sys
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tracer import data_utils
from tracer import seq2seq_model
from tensorflow.python.platform import flags
'''
# Have to use same model params between training and decoding, would rather have them always
# come from a config script so it can easily be re-used. The config script should be saved with
# all the other model data and loaded automatically.
tracer-train --data_dir=$TRACERDIR/data --train_dir=$TRACERDIR/data/checkpoints
tracer-decode --trace_path=$TRACERDIR/data/dev.ids20000.in --output=$TRACERDIR/data/test
'''
# --- Optimizer and model hyperparameters (shared by training and decoding) ---
tf.app.flags.DEFINE_float("learning_rate", 0.5, "Learning rate.")
tf.app.flags.DEFINE_float("learning_rate_decay_factor", 0.99,
                          "Learning rate decays by this much.")
tf.app.flags.DEFINE_float("max_gradient_norm", 5.0,
                          "Clip gradients to this norm.")
tf.app.flags.DEFINE_integer("batch_size", 64,
                            "Batch size to use during training.")
tf.app.flags.DEFINE_integer("size", 10, "Size of each model layer.")
tf.app.flags.DEFINE_integer("num_layers", 1, "Number of layers in the model.")
tf.app.flags.DEFINE_integer("in_vocab_size", 20000, "Source vocabulary size.")
tf.app.flags.DEFINE_integer("out_vocab_size", 8, "Target vocabulary size.")
# --- Data locations; empty path flags fall back to defaults derived from data_dir ---
tf.app.flags.DEFINE_string("in_train", "", "File containing encoded input for training.")
tf.app.flags.DEFINE_string("out_train", "", "File containing encoded output for training.")
tf.app.flags.DEFINE_string("in_dev", "", "File containing encoded input for checkpoint reporting.")
tf.app.flags.DEFINE_string("out_dev", "", "File containing encoded output for checkpoint reporting.")
tf.app.flags.DEFINE_string("data_dir", "/tmp", "Directory containing data to be used to train the model.")
tf.app.flags.DEFINE_string("train_dir", "/tmp", "Directory to which to write training checkpoints.")
tf.app.flags.DEFINE_string("data_tag", "train", "Tag to look for in input files.")
# --- Training-loop behavior ---
tf.app.flags.DEFINE_integer("max_train_data_size", 0,
                            "Limit on the size of training data (0: no limit).")
tf.app.flags.DEFINE_integer("steps_per_checkpoint", 50,
                            "How many training steps to do per checkpoint.")
tf.app.flags.DEFINE_boolean("decode", False,
                            "Set to True for interactive decoding.")
tf.app.flags.DEFINE_boolean("self_test", False,
                            "Run a self-test if this is set to True.")
tf.app.flags.DEFINE_boolean("use_lstm", True,
                            "Whether to use an LSTM (True) or GRU (False).")
tf.app.flags.DEFINE_boolean("debug", False,
                            "Whether to run in debugging mode.")
# For decoding
tf.app.flags.DEFINE_string("trace_path", "/tmp", "Path to a trace to decode.")
tf.app.flags.DEFINE_string("output", "/tmp", "Path to file to which to write the decoded output.")
FLAGS = tf.app.flags.FLAGS
#_buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
#_buckets = [(5, 30), (30, 60), (60, 90), (90, 120)] # Read length buckets, so minibatches
# are always working with data of approximately the same size, to avoid wasting computation.
# A single flat bucket: traces up to 90 tokens map to sequences up to 120 tokens.
_buckets = [(90,120)]
def cli():
    """Console entry point for training.

    Parses the command-line flags, then trains a deep LSTM model that maps
    raw PacBio instrument traces to DNA base calls.
    """
    flags.FLAGS._parse_flags()
    train()
def read_data(source_path, target_path, max_size=None):
    """Read parallel token-id files into bucketed training pairs.

    Reads one line at a time from each file (source trace ids and target
    base ids), appends EOS to the target, and drops the pair into the
    first bucket large enough to hold both sequences; pairs that fit no
    bucket are discarded.

    Args:
        source_path: path to encoded input sequences, one per line.
        target_path: path to encoded output sequences, one per line.
        max_size: optional cap on the number of line pairs to read.

    Returns:
        A list with one list of [source_ids, target_ids] pairs per bucket.
    """
    data_set = [[] for _ in _buckets]
    with tf.gfile.GFile(source_path, mode="r") as src, \
            tf.gfile.GFile(target_path, mode="r") as tgt:
        counter = 0
        source = src.readline()
        target = tgt.readline()
        while source and target and (not max_size or counter < max_size):
            counter += 1
            if counter % 100000 == 0:
                print("  reading data line %d" % counter)
                sys.stdout.flush()
            source_ids = [int(tok) for tok in source.split()]
            target_ids = [int(tok) for tok in target.split()]
            target_ids.append(data_utils.EOS_ID)
            # Place the pair into the first bucket that can hold it.
            for bucket_id, (source_size, target_size) in enumerate(_buckets):
                if len(source_ids) < source_size and len(target_ids) < target_size:
                    data_set[bucket_id].append([source_ids, target_ids])
                    break
            source = src.readline()
            target = tgt.readline()
    return data_set
def read_flat_data(source_path, target_path, max_size=None):
    """Read parallel token-id files into a single flat bucket.

    Unlike read_data, every pair is appended to bucket 0: trace inputs and
    base outputs differ too much in length for the shared length-bucket
    scheme to apply.

    Args:
        source_path: path to encoded input sequences, one per line.
        target_path: path to encoded output sequences, one per line.
        max_size: optional cap on the number of line pairs to read.

    Returns:
        A list shaped like _buckets whose first entry holds all
        [source_ids, target_ids] pairs.
    """
    data_set = [[] for _ in _buckets]
    with open(source_path, "r") as src, open(target_path, "r") as tgt:
        counter = 0
        source, target = src.readline(), tgt.readline()
        while source and target and (not max_size or counter < max_size):
            counter += 1
            if counter % 1000 == 0:
                print("  reading data line %d" % counter)
                sys.stdout.flush()
            source_ids = [int(tok) for tok in source.split()]
            target_ids = [int(tok) for tok in target.split()]
            target_ids.append(data_utils.EOS_ID)
            # Everything goes into the single flat bucket.
            data_set[0].append([source_ids, target_ids])
            source, target = src.readline(), tgt.readline()
    return data_set
def create_model(session, forward_only, debug=False):
    """Build the Seq2Seq model and initialize or restore its parameters.

    Args:
        session: the tf.Session to initialize/restore variables in.
        forward_only: build without the backward (training) pass.
        debug: print progress messages while constructing the model.

    Returns:
        The constructed seq2seq_model.Seq2SeqModel.
    """
    def _dbg(msg):
        # Construction progress, shown only when debug=True.
        if debug:
            print(msg)

    _dbg("creating model...")
    model = seq2seq_model.Seq2SeqModel(
        FLAGS.in_vocab_size, FLAGS.out_vocab_size, _buckets,
        FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,
        FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,
        forward_only=forward_only, use_lstm=FLAGS.use_lstm, debug=FLAGS.debug)
    _dbg("getting checkpoint state")
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    _dbg("finished getting checkpoint state")
    if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
        print("Reading model parameters from %s" % ckpt.model_checkpoint_path)
        model.saver.restore(session, ckpt.model_checkpoint_path)
    else:
        print("Created model with fresh parameters.")
        session.run(tf.initialize_all_variables())
    return model
def train():
    """Train a SMRT sequencing trace -> DNA sequence translation model.

    Reads encoded training/dev data (paths taken from the path flags, or
    derived from --data_dir when those flags are empty), builds the
    seq2seq model, and runs the training loop, checkpointing and
    reporting dev-set perplexity every --steps_per_checkpoint steps.
    """
    # BUG FIX: these four locals were previously assigned only when the
    # matching flag was empty, so passing e.g. --in_train=... made the
    # later uses raise NameError.  Use the flag value when given,
    # otherwise fall back to the conventional path under data_dir.
    in_train = FLAGS.in_train
    if len(in_train) == 0:
        in_train = os.path.join(FLAGS.data_dir, "train.ids" + str(FLAGS.in_vocab_size) + ".in")
    out_train = FLAGS.out_train
    if len(out_train) == 0:
        out_train = os.path.join(FLAGS.data_dir, "train.ids" + str(FLAGS.out_vocab_size) + ".out")
    in_dev = FLAGS.in_dev
    if len(in_dev) == 0:
        in_dev = os.path.join(FLAGS.data_dir, "dev.ids" + str(FLAGS.in_vocab_size) + ".in")
    out_dev = FLAGS.out_dev
    if len(out_dev) == 0:
        out_dev = os.path.join(FLAGS.data_dir, "dev.ids" + str(FLAGS.out_vocab_size) + ".out")

    with tf.Session() as sess:
        # Create model.
        print("Creating %d layers of %d units." % (FLAGS.num_layers, FLAGS.size))
        model = create_model(sess, False)

        # Read data into buckets and compute their sizes.
        print ("Reading development and training data (limit: %d)."
               % FLAGS.max_train_data_size)
        dev_set = read_flat_data(in_dev, out_dev)
        train_set = read_flat_data(in_train, out_train, FLAGS.max_train_data_size)
        train_bucket_sizes = [len(train_set[b]) for b in xrange(len(_buckets))]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is a list of increasing numbers from 0 to 1 used to
        # pick a bucket with probability proportional to its size.
        train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / train_total_size
                               for i in xrange(len(train_bucket_sizes))]

        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        while True:
            # Choose a bucket according to data distribution. We pick a random
            # number in [0, 1] and use the matching interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([i for i in xrange(len(train_buckets_scale))
                             if train_buckets_scale[i] > random_number_01])

            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, False)
            step_time += (time.time() - start_time) / FLAGS.steps_per_checkpoint
            loss += step_loss / FLAGS.steps_per_checkpoint
            current_step += 1

            # Once in a while, we save checkpoint, print statistics, and run evals.
            if current_step % FLAGS.steps_per_checkpoint == 0:
                # Print statistics for the previous epoch.
                perplexity = math.exp(loss) if loss < 300 else float('inf')
                print ("global step %d learning rate %.4f step-time %.2f perplexity "
                       "%.2f" % (model.global_step.eval(), model.learning_rate.eval(),
                                 step_time, perplexity))
                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)
                previous_losses.append(loss)
                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(FLAGS.train_dir, "translate.ckpt")
                model.saver.save(sess, checkpoint_path, global_step=model.global_step)
                step_time, loss = 0.0, 0.0
                # Run evals on development set and print their perplexity.
                for bucket_id in xrange(len(_buckets)):
                    if len(dev_set[bucket_id]) == 0:
                        print("  eval: empty bucket %d" % (bucket_id))
                        continue
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                                 target_weights, bucket_id, True)
                    eval_ppx = math.exp(eval_loss) if eval_loss < 300 else float('inf')
                    print("  eval: bucket %d perplexity %.2f" % (bucket_id, eval_ppx))
                    sys.stdout.flush()
def decode_cli():
    '''Make basecalls given trace input and a previously trained model.

    Reads encoded traces (one per line, space-separated token ids) from
    --trace_path, decodes each with the trained model, and writes one DNA
    sequence per line to --output.
    '''
    with tf.Session() as sess:
        print("opened session")
        # Create model and load parameters.
        model = create_model(sess, True, debug=True)
        model.batch_size = 1  # We decode one sentence at a time.
        print("initialized model")
        # Traces are expected in their encoded (token-id) form, one per line.
        with open(FLAGS.output, "w") as decoded_file:
            with open(FLAGS.trace_path, "r") as tracefile:
                for line in tracefile:
                    line = line.strip()
                    if not line:
                        # BUG FIX: the old loop re-bound `trace = map(int, ...)`
                        # and tested `while trace:`; under Python 3 a map object
                        # is always truthy (infinite loop at EOF) and int('')
                        # raised ValueError on the empty trailing line.
                        continue
                    trace = [int(tok) for tok in line.split(" ")]
                    bucket_id = 0  # single flat bucket (see _buckets)
                    # Get a 1-element batch to feed the sentence to the model.
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        {bucket_id: [(trace, [])]}, bucket_id)
                    # Get output logits for the sentence.
                    _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                                     target_weights, bucket_id, True)
                    # This is a greedy decoder - outputs are just argmaxes of output_logits.
                    outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
                    # If there is an EOS symbol in outputs, cut them at that point.
                    if data_utils.EOS_ID in outputs:
                        outputs = outputs[:outputs.index(data_utils.EOS_ID)]
                    # Print out decoded DNA sequence.
                    decoded_file.write("".join([data_utils.decode_base(output) for output in outputs]))
                    decoded_file.write("\n")
| [
"tensorflow.app.flags.DEFINE_float",
"tensorflow.initialize_all_variables",
"tensorflow.gfile.Exists",
"numpy.random.random_sample",
"tensorflow.app.flags.DEFINE_integer",
"tracer.data_utils.decode_base",
"tensorflow.Session",
"os.path.join",
"numpy.argmax",
"tensorflow.app.flags.DEFINE_string",
... | [((857, 922), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.5)', '"""Learning rate."""'], {}), "('learning_rate', 0.5, 'Learning rate.')\n", (882, 922), True, 'import tensorflow as tf\n'), ((923, 1026), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate_decay_factor"""', '(0.99)', '"""Learning rate decays by this much."""'], {}), "('learning_rate_decay_factor', 0.99,\n 'Learning rate decays by this much.')\n", (948, 1026), True, 'import tensorflow as tf\n'), ((1049, 1136), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""max_gradient_norm"""', '(5.0)', '"""Clip gradients to this norm."""'], {}), "('max_gradient_norm', 5.0,\n 'Clip gradients to this norm.')\n", (1074, 1136), True, 'import tensorflow as tf\n'), ((1159, 1246), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch size to use during training."""'], {}), "('batch_size', 64,\n 'Batch size to use during training.')\n", (1186, 1246), True, 'import tensorflow as tf\n'), ((1271, 1339), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""size"""', '(10)', '"""Size of each model layer."""'], {}), "('size', 10, 'Size of each model layer.')\n", (1298, 1339), True, 'import tensorflow as tf\n'), ((1340, 1418), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_layers"""', '(1)', '"""Number of layers in the model."""'], {}), "('num_layers', 1, 'Number of layers in the model.')\n", (1367, 1418), True, 'import tensorflow as tf\n'), ((1419, 1497), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""in_vocab_size"""', '(20000)', '"""Source vocabulary size."""'], {}), "('in_vocab_size', 20000, 'Source vocabulary size.')\n", (1446, 1497), True, 'import tensorflow as tf\n'), ((1498, 1573), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', 
(['"""out_vocab_size"""', '(8)', '"""Target vocabulary size."""'], {}), "('out_vocab_size', 8, 'Target vocabulary size.')\n", (1525, 1573), True, 'import tensorflow as tf\n'), ((1575, 1668), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""in_train"""', '""""""', '"""File containing encoded input for training."""'], {}), "('in_train', '',\n 'File containing encoded input for training.')\n", (1601, 1668), True, 'import tensorflow as tf\n'), ((1665, 1760), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""out_train"""', '""""""', '"""File containing encoded output for training."""'], {}), "('out_train', '',\n 'File containing encoded output for training.')\n", (1691, 1760), True, 'import tensorflow as tf\n'), ((1757, 1860), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""in_dev"""', '""""""', '"""File containing encoded input for checkpoint reporting."""'], {}), "('in_dev', '',\n 'File containing encoded input for checkpoint reporting.')\n", (1783, 1860), True, 'import tensorflow as tf\n'), ((1857, 1962), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""out_dev"""', '""""""', '"""File containing encoded output for checkpoint reporting."""'], {}), "('out_dev', '',\n 'File containing encoded output for checkpoint reporting.')\n", (1883, 1962), True, 'import tensorflow as tf\n'), ((1960, 2070), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_dir"""', '"""/tmp"""', '"""Directory containing data to be used to train the model."""'], {}), "('data_dir', '/tmp',\n 'Directory containing data to be used to train the model.')\n", (1986, 2070), True, 'import tensorflow as tf\n'), ((2067, 2171), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""/tmp"""', '"""Directory to which to write training checkpoints."""'], {}), "('train_dir', '/tmp',\n 'Directory to which to write training checkpoints.')\n", (2093, 
2171), True, 'import tensorflow as tf\n'), ((2168, 2254), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""data_tag"""', '"""train"""', '"""Tag to look for in input files."""'], {}), "('data_tag', 'train',\n 'Tag to look for in input files.')\n", (2194, 2254), True, 'import tensorflow as tf\n'), ((2251, 2361), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_train_data_size"""', '(0)', '"""Limit on the size of training data (0: no limit)."""'], {}), "('max_train_data_size', 0,\n 'Limit on the size of training data (0: no limit).')\n", (2278, 2361), True, 'import tensorflow as tf\n'), ((2386, 2494), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""steps_per_checkpoint"""', '(50)', '"""How many training steps to do per checkpoint."""'], {}), "('steps_per_checkpoint', 50,\n 'How many training steps to do per checkpoint.')\n", (2413, 2494), True, 'import tensorflow as tf\n'), ((2519, 2608), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""decode"""', '(False)', '"""Set to True for interactive decoding."""'], {}), "('decode', False,\n 'Set to True for interactive decoding.')\n", (2546, 2608), True, 'import tensorflow as tf\n'), ((2633, 2727), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""self_test"""', '(False)', '"""Run a self-test if this is set to True."""'], {}), "('self_test', False,\n 'Run a self-test if this is set to True.')\n", (2660, 2727), True, 'import tensorflow as tf\n'), ((2752, 2850), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""use_lstm"""', '(True)', '"""Whether to use an LSTM (True) or GRU (False)."""'], {}), "('use_lstm', True,\n 'Whether to use an LSTM (True) or GRU (False).')\n", (2779, 2850), True, 'import tensorflow as tf\n'), ((2875, 2960), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""debug"""', '(False)', '"""Whether to run in debugging 
mode."""'], {}), "('debug', False, 'Whether to run in debugging mode.'\n )\n", (2902, 2960), True, 'import tensorflow as tf\n'), ((3001, 3079), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""trace_path"""', '"""/tmp"""', '"""Path to a trace to decode."""'], {}), "('trace_path', '/tmp', 'Path to a trace to decode.')\n", (3027, 3079), True, 'import tensorflow as tf\n'), ((3080, 3182), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""output"""', '"""/tmp"""', '"""Path to file to which to write the decoded output."""'], {}), "('output', '/tmp',\n 'Path to file to which to write the decoded output.')\n", (3106, 3182), True, 'import tensorflow as tf\n'), ((6181, 6472), 'tracer.seq2seq_model.Seq2SeqModel', 'seq2seq_model.Seq2SeqModel', (['FLAGS.in_vocab_size', 'FLAGS.out_vocab_size', '_buckets', 'FLAGS.size', 'FLAGS.num_layers', 'FLAGS.max_gradient_norm', 'FLAGS.batch_size', 'FLAGS.learning_rate', 'FLAGS.learning_rate_decay_factor'], {'forward_only': 'forward_only', 'use_lstm': 'FLAGS.use_lstm', 'debug': 'FLAGS.debug'}), '(FLAGS.in_vocab_size, FLAGS.out_vocab_size,\n _buckets, FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.\n batch_size, FLAGS.learning_rate, FLAGS.learning_rate_decay_factor,\n forward_only=forward_only, use_lstm=FLAGS.use_lstm, debug=FLAGS.debug)\n', (6207, 6472), False, 'from tracer import seq2seq_model\n'), ((6544, 6590), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (6573, 6590), True, 'import tensorflow as tf\n'), ((3798, 3835), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['source_path'], {'mode': '"""r"""'}), "(source_path, mode='r')\n", (3812, 3835), True, 'import tensorflow as tf\n'), ((6664, 6707), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['ckpt.model_checkpoint_path'], {}), '(ckpt.model_checkpoint_path)\n', (6679, 6707), True, 'import tensorflow as tf\n'), ((7880, 7892), 'tensorflow.Session', 
'tf.Session', ([], {}), '()\n', (7890, 7892), True, 'import tensorflow as tf\n'), ((11681, 11693), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (11691, 11693), True, 'import tensorflow as tf\n'), ((3861, 3898), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['target_path'], {'mode': '"""r"""'}), "(target_path, mode='r')\n", (3875, 3898), True, 'import tensorflow as tf\n'), ((6919, 6948), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (6946, 6948), True, 'import tensorflow as tf\n'), ((9291, 9316), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (9314, 9316), True, 'import numpy as np\n'), ((9511, 9522), 'time.time', 'time.time', ([], {}), '()\n', (9520, 9522), False, 'import time\n'), ((10725, 10772), 'os.path.join', 'os.path.join', (['FLAGS.train_dir', '"""translate.ckpt"""'], {}), "(FLAGS.train_dir, 'translate.ckpt')\n", (10737, 10772), False, 'import os\n'), ((11556, 11574), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11572, 11574), False, 'import sys\n'), ((4192, 4210), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4208, 4210), False, 'import sys\n'), ((5156, 5174), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5172, 5174), False, 'import sys\n'), ((9789, 9800), 'time.time', 'time.time', ([], {}), '()\n', (9798, 9800), False, 'import time\n'), ((10138, 10152), 'math.exp', 'math.exp', (['loss'], {}), '(loss)\n', (10146, 10152), False, 'import math\n'), ((11414, 11433), 'math.exp', 'math.exp', (['eval_loss'], {}), '(eval_loss)\n', (11422, 11433), False, 'import math\n'), ((13128, 13152), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), '(logit, axis=1)\n', (13137, 13152), True, 'import numpy as np\n'), ((13540, 13570), 'tracer.data_utils.decode_base', 'data_utils.decode_base', (['output'], {}), '(output)\n', (13562, 13570), False, 'from tracer import data_utils\n')] |
import cv2
import os
import sys
import numpy as np
def main(argv):
    """Run neural style transfer on an icon while preserving its size and alpha.

    The icon is resized to 256x256, pasted onto the centre of a black
    512x512 frame (the external style-transfer script works on the frame),
    stylized by invoking ``run_test.py``, cropped back out, resized to the
    icon's *original* dimensions and finally re-merged with the original
    alpha channel.

    Parameters
    ----------
    argv: list of str
        ``[content_path, style_model_path, output_path]``.
    """
    content = argv[0]
    style = argv[1]
    output = argv[2]
    interim_content = "img_without_alpha.jpg"
    interim_output = "interim_" + output
    exec_format = "python run_test.py --content {} --style_model {} --output {}"

    norm_icon_w, norm_icon_h = (256, 256)
    norm_frame_w, norm_frame_h = (512, 512)
    # Offsets that centre the icon inside the frame.
    icon_x = (norm_frame_w - norm_icon_w) >> 1
    icon_y = (norm_frame_h - norm_icon_h) >> 1

    # Read unchanged (flag -1) so a possible alpha channel is kept.
    img_with_alpha = cv2.imread(content, -1)
    # BUG FIX: take the *original* geometry before any resizing. The old code
    # read these from the already-normalized 256x256 image, so the final
    # "resize back" was a no-op and the output was always 256x256.
    height, width = img_with_alpha.shape[:2]
    channels = img_with_alpha.shape[2] if img_with_alpha.ndim == 3 else 1

    norm_with_alpha = cv2.resize(img_with_alpha, (norm_icon_w, norm_icon_h), cv2.INTER_CUBIC)
    # np.zeros takes (rows, cols) = (height, width); the frame is square so
    # the previous (w, h) ordering happened to work — keep it explicit.
    black_frame = np.zeros((norm_frame_h, norm_frame_w, 3), np.uint8)

    if channels >= 4:
        # Keep the alpha plane at its original resolution so it matches the
        # back-resized result when merging below (the old code kept the
        # 256x256 normalized alpha instead).
        alpha = img_with_alpha[:, :, 3]
        img_without_alpha = norm_with_alpha[:, :, :3]
    else:
        img_without_alpha = norm_with_alpha

    black_frame[icon_y:icon_y + norm_icon_h, icon_x:icon_x + norm_icon_w] = img_without_alpha
    cv2.imwrite(interim_content, black_frame)

    # NOTE(review): os.system with interpolated paths is shell-injection
    # prone if the paths are untrusted; prefer subprocess.run([...]).
    exec_string = exec_format.format(interim_content, style, interim_output)
    print(exec_string)
    os.system(exec_string)

    processed = cv2.imread(interim_output)
    processed = processed[icon_y:icon_y + norm_icon_h, icon_x:icon_x + norm_icon_w]

    # Best-effort cleanup of the temporary files; only swallow file-system
    # errors instead of *every* exception (the old bare except hid bugs).
    for tmp_file in (interim_content, interim_output):
        try:
            os.remove(tmp_file)
        except OSError:
            pass

    original_resized = cv2.resize(processed, (width, height), cv2.INTER_CUBIC)
    if channels >= 4:
        merged = cv2.merge((original_resized, alpha))
    else:
        merged = original_resized
    cv2.imwrite(output, merged)


if __name__ == "__main__":
    main(sys.argv[1:])
| [
"cv2.imwrite",
"cv2.merge",
"numpy.zeros",
"os.system",
"cv2.resize",
"cv2.imread",
"os.remove"
] | [((525, 548), 'cv2.imread', 'cv2.imread', (['content', '(-1)'], {}), '(content, -1)\n', (535, 548), False, 'import cv2\n'), ((572, 643), 'cv2.resize', 'cv2.resize', (['img_with_alpha', '(norm_icon_w, norm_icon_h)', 'cv2.INTER_CUBIC'], {}), '(img_with_alpha, (norm_icon_w, norm_icon_h), cv2.INTER_CUBIC)\n', (582, 643), False, 'import cv2\n'), ((662, 713), 'numpy.zeros', 'np.zeros', (['(norm_frame_w, norm_frame_h, 3)', 'np.uint8'], {}), '((norm_frame_w, norm_frame_h, 3), np.uint8)\n', (670, 713), True, 'import numpy as np\n'), ((1030, 1071), 'cv2.imwrite', 'cv2.imwrite', (['interim_content', 'black_frame'], {}), '(interim_content, black_frame)\n', (1041, 1071), False, 'import cv2\n'), ((1177, 1199), 'os.system', 'os.system', (['exec_string'], {}), '(exec_string)\n', (1186, 1199), False, 'import os\n'), ((1217, 1243), 'cv2.imread', 'cv2.imread', (['interim_output'], {}), '(interim_output)\n', (1227, 1243), False, 'import cv2\n'), ((1452, 1507), 'cv2.resize', 'cv2.resize', (['processed', '(width, height)', 'cv2.INTER_CUBIC'], {}), '(processed, (width, height), cv2.INTER_CUBIC)\n', (1462, 1507), False, 'import cv2\n'), ((1634, 1661), 'cv2.imwrite', 'cv2.imwrite', (['output', 'merged'], {}), '(output, merged)\n', (1645, 1661), False, 'import cv2\n'), ((1342, 1368), 'os.remove', 'os.remove', (['interim_content'], {}), '(interim_content)\n', (1351, 1368), False, 'import os\n'), ((1377, 1402), 'os.remove', 'os.remove', (['interim_output'], {}), '(interim_output)\n', (1386, 1402), False, 'import os\n'), ((1548, 1584), 'cv2.merge', 'cv2.merge', (['(original_resized, alpha)'], {}), '((original_resized, alpha))\n', (1557, 1584), False, 'import cv2\n')] |
"""
Lyapunov module
=================
Module with the classes of multi-thread the computation of the various
`Lyapunov vectors`_ and `exponents`_. Integrate using the `Runge-Kutta method`_
defined in the :mod:`~.integrators.integrate` module.
See :cite:`lyap-KP2012` for more details on the Lyapunov vectors theoretical framework.
Module classes
--------------
* :class:`LyapunovsEstimator` to estimate the Backward and Forward Lyapunov Vectors (BLVs and FLVs) along a trajectory
* :class:`CovariantLyapunovsEstimator` to estimate the Covariant Lyapunov Vectors (CLVs) along a trajectory
.. _Lyapunov vectors: https://en.wikipedia.org/wiki/Lyapunov_vector
.. _exponents: https://en.wikipedia.org/wiki/Lyapunov_exponent
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
.. _Numba: https://numba.pydata.org/
References
----------
.. bibliography:: ../model/ref.bib
:labelprefix: LYAP-
:keyprefix: lyap-
"""
from numba import njit
import numpy as np
import qgs.integrators.integrate as integrate
from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse
import multiprocessing
class LyapunovsEstimator(object):
    """Class to compute the Forward and Backward `Lyapunov vectors`_ and `exponents`_ along a trajectory of a dynamical system
    .. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})
    with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
    The tangent linear model must also be provided. I.e. one must provide the linearized ODEs
    .. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
    where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
    Jacobian matrix of :math:`\\boldsymbol{f}`.
    The method used to compute the Lyapunov vectors is the one introduced by
    Benettin et al. :cite:`lyap-BGGS1980`.
    Parameters
    ----------
    num_threads: None or int, optional
        Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
        cores available. Default to `None`.
    b: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    c: None or ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    a: None or ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
        If `None`, use the classic RK4 method coefficients. Default to `None`.
    number_of_dimensions: None or int, optional
        Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
        callable :attr:`func`. Default to `None`.
    Attributes
    ----------
    num_threads: int
        Number of :class:`LyapProcess` workers (threads) to use.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    n_dim: int
        Dynamical system dimension.
    n_vec: int
        The number of Lyapunov vectors to compute.
    n_traj: int
        The number of trajectories (initial conditions) computed at the last estimation
        performed by the estimator.
    n_records: int
        The number of saved states of the last estimation performed by the estimator.
    ic: ~numpy.ndarray
        Store the estimator initial conditions.
    func: callable
        Last function :math:`\\boldsymbol{f}` used by the estimator.
    func_jac: callable
        Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
    """
    def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None):
        if num_threads is None:
            self.num_threads = multiprocessing.cpu_count()
        else:
            self.num_threads = num_threads
        # Default is RK4
        if a is None and b is None and c is None:
            self.c = np.array([0., 0.5, 0.5, 1.])
            self.b = np.array([1./6, 1./3, 1./3, 1./6])
            self.a = np.zeros((len(self.c), len(self.b)))
            self.a[1, 0] = 0.5
            self.a[2, 1] = 0.5
            self.a[3, 2] = 1.
        else:
            self.a = a
            self.b = b
            self.c = c
        self.ic = None
        self._time = None
        self._pretime = None
        self._recorded_traj = None
        self._recorded_exp = None
        self._recorded_vec = None
        self.n_traj = 0
        self.n_dim = number_of_dimensions
        self.n_records = 0
        self.n_vec = 0
        self.write_steps = 0
        self._adjoint = False
        # _forward: -1 -> Backward Lyapunov vectors, +1 -> Forward ones.
        self._forward = -1
        self._inverse = 1.
        self.func = None
        self.func_jac = None
        self._ics_queue = None
        self._lyap_queue = None
        self._processes_list = list()
    def terminate(self):
        """Stop the workers (threads) and release the resources of the estimator."""
        for process in self._processes_list:
            process.terminate()
            process.join()
    def start(self):
        """Start or restart the workers (threads) of the estimator.
        Warnings
        --------
        If the estimator was not previously terminated, it will be terminated first in the case
        of a restart.
        """
        self.terminate()
        self._processes_list = list()
        self._ics_queue = multiprocessing.JoinableQueue()
        self._lyap_queue = multiprocessing.Queue()
        for i in range(self.num_threads):
            self._processes_list.append(LyapProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
                                                    self._ics_queue, self._lyap_queue))
        for process in self._processes_list:
            process.daemon = True
            process.start()
    def set_bca(self, b=None, c=None, a=None, ic_init=True):
        """Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.
        .. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
        Parameters
        ----------
        b: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        c: None or ~numpy.ndarray, optional
            Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        a: None or ~numpy.ndarray, optional
            Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
            If `None`, does not reinitialize these coefficients.
        ic_init: bool, optional
            Re-initialize or not the initial conditions of the estimator. Default to `True`.
        """
        if a is not None:
            self.a = a
        if b is not None:
            self.b = b
        if c is not None:
            self.c = c
        if ic_init:
            self.ic = None
        self.start()
    def set_func(self, f, fjac):
        """Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
        :math:`\\boldsymbol{\\mathrm{J}}` to integrate.
        .. _Numba: https://numba.pydata.org/
        Parameters
        ----------
        f: callable
            The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
            Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
        fjac: callable
            The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
            Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.
        Warnings
        --------
        This function restarts the estimator!
        """
        self.func = f
        self.func_jac = fjac
        self.start()
    def compute_lyapunovs(self, t0, tw, t, dt, mdt, ic=None, write_steps=1, n_vec=None, forward=False, adjoint=False,
                          inverse=False):
        """Estimate the Lyapunov vectors using the Benettin algorithm along a given trajectory, always integrating the said trajectory
        forward in time from `ic` at `t0` to time `t`.
        The result of the estimation can be obtained afterward by calling :meth:`get_lyapunovs`.
        If `forward` is `True`, it yields the Forward Lyapunov Vectors (FLVs) between `t0` and `tw`, otherwise, returns the Backward
        Lyapunov Vectors (BLVs) between `tw` and `t`.
        Parameters
        ----------
        t0: float
            Initial time of the time integration. Corresponds to the initial condition's `ic` time.
        tw: float
            Time at which the algorithm start to store the Lyapunov vectors. Define thus also the transient before the which the Lyapunov
            vectors are considered as having not yet converged. Must be between `t0` and `t`.
        t: float
            Final time of the time integration. Corresponds to the final condition.
        dt: float
            Timestep of the integration.
        mdt: float
            Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
        ic: None or ~numpy.ndarray(float), optional
            Initial conditions of the system. Can be a 1D or a 2D array:
            * 1D: Provide a single initial condition.
              Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
            * 2D: Provide an ensemble of initial condition.
              Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
              and where `n_traj` is the number of initial conditions.
            If `None`, use the initial conditions stored in :attr:`ic`.
            If then :attr:`ic` is `None`, use a zero initial condition.
            Default to `None`.
        forward: bool, optional
            If `True`, yield the `Forward Lyapunov Vectors` (FLVs) between `t0` and `tw`.
            If `False`, yield the `Backward Lyapunov Vectors` (BLVs) between `tw` and `t`.
            Default to `False`, i.e. Backward Lyapunov Vectors estimation.
        adjoint: bool, optional
            If true, integrate the tangent :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}` ,
            else, integrate the adjoint linear model :math:`\\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}^T(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}`.
            Integrate the tangent model by default.
        inverse: bool, optional
            Whether or not to invert the Jacobian matrix
            :math:`\\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\rightarrow \\boldsymbol{\\mathrm{J}}^{-1}(t, \\boldsymbol{x})`.
            `False` by default.
        write_steps: int, optional
            Save the state of the integration in memory every `write_steps` steps. The other intermediary
            steps are lost. It determines the size of the returned objects. Default is 1.
            Set to 0 to return only the final state.
        n_vec: int, optional
            The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
        """
        if self.func is None or self.func_jac is None:
            print('No function to integrate defined!')
            return 0
        if ic is None:
            if self.n_dim is not None:
                # A dimension was hardcoded at construction time
                # (`number_of_dimensions`): use it directly instead of probing.
                self.ic = np.zeros(self.n_dim)
            else:
                # Probe the system dimension: Numba-jitted callables cannot be
                # introspected, so grow a zero state vector until `func`
                # accepts it. Catch `Exception` only — the previous bare
                # `except:` also swallowed KeyboardInterrupt/SystemExit.
                i = 1
                while True:
                    self.ic = np.zeros(i)
                    try:
                        x = self.func(0., self.ic)
                    except Exception:
                        i += 1
                    else:
                        break
                # Size the initial condition on the tendencies' length
                # (avoids the redundant extra call to `func` the original did).
                self.ic = np.zeros(len(x))
        else:
            self.ic = ic
        if len(self.ic.shape) == 1:
            self.ic = self.ic.reshape((1, -1))
        self.n_traj = self.ic.shape[0]
        self.n_dim = self.ic.shape[1]
        if n_vec is not None:
            self.n_vec = n_vec
        else:
            self.n_vec = self.n_dim
        # Transient time span [t0, tw] and main time span [tw, t], each with
        # its endpoint appended explicitly.
        self._pretime = np.concatenate((np.arange(t0, tw, dt), np.full((1,), tw)))
        self._time = np.concatenate((np.arange(tw, t, dt), np.full((1,), t)))
        self.write_steps = write_steps
        if forward:
            self._forward = 1
        else:
            self._forward = -1
        self._adjoint = adjoint
        self._inverse = 1.
        if inverse:
            self._inverse *= -1.
        # Number of stored records: BLVs are recorded on the main span,
        # FLVs on the transient span; an extra slot holds the endpoint when
        # it does not fall on a `write_steps` multiple.
        if write_steps == 0:
            self.n_records = 1
        else:
            if not forward:
                tot = self._time[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._time[-1]:
                    self.n_records += 1
            else:
                tot = self._pretime[::self.write_steps]
                self.n_records = len(tot)
                if tot[-1] != self._pretime[-1]:
                    self.n_records += 1
        self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
        self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
        self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))
        # Dispatch one work item per trajectory to the worker processes, wait
        # for completion, then gather the (unordered) results by index.
        for i in range(self.n_traj):
            self._ics_queue.put((i, self._pretime, self._time, mdt, self.ic[i], self.n_vec, self.write_steps,
                                 self._forward, self._adjoint, self._inverse))
        self._ics_queue.join()
        for i in range(self.n_traj):
            args = self._lyap_queue.get()
            self._recorded_traj[args[0]] = args[1]
            self._recorded_exp[args[0]] = args[2]
            self._recorded_vec[args[0]] = args[3]
    def get_lyapunovs(self):
        """Returns the result of the previous Lyapunov vectors estimation.
        Returns
        -------
        time, traj, exponents, vectors: ~numpy.ndarray
            The result of the estimation:
            * **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
            * **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
            * **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
              If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
            * **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
              Depending on the input initial conditions, it is maximum a 4D array of shape
              (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
              If one of the dimension is 1, it is squeezed.
        """
        # BLVs were recorded on the main span, FLVs on the transient span.
        if self._forward == -1:
            tt = self._time
        else:
            tt = self._pretime
        if self.write_steps > 0:
            if tt[::self.write_steps][-1] == tt[-1]:
                return tt[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                       np.squeeze(self._recorded_vec)
            else:
                # The endpoint does not fall on a write_steps multiple: append it.
                return np.concatenate((tt[::self.write_steps], np.full((1,), tt[-1]))), \
                       np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
        else:
            return tt[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
                   np.squeeze(self._recorded_vec)
class LyapProcess(multiprocessing.Process):
    """:class:`LyapunovsEstimator`'s workers class. Allows to multi-thread Lyapunov vectors estimation.
    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray, optional
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray, optional
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray, optional
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue to which the worker ask for initial conditions and parameters input.
    lyap_queue: multiprocessing.Queue
        Queue to which the worker returns the estimation results.
    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """
    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, lyap_queue):
        super().__init__()
        self.processID = processID
        self._ics_queue = ics_queue
        self._lyap_queue = lyap_queue
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c
    def run(self):
        """Main worker computing routine. Perform the estimation with the fetched initial conditions and parameters."""
        # Worker loop: blocks on the input queue, runs the requested
        # estimation and pushes the result back. It loops forever; workers
        # are stopped via Process.terminate() by the estimator.
        while True:
            # `args` layout (see LyapunovsEstimator.compute_lyapunovs where
            # the tuple is put on the queue):
            #   0: trajectory index      1: transient time grid
            #   2: main time grid        3: tangent micro-timestep `mdt`
            #   4: initial condition     5: number of vectors `n_vec`
            #   6: write_steps           7: direction (-1 backward, +1 forward)
            #   8: adjoint flag          9: inverse flag
            args = self._ics_queue.get()
            if args[7] == -1:
                # Backward Lyapunov vectors (Benettin forward-in-time QR).
                recorded_traj, recorded_exp, recorded_vec = _compute_backward_lyap_jit(self.func, self.func_jac,
                                                                                       args[1], args[2], args[3],
                                                                                       args[4][np.newaxis, :], args[5],
                                                                                       args[6], args[8], args[9],
                                                                                       self.b, self.c, self.a)
            else:
                # Forward Lyapunov vectors (propagation reversed in time).
                recorded_traj, recorded_exp, recorded_vec = _compute_forward_lyap_jit(self.func, self.func_jac,
                                                                                      args[1], args[2], args[3],
                                                                                      args[4][np.newaxis, :], args[5],
                                                                                      args[6], args[8], args[9],
                                                                                      self.b, self.c, self.a)
            # Return the trajectory index with the (squeezed) results so the
            # estimator can reassemble them in order.
            self._lyap_queue.put((args[0], np.squeeze(recorded_traj), np.squeeze(recorded_exp),
                                  np.squeeze(recorded_vec)))
            self._ics_queue.task_done()
@njit
def _compute_forward_lyap_jit(f, fjac, time, posttime, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Integrate the full nonlinear trajectory over both time spans first
    # (dropping the duplicated junction point), then estimate the forward
    # Lyapunov vectors along it.
    full_time = np.concatenate((time[:-1], posttime))
    full_traj = integrate._integrate_runge_kutta_jit(f, full_time, ic, 1, 1, b, c, a)
    return _compute_forward_lyap_traj_jit(f, fjac, time, posttime, full_traj, mdt,
                                          n_vec, write_steps, adjoint, inverse, b, c, a)
@njit
def _compute_forward_lyap_traj_jit(f, fjac, time, posttime, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Benettin-type estimation of the Forward Lyapunov Vectors along a precomputed
    trajectory `ttraj`: the tangent propagators are applied in *reversed* time order
    and repeatedly re-orthonormalized by QR decomposition."""
    # Split the precomputed trajectory: `traj` covers `time`, `posttraj`
    # covers `posttime` (they share the junction point).
    traj = ttraj[:, :, :len(time)]
    posttraj = ttraj[:, :, len(time)-1:]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    # Identity used as initial condition of the tangent propagator.
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of stored records on `time`; one extra slot if the endpoint
    # does not fall on a `write_steps` multiple.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    # Time grids reversed: the forward vectors are obtained by propagating
    # backward along the trajectory.
    rposttime = reverse(posttime)
    rtime = reverse(time)
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        # Random orthonormal start for the n_vec tangent directions.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        # NOTE(review): initialized with length n_dim but overwritten below
        # with diag(r) of length n_vec — only matters if the `rtime` loop is
        # empty while n_vec != n_dim; confirm callers always give len(time) > 1.
        m_exp = np.zeros((n_dim))
        # Transient on `posttime` (reversed): converge toward the FLVs
        # without recording anything.
        for ti, (tt, dt) in enumerate(zip(rposttime[:-1], np.diff(rposttime))):
            y[0] = posttraj[i_traj, :, -1-ti]
            # Micro-grid for the tangent integration over one (reversed) dt step.
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Propagate the directions and re-orthonormalize (QR step).
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Recording phase on `time` (reversed); records are filled from the
        # end (iw starts at -1 and decreases) since we walk backward in time.
        iw = -1
        for ti, (tt, dt) in enumerate(zip(rtime[:-1], np.diff(rtime))):
            y[0] = traj[i_traj, :, -1-ti]
            # Local exponents from the stretching factors |diag(R)| per unit time.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw -= 1
            subtime = np.concatenate((np.arange(tt + dt, tt, mdt), np.full((1,), tt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, -1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # First record slot holds the state/vectors at the start of `time`.
        recorded_exp[i_traj, :, 0] = m_exp
        recorded_traj[i_traj, :, 0] = y[0]
        recorded_vec[i_traj, :, :, 0] = q
    return recorded_traj, recorded_exp, recorded_vec
@njit
def _compute_backward_lyap_jit(f, fjac, pretime, time, mdt, ic, n_vec, write_steps, adjoint, inverse, b, c, a):
    # Integrate the full nonlinear trajectory over both time spans first
    # (dropping the duplicated junction point), then estimate the backward
    # Lyapunov vectors along it.
    full_time = np.concatenate((pretime[:-1], time))
    full_traj = integrate._integrate_runge_kutta_jit(f, full_time, ic, 1, 1, b, c, a)
    return _compute_backward_lyap_traj_jit(f, fjac, pretime, time, full_traj, mdt,
                                           n_vec, write_steps, adjoint, inverse, b, c, a)
@njit
def _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj, mdt, n_vec, write_steps, adjoint, inverse, b, c, a):
    """Benettin-type estimation of the Backward Lyapunov Vectors along a precomputed
    trajectory `ttraj`: the tangent propagators are applied forward in time and
    repeatedly re-orthonormalized by QR decomposition."""
    # Split the precomputed trajectory: `pretraj` covers `pretime`, `traj`
    # covers `time` (they share the junction point).
    pretraj = ttraj[:, :, :len(pretime)]
    traj = ttraj[:, :, (len(pretime)-1):]
    n_traj = ttraj.shape[0]
    n_dim = ttraj.shape[1]
    # Identity used as initial condition of the tangent propagator.
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    # Number of stored records on `time`; one extra slot if the endpoint
    # does not fall on a `write_steps` multiple.
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        y = np.zeros((1, n_dim))
        y[0] = pretraj[i_traj, :, 0]
        # Random orthonormal start for the n_vec tangent directions.
        qr = np.linalg.qr(np.random.random((n_dim, n_vec)))
        q = qr[0]
        # NOTE(review): initialized with length n_dim but overwritten below
        # with diag(r) of length n_vec — only matters if the `time` loop is
        # empty while n_vec != n_dim; confirm callers always give len(time) > 1.
        m_exp = np.zeros((n_dim))
        # Transient on `pretime`: converge toward the BLVs without recording.
        for ti, (tt, dt) in enumerate(zip(pretime[:-1], np.diff(pretime))):
            # Micro-grid for the tangent integration over one dt step.
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            # Resynchronize the state on the precomputed trajectory.
            y[0] = pretraj[i_traj, :, ti+1]
            # Propagate the directions and re-orthonormalize (QR step).
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Recording phase on `time`; records filled from the start (iw = 0 up).
        iw = 0
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            # Local exponents from the stretching factors |diag(R)| per unit time.
            m_exp = np.log(np.abs(np.diag(r)))/dt
            if write_steps > 0 and np.mod(ti, write_steps) == 0:
                recorded_exp[i_traj, :, iw] = m_exp
                recorded_traj[i_traj, :, iw] = y[0]
                recorded_vec[i_traj, :, :, iw] = q
                iw += 1
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    adjoint, inverse, integrate._zeros_func)
            y[0] = traj[i_traj, :, ti+1]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            r = qr[1]
        # Last record slot holds the state/vectors at the end of `time`.
        recorded_exp[i_traj, :, -1] = m_exp
        recorded_traj[i_traj, :, -1] = y[0]
        recorded_vec[i_traj, :, :, -1] = q
    return recorded_traj, recorded_exp, recorded_vec
class CovariantLyapunovsEstimator(object):
"""Class to compute the Covariant `Lyapunov vectors`_ (CLVs) and `exponents`_ along a trajectory of a dynamical system
.. math:: \\dot{\\boldsymbol{x}} = \\boldsymbol{f}(t, \\boldsymbol{x})
with a set of :class:`LyapProcess` and a specified `Runge-Kutta method`_.
The tangent linear model must also be provided. I.e. one must provide the linearized ODEs
.. math :: \\dot{\\boldsymbol{\\delta x}} = \\boldsymbol{\\mathrm{J}}(t, \\boldsymbol{x}) \\cdot \\boldsymbol{\\delta x}
where :math:`\\boldsymbol{\\mathrm{J}} = \\frac{\\partial \\boldsymbol{f}}{\\partial \\boldsymbol{x}}` is the
Jacobian matrix of :math:`\\boldsymbol{f}`.
Parameters
----------
num_threads: None or int, optional
Number of :class:`LyapProcess` workers (threads) to use. If `None`, use the number of machine's
cores available. Default to `None`.
b: None or ~numpy.ndarray, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
c: None or ~numpy.ndarray, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
a: None or ~numpy.ndarray, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, use the classic RK4 method coefficients. Default to `None`.
number_of_dimensions: None or int, optional
Allow to hardcode the dynamical system dimension. If `None`, evaluate the dimension from the
callable :attr:`func`. Default to `None`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Default to `0`, i.e. Ginelli et al. algorithm.
noise_pert: float, optional
Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
near tangencies (see :cite:`lyap-KP2012`). Default to 0 (no perturbation).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
Attributes
----------
num_threads: int
Number of :class:`LyapProcess` workers (threads) to use.
b: ~numpy.ndarray
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
c: ~numpy.ndarray
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
a: ~numpy.ndarray
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
n_dim: int
Dynamical system dimension.
n_vec: int
The number of Lyapunov vectors to compute.
n_traj: int
The number of trajectories (initial conditions) computed at the last estimation
performed by the estimator.
n_records: int
The number of saved states of the last estimation performed by the estimator.
ic: ~numpy.ndarray
Store the estimator initial conditions.
func: callable
Last function :math:`\\boldsymbol{f}` used by the estimator.
func_jac: callable
Last Jacobian matrix function :math:`\\boldsymbol{J}` used by the estimator.
method: int
Select the method used to compute the CLVs:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspaces spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
noise_pert: float
Noise perturbation parameter of the diagonal of the matrix resulting from the backpropagation during the Ginelli step.
Mainly done to avoid ill-conditioned matrices near tangencies (see :cite:`lyap-KP2012`).
Only apply if using the Ginelli et al. algorithm, i.e. if ``method=0``.
"""
def __init__(self, num_threads=None, b=None, c=None, a=None, number_of_dimensions=None, noise_pert=0., method=0):
if num_threads is None:
self.num_threads = multiprocessing.cpu_count()
else:
self.num_threads = num_threads
# Default is RK4
if a is None and b is None and c is None:
self.c = np.array([0., 0.5, 0.5, 1.])
self.b = np.array([1./6, 1./3, 1./3, 1./6])
self.a = np.zeros((len(self.c), len(self.b)))
self.a[1, 0] = 0.5
self.a[2, 1] = 0.5
self.a[3, 2] = 1.
else:
self.a = a
self.b = b
self.c = c
self.noise_pert = noise_pert
self.ic = None
self._time = None
self._pretime = None
self._aftertime = None
self._recorded_traj = None
self._recorded_exp = None
self._recorded_vec = None
self._recorded_bvec = None
self._recorded_fvec = None
self.n_traj = 0
self.n_dim = number_of_dimensions
self.n_records = 0
self.n_vec = 0
self.write_steps = 0
self.method = method
self.func = None
self.func_jac = None
self._ics_queue = None
self._clv_queue = None
self._processes_list = list()
def terminate(self):
"""Stop the workers (threads) and release the resources of the estimator."""
for process in self._processes_list:
process.terminate()
process.join()
def set_noise_pert(self, noise_pert):
"""Set the noise perturbation :attr:`noise_pert` parameter.
Parameters
----------
noise_pert: float, optional
Noise perturbation amplitude parameter of the diagonal of the R matrix in the QR decomposition during the Ginelli step. Mainly done to avoid ill-conditioned matrices
near tangencies (see :cite:`lyap-KP2012`).
Only apply if using the Ginelli et al. algorithm, i.e. if :attr:`method` is 0.
"""
self.noise_pert = noise_pert
self.start()
def set_bca(self, b=None, c=None, a=None, ic_init=True):
"""Set the coefficients of the `Runge-Kutta method`_ and restart the estimator.
.. _Runge-Kutta method: https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
Parameters
----------
b: None or ~numpy.ndarray, optional
Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
c: None or ~numpy.ndarray, optional
Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
a: None or ~numpy.ndarray, optional
Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
If `None`, does not reinitialize these coefficients.
ic_init: bool, optional
Re-initialize or not the initial conditions of the estimator. Default to `True`.
"""
if a is not None:
self.a = a
if b is not None:
self.b = b
if c is not None:
self.c = c
if ic_init:
self.ic = None
self.start()
def start(self):
"""Start or restart the workers (threads) of the estimator.
Warnings
--------
If the estimator was not previously terminated, it will be terminated first in the case
of a restart.
"""
self.terminate()
self._processes_list = list()
self._ics_queue = multiprocessing.JoinableQueue()
self._clv_queue = multiprocessing.Queue()
for i in range(self.num_threads):
self._processes_list.append(ClvProcess(i, self.func, self.func_jac, self.b, self.c, self.a,
self._ics_queue, self._clv_queue, self.noise_pert))
for process in self._processes_list:
process.daemon = True
process.start()
def set_func(self, f, fjac):
"""Set the `Numba`_-jitted function :math:`\\boldsymbol{f}` and Jacobian matrix function
:math:`\\boldsymbol{\\mathrm{J}}` to integrate.
.. _Numba: https://numba.pydata.org/
Parameters
----------
f: callable
The `Numba`_-jitted function :math:`\\boldsymbol{f}`.
Should have the signature ``f(t, x)`` where ``x`` is the state value and ``t`` is the time.
fjac: callable
The `Numba`_-jitted Jacobian matrix function :math:`\\boldsymbol{J}`.
Should have the signature ``J(t, x)`` where ``x`` is the state value and ``t`` is the time.
Warnings
--------
This function restarts the estimator!
"""
self.func = f
self.func_jac = fjac
self.start()
def compute_clvs(self, t0, ta, tb, tc, dt, mdt, ic=None, write_steps=1, n_vec=None, method=None, backward_vectors=False, forward_vectors=False):
"""Estimate the Covariant Lyapunov Vectors (CLVs) along a given trajectory, always integrating the said trajectory
forward in time from `ic` at `t0` to time `tc`. Return the CLVs between `ta` and `tb`.
The result of the estimation can be obtained afterward by calling :meth:`get_clvs`.
Parameters
----------
t0: float
Initial time of the time integration. Corresponds to the initial condition's `ic` time.
ta: float
Define the time span between `t0` and `ta` of the first part of the algorithm, which obtain the convergence to the Backward Lyapunov vectors
(initialization of the Benettin algorithm).
tb: float
Define the time span between `ta` and `tb` where the Covariant Lyapunov Vectors are computed.
tc: float
Final time of the time integration algorithm. Define the time span between `tb` and `tc` where, depending on the value of :attr:`method`,
the convergence to the Forward Lyapunov Vectors or to the Covariant Lyapunov Vectors (thanks to the Ginelli steps) is obtained.
dt: float
Timestep of the integration.
mdt: float
Micro-timestep to integrate the tangent linear equation between the nonlinear system `dt` timesteps. Should be smaller or equal to `dt`.
ic: None or ~numpy.ndarray(float), optional
Initial conditions of the system. Can be a 1D or a 2D array:
* 1D: Provide a single initial condition.
Should be of shape (`n_dim`,) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`.
* 2D: Provide an ensemble of initial condition.
Should be of shape (`n_traj`, `n_dim`) where `n_dim` = :math:`\\mathrm{dim}(\\boldsymbol{x})`,
and where `n_traj` is the number of initial conditions.
If `None`, use the initial conditions stored in :attr:`ic`.
If then :attr:`ic` is `None`, use a zero initial condition.
Default to `None`.
write_steps: int, optional
Save the state of the integration in memory every `write_steps` steps. The other intermediary
steps are lost. It determines the size of the returned objects. Default is 1.
Set to 0 to return only the final state.
n_vec: int, optional
The number of Lyapunov vectors to compute. Should be smaller or equal to :attr:`n_dim`.
method: int, optional
Allow to select the method used to compute the CLVs. Presently can be `0` or `1`:
* `0`: Uses the method of Ginelli et al. :cite:`lyap-GPTCLP2007`. Suitable for a trajectory not too long (depends on the memory available).
* `1`: Uses the method of the intersection of the subspace spanned by the BLVs and FLVs described in :cite:`lyap-ER1985` and :cite:`lyap-KP2012`
(see also :cite:`lyap-DPV2021`, Appendix A). Suitable for longer trajectories (uses less memory).
Use the Ginelli et al. algorithm if not provided.
backward_vectors: bool, optional
Store also the computed Backward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the BLVs if not provided.
forward_vectors: bool, optional
Store also the computed Forward Lyapunov vectors between `ta` and `tb`. Only applies if ``method=1``.
Does not store the FLVs if not provided.
"""
if self.func is None or self.func_jac is None:
print('No function to integrate defined!')
return 0
if ic is None:
i = 1
while True:
self.ic = np.zeros(i)
try:
x = self.func(0., self.ic)
except:
i += 1
else:
break
i = len(self.func(0., self.ic))
self.ic = np.zeros(i)
else:
self.ic = ic
if len(self.ic.shape) == 1:
self.ic = self.ic.reshape((1, -1))
self.n_traj = self.ic.shape[0]
self.n_dim = self.ic.shape[1]
if n_vec is not None:
self.n_vec = n_vec
else:
self.n_vec = self.n_dim
if method is not None:
self.method = method
self._pretime = np.concatenate((np.arange(t0, ta, dt), np.full((1,), ta)))
self._time = np.concatenate((np.arange(ta, tb, dt), np.full((1,), tb)))
self._aftertime = np.concatenate((np.arange(tb, tc, dt), np.full((1,), tc)))
self.write_steps = write_steps
if write_steps == 0:
self.n_records = 1
else:
tot = self._time[::self.write_steps]
self.n_records = len(tot)
if tot[-1] != self._time[-1]:
self.n_records += 1
self._recorded_traj = np.zeros((self.n_traj, self.n_dim, self.n_records))
self._recorded_vec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
self._recorded_exp = np.zeros((self.n_traj, self.n_vec, self.n_records))
if self.method == 1:
if forward_vectors:
self._recorded_fvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
if backward_vectors:
self._recorded_bvec = np.zeros((self.n_traj, self.n_dim, self.n_vec, self.n_records))
for i in range(self.n_traj):
self._ics_queue.put((i, self._pretime, self._time, self._aftertime, mdt, self.ic[i], self.n_vec,
self.write_steps, self.method))
self._ics_queue.join()
for i in range(self.n_traj):
args = self._clv_queue.get()
self._recorded_traj[args[0]] = args[1]
self._recorded_exp[args[0]] = args[2]
self._recorded_vec[args[0]] = args[3]
if self.method == 1:
if forward_vectors:
self._recorded_fvec[args[0]] = args[5]
if backward_vectors:
self._recorded_bvec[args[0]] = args[4]
def get_clvs(self):
"""Returns the result of the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~numpy.ndarray
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
* **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
* **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
* **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
Depending on the input initial conditions, it is maximum a 4D array of shape
(:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
If one of the dimension is 1, it is squeezed.
"""
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_vec)
else:
return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_vec)
else:
return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_vec)
def get_blvs(self):
"""Returns the BLVs obtained during the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~numpy.ndarray
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
* **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
* **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
* **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
Depending on the input initial conditions, it is maximum a 4D array of shape
(:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
If one of the dimension is 1, it is squeezed.
Warnings
--------
The BLVs are only available if :attr:`method` is set to 1.
"""
if self._recorded_bvec is None:
return None
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_bvec)
else:
return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_bvec)
else:
return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_bvec)
def get_flvs(self):
"""Returns the FLVs obtained during the previous CLVs estimation.
Returns
-------
time, traj, exponents, vectors: ~numpy.ndarray
The result of the estimation:
* **time:** Time at which the state of the system was saved. Array of shape (:attr:`n_records`,).
* **traj:** Saved dynamical system states. 3D array of shape (:attr:`n_traj`, :attr:`n_dim`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_dim`, :attr:`n_records`) is returned instead.
* **exponents:** Saved estimates of the local Lyapunov exponents along the trajectory. 3D array of shape (:attr:`n_traj`, :attr:`n_vec`, :attr:`n_records`).
If :attr:`n_traj` = 1, a 2D array of shape (:attr:`n_vec`, :attr:`n_records`) is returned instead.
* **vectors:** Saved estimates of the local Lyapunov vectors along the trajectory.
Depending on the input initial conditions, it is maximum a 4D array of shape
(:attr:`n_traj`, :attr:`n_dim`, :attr:`n_vec`, :attr:`n_records`).
If one of the dimension is 1, it is squeezed.
Warnings
--------
The FLVs are only available if :attr:`method` is set to 1.
"""
if self._recorded_fvec is None:
return None
if self.write_steps > 0:
if self._time[::self.write_steps][-1] == self._time[-1]:
return self._time[::self.write_steps], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_fvec)
else:
return np.concatenate((self._time[::self.write_steps], np.full((1,), self._time[-1]))), \
np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), np.squeeze(self._recorded_fvec)
else:
return self._time[-1], np.squeeze(self._recorded_traj), np.squeeze(self._recorded_exp), \
np.squeeze(self._recorded_fvec)
class ClvProcess(multiprocessing.Process):
    """Worker class of :class:`CovariantLyapunovsEstimator`. Allows to multi-thread the
    Lyapunov vectors estimation.

    Parameters
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    ics_queue: multiprocessing.JoinableQueue
        Queue from which the worker fetches the initial conditions and parameters of a job.
    clv_queue: multiprocessing.Queue
        Queue to which the worker pushes back the estimation results.
    noise_pert: float
        Noise perturbation amplitude forwarded to the Ginelli kernel.

    Attributes
    ----------
    processID: int
        Number identifying the worker.
    func: callable
        `Numba`_-jitted function to integrate assigned to the worker.
    func_jac: callable
        `Numba`_-jitted Jacobian matrix function to integrate assigned to the worker.
    b: ~numpy.ndarray
        Vector of coefficients :math:`b_i` of the `Runge-Kutta method`_ .
    c: ~numpy.ndarray
        Matrix of coefficients :math:`c_{i,j}` of the `Runge-Kutta method`_ .
    a: ~numpy.ndarray
        Vector of coefficients :math:`a_i` of the `Runge-Kutta method`_ .
    """

    def __init__(self, processID, func, func_jac, b, c, a, ics_queue, clv_queue, noise_pert):
        super().__init__()
        self.processID = processID
        self.func = func
        self.func_jac = func_jac
        self.a = a
        self.b = b
        self.c = c
        self._ics_queue = ics_queue
        self._clv_queue = clv_queue
        self.noise_pert = noise_pert

    def run(self):
        """Main worker computing routine. Fetch jobs, perform the estimation and push
        back the results, forever."""
        while True:
            job = self._ics_queue.get()
            # Job layout matches what compute_clvs puts on the queue.
            idx, pretime, tspan, aftertime, mdt, x0, n_vec, write_steps, method = job
            if method == 0:
                # Ginelli et al. algorithm.
                traj, exps, vecs = _compute_clv_gin_jit(self.func, self.func_jac, pretime, tspan,
                                                         aftertime, mdt, x0[np.newaxis, :],
                                                         n_vec, write_steps,
                                                         self.b, self.c, self.a, self.noise_pert)
                self._clv_queue.put((idx, np.squeeze(traj), np.squeeze(exps),
                                     np.squeeze(vecs)))
            else:
                # Subspace intersection algorithm, returning also the BLVs and FLVs.
                traj, exps, vecs, bvec, fvec = _compute_clv_sub_jit(self.func, self.func_jac, pretime, tspan,
                                                                     aftertime, mdt, x0[np.newaxis, :],
                                                                     write_steps, self.b, self.c, self.a)
                self._clv_queue.put((idx, np.squeeze(traj), np.squeeze(exps),
                                     np.squeeze(vecs), np.squeeze(bvec), np.squeeze(fvec)))
            self._ics_queue.task_done()
# Ginelli et al. method
@njit
def _compute_clv_gin_jit(f, fjac, pretime, time, aftertime, mdt, ic, n_vec, write_steps, b, c, a, noise_pert):
    # Kernel computing the CLVs with the Ginelli et al. algorithm (method 0).
    # pretime spans [t0, ta] (spin-up converging the backward vectors), time spans
    # [ta, tb] (window on which the CLVs are returned) and aftertime spans [tb, tc]
    # (extra Benettin steps consumed by the backward Ginelli transient).
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    Id = np.zeros((1, n_dim, n_dim))
    Id[0] = np.eye(n_dim)
    if write_steps == 0:
        n_records = 1
    else:
        tot = time[::write_steps]
        n_records = len(tot)
        # The final time tb is always recorded.
        if tot[-1] != time[-1]:
            n_records += 1
    recorded_vec = np.zeros((n_traj, n_dim, n_vec, n_records))
    recorded_traj = np.zeros((n_traj, n_dim, n_records))
    recorded_exp = np.zeros((n_traj, n_vec, n_records))
    for i_traj in range(n_traj):
        # first part, making the backward vectors converge (initialization of the Benettin algorithm)
        y = np.zeros((1, n_dim))
        y[0] = ic[i_traj]
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        q = qr[0]
        for tt, dt in zip(pretime[:-1], np.diff(pretime)):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
        # second part, stores the backward vectors and the r matrix (Benettin steps)
        # save the trajectories
        tw = len(time)-1
        tew = len(time)+len(aftertime)-2
        tmp_traj = np.zeros((tw+1, n_dim))
        tmp_vec = np.zeros((tw+1, n_dim, n_vec))
        tmp_R = np.zeros((tew, n_vec, n_vec))
        for ti, (tt, dt) in enumerate(zip(time[:-1], np.diff(time))):
            tmp_vec[ti] = q.copy()
            tmp_traj[ti] = y[0].copy()
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti] = qr[1].copy()
        tmp_vec[-1] = q.copy()
        tmp_traj[-1] = y[0].copy()
        # third part, stores the r matrix (Benettin steps)
        for ti, (tt, dt) in enumerate(zip(aftertime[:-1], np.diff(aftertime))):
            subtime = np.concatenate((np.arange(tt, tt + dt, mdt), np.full((1,), tt + dt)))
            y_new, prop = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, Id, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            y[0] = y_new[0, :, 0]
            q_new = prop[0, :, :, 0] @ q
            qr = np.linalg.qr(q_new)
            q = qr[0]
            tmp_R[ti+tw] = qr[1].copy()
        # fourth part, going backward until tb (Ginelli steps)
        qr = np.linalg.qr(np.random.randn(n_dim, n_vec))
        am, norm = normalize_matrix_columns(qr[1])
        for ti in range(tew-1, tw, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # Bugfix: `am_new` is (n_vec, n_vec), so both the noise vector and the
            # diagonal-perturbation loop must use n_vec (the original mixed n_dim and
            # n_vec here and in the loop below, which indexes `am_new` and `noise` out
            # of bounds whenever n_vec < n_dim -- silent memory corruption under numba's
            # unchecked indexing).
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, norm = normalize_matrix_columns(am_new)
        # fifth and last part, going backward from tb to ta (Ginelli steps)
        # save the data
        dte = np.concatenate((np.diff(time), np.full((1,), aftertime[1] - aftertime[0])))
        iw = 1
        for ti in range(tw, -1, -1):
            am_new = solve_triangular_matrix(tmp_R[ti], am)
            # Bugfix: same n_vec/n_dim mix-up as above.
            noise = np.random.randn(n_vec)
            for i in range(n_vec):
                am_new[i, i] += noise[i] * noise_pert
            am, mloc_exp = normalize_matrix_columns(am_new)
            if write_steps > 0 and np.mod(tw-ti, write_steps) == 0:
                recorded_traj[i_traj, :, -iw] = tmp_traj[ti]
                recorded_exp[i_traj, :, -iw] = -np.log(np.abs(mloc_exp))/dte[ti]
                recorded_vec[i_traj, :, :, -iw] = tmp_vec[ti] @ am
                iw += 1
        recorded_traj[i_traj, :, 0] = tmp_traj[0]
        recorded_exp[i_traj, :, 0] = -np.log(np.abs(mloc_exp))/dte[0]
        recorded_vec[i_traj, :, :, 0] = tmp_vec[0] @ am
    return recorded_traj, recorded_exp, recorded_vec
# Subspace intersection method
@njit
def _compute_clv_sub_jit(f, fjac, pretime, time, aftertime, mdt, ic, write_steps, b, c, a):
    # Kernel computing the CLVs by the subspace-intersection method (method 1):
    # each CLV is obtained as the intersection of the subspaces spanned by the
    # backward (BLVs) and forward (FLVs) Lyapunov vectors along the trajectory.
    n_traj = ic.shape[0]
    n_dim = ic.shape[1]
    lp = len(pretime)
    la = len(aftertime)
    # Integrate the full nonlinear trajectory from t0 to tc in one pass.
    ttraj = integrate._integrate_runge_kutta_jit(f, np.concatenate((pretime[:-1], time[:-1], aftertime)), ic, 1, 1, b, c, a)
    # FLVs on [ta, tb], using the trajectory piece from ta onward ...
    traj, exp, fvec = _compute_forward_lyap_traj_jit(f, fjac, time, aftertime, ttraj[:, :, lp-1:], mdt,
                                                   n_dim, write_steps, False, 1, b, c, a)
    # ... and BLVs on [ta, tb], using the piece up to tb.
    traj, exp, bvec = _compute_backward_lyap_traj_jit(f, fjac, pretime, time, ttraj[:, :, :-la+1], mdt,
                                                    n_dim, write_steps, False, 1, b, c, a)
    recorded_traj = traj
    recorded_exp = np.zeros_like(traj)
    n_records = traj.shape[-1]
    recorded_vec = np.zeros((n_traj, n_dim, n_dim, n_records))
    subtime = np.array([0., mdt])
    y = np.zeros((1, n_dim))
    vec = np.zeros((1, n_dim, n_dim))
    for i_traj in range(n_traj):
        for ti in range(n_records):
            for j in range(n_dim):
                # j-th CLV: leading left singular vector of the overlap matrix between
                # the first j+1 BLVs and the first n_dim-j FLVs, mapped back into the
                # BLV basis.
                u, z, w = np.linalg.svd(bvec[i_traj, :, :j+1, ti].T @ fvec[i_traj, :, :n_dim-j, ti])
                basis = bvec[i_traj, :, :j+1, ti] @ u
                recorded_vec[i_traj, :, j, ti] = basis[:, 0]
            # Local exponents: growth rate of each CLV over one micro-timestep mdt,
            # obtained by propagating the vectors with the tangent linear model.
            y[0] = recorded_traj[i_traj, :, ti]
            vec[0] = recorded_vec[i_traj, :, :, ti]
            y_new, sol = integrate._integrate_runge_kutta_tgls_jit(f, fjac, subtime, y, vec, 1, 0, b, c, a,
                                                                    False, 1, integrate._zeros_func)
            soln, mloc_exp = normalize_matrix_columns(sol[0, :, :, 0])
            recorded_exp[i_traj, :, ti] = np.log(np.abs(mloc_exp))/mdt
    return recorded_traj, recorded_exp, recorded_vec, bvec, fvec
if __name__ == "__main__":
    # Demonstration script: computes the backward, forward and covariant Lyapunov
    # vectors of the Lorenz 63 system (a Lorenz 84 setup is also defined and kept
    # here commented out).

    # Lorenz 84 model parameters
    a = 0.25
    F = 8.
    G = 1.
    b = 4.

    @njit
    def fL84(t, x):
        # Lorenz 84 tendencies
        xx = -x[1] ** 2 - x[2] ** 2 - a * x[0] + a * F
        yy = x[0] * x[1] - b * x[0] * x[2] - x[1] + G
        zz = b * x[0] * x[1] + x[0] * x[2] - x[2]
        return np.array([xx, yy, zz])

    @njit
    def DfL84(t, x):
        # Jacobian matrix of the Lorenz 84 tendencies
        return np.array([[ -a , -2. * x[1], -2. * x[2]],
                         [x[1] - b * x[2], -1. + x[0], -b * x[0]],
                         [b * x[1] + x[2], b * x[0], -1. + x[0]]])

    # Lorenz 63 model parameters
    sigma = 10.
    r = 28.
    bb = 8. / 3.

    @njit
    def fL63(t, x):
        # Lorenz 63 tendencies
        xx = sigma * (x[1] - x[0])
        yy = r * x[0] - x[1] - x[0] * x[2]
        zz = x[0] * x[1] - bb * x[2]
        return np.array([xx, yy, zz])

    @njit
    def DfL63(t, x):
        # Jacobian matrix of the Lorenz 63 tendencies
        return np.array([[-sigma, sigma, 0.],
                         [r - x[2], -1., - x[0]],
                         [x[1], x[0], -bb]])

    # Spin-up run (write_steps=0 keeps only the final state) to bring the random
    # initial condition onto the attractor.
    ic = np.random.random(3)
    # tt, ic_L84 = integrate.integrate_runge_kutta(fL84, 0., 10000., 0.01, ic=ic, write_steps=0)
    tt, ic = integrate.integrate_runge_kutta(fL63, 0., 10000., 0.01, ic=ic, write_steps=0)

    print('Computing Backward Lyapunovs')
    lyapint = LyapunovsEstimator()
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 10000., 30000., 0.01, 0.01, ic, write_steps=1) #, n_vec=2)
    btl, btraj, bexp, bvec = lyapint.get_lyapunovs()

    print('Computing Forward Lyapunovs')
    # lyapint.set_func(fL84, DfL84)
    lyapint.set_func(fL63, DfL63)
    lyapint.compute_lyapunovs(0., 20000., 30000., 0.01, 0.01, ic, write_steps=1, forward=True, adjoint=False, inverse=False) #, n_vec=2)
    ftl, ftraj, fexp, fvec = lyapint.get_lyapunovs()

    print('Computing Covariant Lyapunovs')
    clvint = CovariantLyapunovsEstimator()
    # clvint.set_func(fL84, DfL84)
    clvint.set_func(fL63, DfL63)
    # Ginelli et al. algorithm (method 0, the default)
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=1) #, n_vec=2)
    ctl, ctraj, cexp, cvec = clvint.get_clvs()
    # Subspace-intersection algorithm (method 1), also storing the BLVs
    clvint.compute_clvs(0., 10000., 20000., 30000., 0.01, 0.01, ic, write_steps=10, method=1, backward_vectors=True) #, n_vec=2)
    ctl2, ctraj2, cexp2, cvec2 = clvint.get_clvs()

    lyapint.terminate()
clvint.terminate() | [
"multiprocessing.JoinableQueue",
"multiprocessing.cpu_count",
"numpy.array",
"qgs.functions.util.solve_triangular_matrix",
"numpy.mod",
"numpy.arange",
"numpy.linalg.qr",
"numpy.random.random",
"numpy.diff",
"qgs.functions.util.reverse",
"numpy.concatenate",
"numpy.abs",
"numpy.eye",
"qgs.... | [((20501, 20528), 'numpy.zeros', 'np.zeros', (['(1, n_dim, n_dim)'], {}), '((1, n_dim, n_dim))\n', (20509, 20528), True, 'import numpy as np\n'), ((20541, 20554), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', (20547, 20554), True, 'import numpy as np\n'), ((20755, 20798), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_vec, n_records)'], {}), '((n_traj, n_dim, n_vec, n_records))\n', (20763, 20798), True, 'import numpy as np\n'), ((20819, 20855), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_records)'], {}), '((n_traj, n_dim, n_records))\n', (20827, 20855), True, 'import numpy as np\n'), ((20875, 20911), 'numpy.zeros', 'np.zeros', (['(n_traj, n_vec, n_records)'], {}), '((n_traj, n_vec, n_records))\n', (20883, 20911), True, 'import numpy as np\n'), ((20929, 20946), 'qgs.functions.util.reverse', 'reverse', (['posttime'], {}), '(posttime)\n', (20936, 20946), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((20959, 20972), 'qgs.functions.util.reverse', 'reverse', (['time'], {}), '(time)\n', (20966, 20972), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((23549, 23576), 'numpy.zeros', 'np.zeros', (['(1, n_dim, n_dim)'], {}), '((1, n_dim, n_dim))\n', (23557, 23576), True, 'import numpy as np\n'), ((23589, 23602), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', (23595, 23602), True, 'import numpy as np\n'), ((23803, 23846), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_vec, n_records)'], {}), '((n_traj, n_dim, n_vec, n_records))\n', (23811, 23846), True, 'import numpy as np\n'), ((23867, 23903), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_records)'], {}), '((n_traj, n_dim, n_records))\n', (23875, 23903), True, 'import numpy as np\n'), ((23923, 23959), 'numpy.zeros', 'np.zeros', (['(n_traj, n_vec, n_records)'], {}), '((n_traj, n_vec, n_records))\n', (23931, 23959), True, 'import numpy as np\n'), ((51293, 51320), 'numpy.zeros', 
'np.zeros', (['(1, n_dim, n_dim)'], {}), '((1, n_dim, n_dim))\n', (51301, 51320), True, 'import numpy as np\n'), ((51333, 51346), 'numpy.eye', 'np.eye', (['n_dim'], {}), '(n_dim)\n', (51339, 51346), True, 'import numpy as np\n'), ((51546, 51589), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_vec, n_records)'], {}), '((n_traj, n_dim, n_vec, n_records))\n', (51554, 51589), True, 'import numpy as np\n'), ((51610, 51646), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_records)'], {}), '((n_traj, n_dim, n_records))\n', (51618, 51646), True, 'import numpy as np\n'), ((51666, 51702), 'numpy.zeros', 'np.zeros', (['(n_traj, n_vec, n_records)'], {}), '((n_traj, n_vec, n_records))\n', (51674, 51702), True, 'import numpy as np\n'), ((56382, 56401), 'numpy.zeros_like', 'np.zeros_like', (['traj'], {}), '(traj)\n', (56395, 56401), True, 'import numpy as np\n'), ((56452, 56495), 'numpy.zeros', 'np.zeros', (['(n_traj, n_dim, n_dim, n_records)'], {}), '((n_traj, n_dim, n_dim, n_records))\n', (56460, 56495), True, 'import numpy as np\n'), ((56510, 56530), 'numpy.array', 'np.array', (['[0.0, mdt]'], {}), '([0.0, mdt])\n', (56518, 56530), True, 'import numpy as np\n'), ((56538, 56558), 'numpy.zeros', 'np.zeros', (['(1, n_dim)'], {}), '((1, n_dim))\n', (56546, 56558), True, 'import numpy as np\n'), ((56569, 56596), 'numpy.zeros', 'np.zeros', (['(1, n_dim, n_dim)'], {}), '((1, n_dim, n_dim))\n', (56577, 56596), True, 'import numpy as np\n'), ((58388, 58407), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (58404, 58407), True, 'import numpy as np\n'), ((58519, 58598), 'qgs.integrators.integrate.integrate_runge_kutta', 'integrate.integrate_runge_kutta', (['fL63', '(0.0)', '(10000.0)', '(0.01)'], {'ic': 'ic', 'write_steps': '(0)'}), '(fL63, 0.0, 10000.0, 0.01, ic=ic, write_steps=0)\n', (58550, 58598), True, 'import qgs.integrators.integrate as integrate\n'), ((5896, 5927), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (5925, 
5927), False, 'import multiprocessing\n'), ((5955, 5978), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (5976, 5978), False, 'import multiprocessing\n'), ((13534, 13585), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_records)'], {}), '((self.n_traj, self.n_dim, self.n_records))\n', (13542, 13585), True, 'import numpy as np\n'), ((13615, 13678), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_dim, self.n_vec, self.n_records))\n', (13623, 13678), True, 'import numpy as np\n'), ((13708, 13759), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_vec, self.n_records))\n', (13716, 13759), True, 'import numpy as np\n'), ((19877, 19914), 'numpy.concatenate', 'np.concatenate', (['(time[:-1], posttime)'], {}), '((time[:-1], posttime))\n', (19891, 19914), True, 'import numpy as np\n'), ((21020, 21040), 'numpy.zeros', 'np.zeros', (['(1, n_dim)'], {}), '((1, n_dim))\n', (21028, 21040), True, 'import numpy as np\n'), ((21135, 21150), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (21143, 21150), True, 'import numpy as np\n'), ((22918, 22954), 'numpy.concatenate', 'np.concatenate', (['(pretime[:-1], time)'], {}), '((pretime[:-1], time))\n', (22932, 22954), True, 'import numpy as np\n'), ((24007, 24027), 'numpy.zeros', 'np.zeros', (['(1, n_dim)'], {}), '((1, n_dim))\n', (24015, 24027), True, 'import numpy as np\n'), ((24159, 24174), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (24167, 24174), True, 'import numpy as np\n'), ((33957, 33988), 'multiprocessing.JoinableQueue', 'multiprocessing.JoinableQueue', ([], {}), '()\n', (33986, 33988), False, 'import multiprocessing\n'), ((34015, 34038), 'multiprocessing.Queue', 'multiprocessing.Queue', ([], {}), '()\n', (34036, 34038), False, 'import multiprocessing\n'), ((40300, 40351), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_records)'], {}), 
'((self.n_traj, self.n_dim, self.n_records))\n', (40308, 40351), True, 'import numpy as np\n'), ((40381, 40444), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_dim, self.n_vec, self.n_records))\n', (40389, 40444), True, 'import numpy as np\n'), ((40474, 40525), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_vec, self.n_records))\n', (40482, 40525), True, 'import numpy as np\n'), ((51853, 51873), 'numpy.zeros', 'np.zeros', (['(1, n_dim)'], {}), '((1, n_dim))\n', (51861, 51873), True, 'import numpy as np\n'), ((52675, 52700), 'numpy.zeros', 'np.zeros', (['(tw + 1, n_dim)'], {}), '((tw + 1, n_dim))\n', (52683, 52700), True, 'import numpy as np\n'), ((52717, 52749), 'numpy.zeros', 'np.zeros', (['(tw + 1, n_dim, n_vec)'], {}), '((tw + 1, n_dim, n_vec))\n', (52725, 52749), True, 'import numpy as np\n'), ((52764, 52793), 'numpy.zeros', 'np.zeros', (['(tew, n_vec, n_vec)'], {}), '((tew, n_vec, n_vec))\n', (52772, 52793), True, 'import numpy as np\n'), ((54238, 54269), 'qgs.functions.util.normalize_matrix_columns', 'normalize_matrix_columns', (['qr[1]'], {}), '(qr[1])\n', (54262, 54269), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((55871, 55923), 'numpy.concatenate', 'np.concatenate', (['(pretime[:-1], time[:-1], aftertime)'], {}), '((pretime[:-1], time[:-1], aftertime))\n', (55885, 55923), True, 'import numpy as np\n'), ((57716, 57738), 'numpy.array', 'np.array', (['[xx, yy, zz]'], {}), '([xx, yy, zz])\n', (57724, 57738), True, 'import numpy as np\n'), ((57786, 57917), 'numpy.array', 'np.array', (['[[-a, -2.0 * x[1], -2.0 * x[2]], [x[1] - b * x[2], -1.0 + x[0], -b * x[0]],\n [b * x[1] + x[2], b * x[0], -1.0 + x[0]]]'], {}), '([[-a, -2.0 * x[1], -2.0 * x[2]], [x[1] - b * x[2], -1.0 + x[0], -b *\n x[0]], [b * x[1] + x[2], b * x[0], -1.0 + x[0]]])\n', (57794, 57917), True, 'import numpy as 
np\n'), ((58181, 58203), 'numpy.array', 'np.array', (['[xx, yy, zz]'], {}), '([xx, yy, zz])\n', (58189, 58203), True, 'import numpy as np\n'), ((58251, 58327), 'numpy.array', 'np.array', (['[[-sigma, sigma, 0.0], [r - x[2], -1.0, -x[0]], [x[1], x[0], -bb]]'], {}), '([[-sigma, sigma, 0.0], [r - x[2], -1.0, -x[0]], [x[1], x[0], -bb]])\n', (58259, 58327), True, 'import numpy as np\n'), ((4262, 4289), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (4287, 4289), False, 'import multiprocessing\n'), ((4444, 4474), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5, 1.0]'], {}), '([0.0, 0.5, 0.5, 1.0])\n', (4452, 4474), True, 'import numpy as np\n'), ((4494, 4540), 'numpy.array', 'np.array', (['[1.0 / 6, 1.0 / 3, 1.0 / 3, 1.0 / 6]'], {}), '([1.0 / 6, 1.0 / 3, 1.0 / 3, 1.0 / 6])\n', (4502, 4540), True, 'import numpy as np\n'), ((12279, 12290), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (12287, 12290), True, 'import numpy as np\n'), ((21067, 21099), 'numpy.random.random', 'np.random.random', (['(n_dim, n_vec)'], {}), '((n_dim, n_vec))\n', (21083, 21099), True, 'import numpy as np\n'), ((21394, 21521), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(-1)', '(0)', 'b', 'c', 'a', 'adjoint', 'inverse', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, -1, 0, b,\n c, a, adjoint, inverse, integrate._zeros_func)\n', (21435, 21521), True, 'import qgs.integrators.integrate as integrate\n'), ((21645, 21664), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (21657, 21664), True, 'import numpy as np\n'), ((22247, 22374), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(-1)', '(0)', 'b', 'c', 'a', 'adjoint', 'inverse', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, -1, 0, b,\n c, a, adjoint, inverse, integrate._zeros_func)\n', (22288, 
22374), True, 'import qgs.integrators.integrate as integrate\n'), ((22498, 22517), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (22510, 22517), True, 'import numpy as np\n'), ((24091, 24123), 'numpy.random.random', 'np.random.random', (['(n_dim, n_vec)'], {}), '((n_dim, n_vec))\n', (24107, 24123), True, 'import numpy as np\n'), ((24373, 24499), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(1)', '(0)', 'b', 'c', 'a', 'adjoint', 'inverse', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, 1, 0, b,\n c, a, adjoint, inverse, integrate._zeros_func)\n', (24414, 24499), True, 'import qgs.integrators.integrate as integrate\n'), ((24666, 24685), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (24678, 24685), True, 'import numpy as np\n'), ((25228, 25354), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(1)', '(0)', 'b', 'c', 'a', 'adjoint', 'inverse', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, 1, 0, b,\n c, a, adjoint, inverse, integrate._zeros_func)\n', (25269, 25354), True, 'import qgs.integrators.integrate as integrate\n'), ((25518, 25537), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (25530, 25537), True, 'import numpy as np\n'), ((30462, 30489), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (30487, 30489), False, 'import multiprocessing\n'), ((30644, 30674), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5, 1.0]'], {}), '([0.0, 0.5, 0.5, 1.0])\n', (30652, 30674), True, 'import numpy as np\n'), ((30694, 30740), 'numpy.array', 'np.array', (['[1.0 / 6, 1.0 / 3, 1.0 / 3, 1.0 / 6]'], {}), '([1.0 / 6, 1.0 / 3, 1.0 / 3, 1.0 / 6])\n', (30702, 30740), True, 'import numpy as np\n'), ((39352, 39363), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (39360, 39363), True, 'import numpy 
as np\n'), ((51926, 51955), 'numpy.random.randn', 'np.random.randn', (['n_dim', 'n_vec'], {}), '(n_dim, n_vec)\n', (51941, 51955), True, 'import numpy as np\n'), ((52016, 52032), 'numpy.diff', 'np.diff', (['pretime'], {}), '(pretime)\n', (52023, 52032), True, 'import numpy as np\n'), ((52154, 52272), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(1)', '(0)', 'b', 'c', 'a', '(False)', '(1)', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, 1, 0, b,\n c, a, False, 1, integrate._zeros_func)\n', (52195, 52272), True, 'import qgs.integrators.integrate as integrate\n'), ((52429, 52448), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (52441, 52448), True, 'import numpy as np\n'), ((53059, 53177), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(1)', '(0)', 'b', 'c', 'a', '(False)', '(1)', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, 1, 0, b,\n c, a, False, 1, integrate._zeros_func)\n', (53100, 53177), True, 'import qgs.integrators.integrate as integrate\n'), ((53334, 53353), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (53346, 53353), True, 'import numpy as np\n'), ((53740, 53858), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'Id', '(1)', '(0)', 'b', 'c', 'a', '(False)', '(1)', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, Id, 1, 0, b,\n c, a, False, 1, integrate._zeros_func)\n', (53781, 53858), True, 'import qgs.integrators.integrate as integrate\n'), ((54015, 54034), 'numpy.linalg.qr', 'np.linalg.qr', (['q_new'], {}), '(q_new)\n', (54027, 54034), True, 'import numpy as np\n'), ((54188, 54217), 'numpy.random.randn', 'np.random.randn', (['n_dim', 'n_vec'], {}), '(n_dim, n_vec)\n', (54203, 54217), True, 'import numpy as 
np\n'), ((54333, 54371), 'qgs.functions.util.solve_triangular_matrix', 'solve_triangular_matrix', (['tmp_R[ti]', 'am'], {}), '(tmp_R[ti], am)\n', (54356, 54371), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((54392, 54414), 'numpy.random.randn', 'np.random.randn', (['n_dim'], {}), '(n_dim)\n', (54407, 54414), True, 'import numpy as np\n'), ((54527, 54559), 'qgs.functions.util.normalize_matrix_columns', 'normalize_matrix_columns', (['am_new'], {}), '(am_new)\n', (54551, 54559), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((54826, 54864), 'qgs.functions.util.solve_triangular_matrix', 'solve_triangular_matrix', (['tmp_R[ti]', 'am'], {}), '(tmp_R[ti], am)\n', (54849, 54864), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((54885, 54907), 'numpy.random.randn', 'np.random.randn', (['n_vec'], {}), '(n_vec)\n', (54900, 54907), True, 'import numpy as np\n'), ((55024, 55056), 'qgs.functions.util.normalize_matrix_columns', 'normalize_matrix_columns', (['am_new'], {}), '(am_new)\n', (55048, 55056), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((57044, 57163), 'qgs.integrators.integrate._integrate_runge_kutta_tgls_jit', 'integrate._integrate_runge_kutta_tgls_jit', (['f', 'fjac', 'subtime', 'y', 'vec', '(1)', '(0)', 'b', 'c', 'a', '(False)', '(1)', 'integrate._zeros_func'], {}), '(f, fjac, subtime, y, vec, 1, 0, b,\n c, a, False, 1, integrate._zeros_func)\n', (57085, 57163), True, 'import qgs.integrators.integrate as integrate\n'), ((57256, 57297), 'qgs.functions.util.normalize_matrix_columns', 'normalize_matrix_columns', (['sol[0, :, :, 0]'], {}), '(sol[0, :, :, 0])\n', (57280, 57297), False, 'from qgs.functions.util import normalize_matrix_columns, solve_triangular_matrix, reverse\n'), ((12033, 12044), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', 
(12041, 12044), True, 'import numpy as np\n'), ((12644, 12665), 'numpy.arange', 'np.arange', (['t0', 'tw', 'dt'], {}), '(t0, tw, dt)\n', (12653, 12665), True, 'import numpy as np\n'), ((12667, 12684), 'numpy.full', 'np.full', (['(1,)', 'tw'], {}), '((1,), tw)\n', (12674, 12684), True, 'import numpy as np\n'), ((12724, 12744), 'numpy.arange', 'np.arange', (['tw', 't', 'dt'], {}), '(tw, t, dt)\n', (12733, 12744), True, 'import numpy as np\n'), ((12746, 12762), 'numpy.full', 'np.full', (['(1,)', 't'], {}), '((1,), t)\n', (12753, 12762), True, 'import numpy as np\n'), ((16084, 16115), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (16094, 16115), True, 'import numpy as np\n'), ((16117, 16147), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (16127, 16147), True, 'import numpy as np\n'), ((16170, 16200), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (16180, 16200), True, 'import numpy as np\n'), ((21212, 21230), 'numpy.diff', 'np.diff', (['rposttime'], {}), '(rposttime)\n', (21219, 21230), True, 'import numpy as np\n'), ((21777, 21791), 'numpy.diff', 'np.diff', (['rtime'], {}), '(rtime)\n', (21784, 21791), True, 'import numpy as np\n'), ((24234, 24250), 'numpy.diff', 'np.diff', (['pretime'], {}), '(pretime)\n', (24241, 24250), True, 'import numpy as np\n'), ((24796, 24809), 'numpy.diff', 'np.diff', (['time'], {}), '(time)\n', (24803, 24809), True, 'import numpy as np\n'), ((39106, 39117), 'numpy.zeros', 'np.zeros', (['i'], {}), '(i)\n', (39114, 39117), True, 'import numpy as np\n'), ((39781, 39802), 'numpy.arange', 'np.arange', (['t0', 'ta', 'dt'], {}), '(t0, ta, dt)\n', (39790, 39802), True, 'import numpy as np\n'), ((39804, 39821), 'numpy.full', 'np.full', (['(1,)', 'ta'], {}), '((1,), ta)\n', (39811, 39821), True, 'import numpy as np\n'), ((39861, 39882), 'numpy.arange', 'np.arange', (['ta', 'tb', 'dt'], {}), '(ta, tb, dt)\n', (39870, 39882), 
True, 'import numpy as np\n'), ((39884, 39901), 'numpy.full', 'np.full', (['(1,)', 'tb'], {}), '((1,), tb)\n', (39891, 39901), True, 'import numpy as np\n'), ((39946, 39967), 'numpy.arange', 'np.arange', (['tb', 'tc', 'dt'], {}), '(tb, tc, dt)\n', (39955, 39967), True, 'import numpy as np\n'), ((39969, 39986), 'numpy.full', 'np.full', (['(1,)', 'tc'], {}), '((1,), tc)\n', (39976, 39986), True, 'import numpy as np\n'), ((40625, 40688), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_dim, self.n_vec, self.n_records))\n', (40633, 40688), True, 'import numpy as np\n'), ((40760, 40823), 'numpy.zeros', 'np.zeros', (['(self.n_traj, self.n_dim, self.n_vec, self.n_records)'], {}), '((self.n_traj, self.n_dim, self.n_vec, self.n_records))\n', (40768, 40823), True, 'import numpy as np\n'), ((43281, 43312), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (43291, 43312), True, 'import numpy as np\n'), ((43314, 43344), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (43324, 43344), True, 'import numpy as np\n'), ((43367, 43397), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (43377, 43397), True, 'import numpy as np\n'), ((45337, 45368), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (45347, 45368), True, 'import numpy as np\n'), ((45370, 45400), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (45380, 45400), True, 'import numpy as np\n'), ((45423, 45454), 'numpy.squeeze', 'np.squeeze', (['self._recorded_bvec'], {}), '(self._recorded_bvec)\n', (45433, 45454), True, 'import numpy as np\n'), ((47394, 47425), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (47404, 47425), True, 'import numpy as np\n'), ((47427, 47457), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), 
'(self._recorded_exp)\n', (47437, 47457), True, 'import numpy as np\n'), ((47480, 47511), 'numpy.squeeze', 'np.squeeze', (['self._recorded_fvec'], {}), '(self._recorded_fvec)\n', (47490, 47511), True, 'import numpy as np\n'), ((52848, 52861), 'numpy.diff', 'np.diff', (['time'], {}), '(time)\n', (52855, 52861), True, 'import numpy as np\n'), ((53599, 53617), 'numpy.diff', 'np.diff', (['aftertime'], {}), '(aftertime)\n', (53606, 53617), True, 'import numpy as np\n'), ((54692, 54705), 'numpy.diff', 'np.diff', (['time'], {}), '(time)\n', (54699, 54705), True, 'import numpy as np\n'), ((54707, 54749), 'numpy.full', 'np.full', (['(1,)', '(aftertime[1] - aftertime[0])'], {}), '((1,), aftertime[1] - aftertime[0])\n', (54714, 54749), True, 'import numpy as np\n'), ((56728, 56806), 'numpy.linalg.svd', 'np.linalg.svd', (['(bvec[i_traj, :, :j + 1, ti].T @ fvec[i_traj, :, :n_dim - j, ti])'], {}), '(bvec[i_traj, :, :j + 1, ti].T @ fvec[i_traj, :, :n_dim - j, ti])\n', (56741, 56806), True, 'import numpy as np\n'), ((15695, 15726), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (15705, 15726), True, 'import numpy as np\n'), ((15728, 15758), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (15738, 15758), True, 'import numpy as np\n'), ((15785, 15815), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (15795, 15815), True, 'import numpy as np\n'), ((15947, 15978), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (15957, 15978), True, 'import numpy as np\n'), ((15980, 16010), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (15990, 16010), True, 'import numpy as np\n'), ((16012, 16042), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (16022, 16042), True, 'import numpy as np\n'), ((19549, 19574), 'numpy.squeeze', 'np.squeeze', (['recorded_traj'], {}), 
'(recorded_traj)\n', (19559, 19574), True, 'import numpy as np\n'), ((19576, 19600), 'numpy.squeeze', 'np.squeeze', (['recorded_exp'], {}), '(recorded_exp)\n', (19586, 19600), True, 'import numpy as np\n'), ((19636, 19660), 'numpy.squeeze', 'np.squeeze', (['recorded_vec'], {}), '(recorded_vec)\n', (19646, 19660), True, 'import numpy as np\n'), ((21319, 21346), 'numpy.arange', 'np.arange', (['(tt + dt)', 'tt', 'mdt'], {}), '(tt + dt, tt, mdt)\n', (21328, 21346), True, 'import numpy as np\n'), ((21348, 21365), 'numpy.full', 'np.full', (['(1,)', 'tt'], {}), '((1,), tt)\n', (21355, 21365), True, 'import numpy as np\n'), ((21924, 21947), 'numpy.mod', 'np.mod', (['ti', 'write_steps'], {}), '(ti, write_steps)\n', (21930, 21947), True, 'import numpy as np\n'), ((22172, 22199), 'numpy.arange', 'np.arange', (['(tt + dt)', 'tt', 'mdt'], {}), '(tt + dt, tt, mdt)\n', (22181, 22199), True, 'import numpy as np\n'), ((22201, 22218), 'numpy.full', 'np.full', (['(1,)', 'tt'], {}), '((1,), tt)\n', (22208, 22218), True, 'import numpy as np\n'), ((24293, 24320), 'numpy.arange', 'np.arange', (['tt', '(tt + dt)', 'mdt'], {}), '(tt, tt + dt, mdt)\n', (24302, 24320), True, 'import numpy as np\n'), ((24322, 24344), 'numpy.full', 'np.full', (['(1,)', '(tt + dt)'], {}), '((1,), tt + dt)\n', (24329, 24344), True, 'import numpy as np\n'), ((24900, 24923), 'numpy.mod', 'np.mod', (['ti', 'write_steps'], {}), '(ti, write_steps)\n', (24906, 24923), True, 'import numpy as np\n'), ((25148, 25175), 'numpy.arange', 'np.arange', (['tt', '(tt + dt)', 'mdt'], {}), '(tt, tt + dt, mdt)\n', (25157, 25175), True, 'import numpy as np\n'), ((25177, 25199), 'numpy.full', 'np.full', (['(1,)', '(tt + dt)'], {}), '((1,), tt + dt)\n', (25184, 25199), True, 'import numpy as np\n'), ((42868, 42899), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (42878, 42899), True, 'import numpy as np\n'), ((42901, 42931), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), 
'(self._recorded_exp)\n', (42911, 42931), True, 'import numpy as np\n'), ((42958, 42988), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (42968, 42988), True, 'import numpy as np\n'), ((43136, 43167), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (43146, 43167), True, 'import numpy as np\n'), ((43169, 43199), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (43179, 43199), True, 'import numpy as np\n'), ((43201, 43231), 'numpy.squeeze', 'np.squeeze', (['self._recorded_vec'], {}), '(self._recorded_vec)\n', (43211, 43231), True, 'import numpy as np\n'), ((44922, 44953), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (44932, 44953), True, 'import numpy as np\n'), ((44955, 44985), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (44965, 44985), True, 'import numpy as np\n'), ((45012, 45043), 'numpy.squeeze', 'np.squeeze', (['self._recorded_bvec'], {}), '(self._recorded_bvec)\n', (45022, 45043), True, 'import numpy as np\n'), ((45191, 45222), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (45201, 45222), True, 'import numpy as np\n'), ((45224, 45254), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (45234, 45254), True, 'import numpy as np\n'), ((45256, 45287), 'numpy.squeeze', 'np.squeeze', (['self._recorded_bvec'], {}), '(self._recorded_bvec)\n', (45266, 45287), True, 'import numpy as np\n'), ((46979, 47010), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (46989, 47010), True, 'import numpy as np\n'), ((47012, 47042), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (47022, 47042), True, 'import numpy as np\n'), ((47069, 47100), 'numpy.squeeze', 'np.squeeze', (['self._recorded_fvec'], {}), 
'(self._recorded_fvec)\n', (47079, 47100), True, 'import numpy as np\n'), ((47248, 47279), 'numpy.squeeze', 'np.squeeze', (['self._recorded_traj'], {}), '(self._recorded_traj)\n', (47258, 47279), True, 'import numpy as np\n'), ((47281, 47311), 'numpy.squeeze', 'np.squeeze', (['self._recorded_exp'], {}), '(self._recorded_exp)\n', (47291, 47311), True, 'import numpy as np\n'), ((47313, 47344), 'numpy.squeeze', 'np.squeeze', (['self._recorded_fvec'], {}), '(self._recorded_fvec)\n', (47323, 47344), True, 'import numpy as np\n'), ((52074, 52101), 'numpy.arange', 'np.arange', (['tt', '(tt + dt)', 'mdt'], {}), '(tt, tt + dt, mdt)\n', (52083, 52101), True, 'import numpy as np\n'), ((52103, 52125), 'numpy.full', 'np.full', (['(1,)', '(tt + dt)'], {}), '((1,), tt + dt)\n', (52110, 52125), True, 'import numpy as np\n'), ((52979, 53006), 'numpy.arange', 'np.arange', (['tt', '(tt + dt)', 'mdt'], {}), '(tt, tt + dt, mdt)\n', (52988, 53006), True, 'import numpy as np\n'), ((53008, 53030), 'numpy.full', 'np.full', (['(1,)', '(tt + dt)'], {}), '((1,), tt + dt)\n', (53015, 53030), True, 'import numpy as np\n'), ((53660, 53687), 'numpy.arange', 'np.arange', (['tt', '(tt + dt)', 'mdt'], {}), '(tt, tt + dt, mdt)\n', (53669, 53687), True, 'import numpy as np\n'), ((53689, 53711), 'numpy.full', 'np.full', (['(1,)', '(tt + dt)'], {}), '((1,), tt + dt)\n', (53696, 53711), True, 'import numpy as np\n'), ((55093, 55121), 'numpy.mod', 'np.mod', (['(tw - ti)', 'write_steps'], {}), '(tw - ti, write_steps)\n', (55099, 55121), True, 'import numpy as np\n'), ((55455, 55471), 'numpy.abs', 'np.abs', (['mloc_exp'], {}), '(mloc_exp)\n', (55461, 55471), True, 'import numpy as np\n'), ((57347, 57363), 'numpy.abs', 'np.abs', (['mloc_exp'], {}), '(mloc_exp)\n', (57353, 57363), True, 'import numpy as np\n'), ((21872, 21882), 'numpy.diag', 'np.diag', (['r'], {}), '(r)\n', (21879, 21882), True, 'import numpy as np\n'), ((24848, 24858), 'numpy.diag', 'np.diag', (['r'], {}), '(r)\n', (24855, 24858), True, 
'import numpy as np\n'), ((50256, 50281), 'numpy.squeeze', 'np.squeeze', (['recorded_traj'], {}), '(recorded_traj)\n', (50266, 50281), True, 'import numpy as np\n'), ((50283, 50307), 'numpy.squeeze', 'np.squeeze', (['recorded_exp'], {}), '(recorded_exp)\n', (50293, 50307), True, 'import numpy as np\n'), ((50346, 50370), 'numpy.squeeze', 'np.squeeze', (['recorded_vec'], {}), '(recorded_vec)\n', (50356, 50370), True, 'import numpy as np\n'), ((50881, 50906), 'numpy.squeeze', 'np.squeeze', (['recorded_traj'], {}), '(recorded_traj)\n', (50891, 50906), True, 'import numpy as np\n'), ((50908, 50932), 'numpy.squeeze', 'np.squeeze', (['recorded_exp'], {}), '(recorded_exp)\n', (50918, 50932), True, 'import numpy as np\n'), ((50971, 50995), 'numpy.squeeze', 'np.squeeze', (['recorded_vec'], {}), '(recorded_vec)\n', (50981, 50995), True, 'import numpy as np\n'), ((50997, 51021), 'numpy.squeeze', 'np.squeeze', (['backward_vec'], {}), '(backward_vec)\n', (51007, 51021), True, 'import numpy as np\n'), ((51023, 51046), 'numpy.squeeze', 'np.squeeze', (['forward_vec'], {}), '(forward_vec)\n', (51033, 51046), True, 'import numpy as np\n'), ((15897, 15918), 'numpy.full', 'np.full', (['(1,)', 'tt[-1]'], {}), '((1,), tt[-1])\n', (15904, 15918), True, 'import numpy as np\n'), ((43078, 43107), 'numpy.full', 'np.full', (['(1,)', 'self._time[-1]'], {}), '((1,), self._time[-1])\n', (43085, 43107), True, 'import numpy as np\n'), ((45133, 45162), 'numpy.full', 'np.full', (['(1,)', 'self._time[-1]'], {}), '((1,), self._time[-1])\n', (45140, 45162), True, 'import numpy as np\n'), ((47190, 47219), 'numpy.full', 'np.full', (['(1,)', 'self._time[-1]'], {}), '((1,), self._time[-1])\n', (47197, 47219), True, 'import numpy as np\n'), ((55242, 55258), 'numpy.abs', 'np.abs', (['mloc_exp'], {}), '(mloc_exp)\n', (55248, 55258), True, 'import numpy as np\n')] |
import numpy as np
def pack_selector_from_mask(boolarray):
    """
    Convert a boolean mask into a packed selector: contiguous True runs
    become slices (remember that tiledb multi_index requires INCLUSIVE
    indices), isolated True positions become bare integers.

    A mask of None means "select everything" and returns slice(None).
    """
    if boolarray is None:
        return slice(None)
    # isinstance (not type(...) ==) so np.ndarray subclasses are accepted.
    assert isinstance(boolarray, np.ndarray)
    assert boolarray.dtype == bool
    selector = np.nonzero(boolarray)[0]
    return pack_selector_from_indices(selector)
def pack_selector_from_indices(selector):
    """Collapse a sorted index sequence into a packed selector list.

    Each contiguous run of indices becomes an INCLUSIVE slice; a run of
    length one is emitted as a bare index.  An empty selector yields None.
    """
    if len(selector) == 0:
        return None

    def emit(run_start, run_stop):
        # Single-element runs are reported as scalars, not slices.
        return run_start if run_start == run_stop else slice(run_start, run_stop)

    packed = []
    start = stop = selector[0]
    for idx in selector[1:]:
        if idx == stop + 1:
            stop = idx  # extend the current contiguous run
        else:
            packed.append(emit(start, stop))
            start = stop = idx
    packed.append(emit(start, stop))
    return packed
| [
"numpy.nonzero"
] | [((337, 358), 'numpy.nonzero', 'np.nonzero', (['boolarray'], {}), '(boolarray)\n', (347, 358), True, 'import numpy as np\n')] |
# The MIT License (MIT) # Copyright (c) 2014-2017 University of Bristol
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
import unittest
from datetime import datetime, timedelta
from sklearn.datasets import load_iris
import numpy as np
from hyperstream import TimeInterval
from .helpers import *
# nan marks minibatches for which no anomaly probability is defined yet
# (e.g. warm-up batches before the model has seen enough data).
nan = float('nan')
# Reference per-minibatch mean anomaly probabilities for the iris stream
# (seed=42, epochs=10, batchsize=2); compared to 2 decimal places in
# TestAnomalies.test_data_generators below.
true_means = [
    nan, nan , nan, 0.92, 0.93, 0.57, 0.52, 0.91, 0.81, 0.41, 0.84, 0.53,
    0.77, 0.88, 0.95, 0.79, 0.58, 0.84, 0.69, 0.82, 0.63, 0.55, 0.99, 0.9 ,
    0.89, 0.65, 0.61, 0.83, 0.87, 0.86, 0.78, 0.75, 0.74, 0.66, 0.84, 0.92,
    0.81, 0.9 , 0.99, 0.72, 0.78, 0.84, 0.6 , 0.81, 0.62, 0.83, 0.6 , 0.42,
    0.87, 0.6 , 0.8 , 0.89, 0.77, 0.88, 0.72, 0.75, 0.81, 0.42, 0.81, 0.67,
    0.87, 0.89, 0.8 , 0.56, 0.67, 0.8 , 0.74, 0.8 , 0.71, 0.66, 0.75, 0.82,
    0.73, 0.82, 0.87, 0.98, 0.75, 0.95, 0.71, 0.73, 0.72, 0.55, 0.83, 0.83,
    0.33, 0.81, 0.48, 0.77, 0.88, 0.93, 0.78, 0.6 , 0.83, 0.69, 0.76, 0.64,
    0.53, 0.98, 0.87, 0.82, 0.63, 0.55, 0.85, 0.81, 0.81, 0.74, 0.78, 0.71,
    0.66, 0.82, 0.91, 0.8 , 0.89, 0.98, 0.73, 0.78, 0.84, 0.59, 0.85, 0.61,
    0.83, 0.58, 0.42, 0.85, 0.57, 0.79, 0.91, 0.75, 0.88, 0.71, 0.73, 0.8 ,
    0.42, 0.82, 0.68, 0.87, 0.88, 0.8 , 0.56, 0.67, 0.8 , 0.75, 0.81, 0.71,
    0.67, 0.76, 0.82, 0.73, 0.82, 0.87, 0.98, 0.75, 0.95, 0.71, 0.74, 0.73,
    0.56, 0.83, 0.83, 0.33, 0.81, 0.48, 0.77, 0.88, 0.93, 0.78, 0.61, 0.83,
    0.69, 0.76, 0.64, 0.53, 0.97, 0.86, 0.82, 0.62, 0.54, 0.85, 0.8 , 0.8 ,
    0.74, 0.78, 0.71, 0.66, 0.82, 0.9 , 0.8 , 0.89, 0.98, 0.74, 0.78, 0.85,
    0.59, 0.86, 0.61, 0.83, 0.58, 0.42, 0.85, 0.56, 0.79, 0.91, 0.75, 0.88,
    0.71, 0.73, 0.8 , 0.42, 0.82, 0.68, 0.87, 0.87, 0.8 , 0.56, 0.68, 0.8 ,
    0.75, 0.81, 0.72, 0.68, 0.76, 0.82, 0.73, 0.82, 0.87, 0.98, 0.75, 0.95,
    0.71, 0.74, 0.73, 0.56, 0.83, 0.84, 0.33, 0.81, 0.48, 0.77, 0.88, 0.93,
    0.78, 0.61, 0.83, 0.69, 0.75, 0.64, 0.53, 0.97, 0.86, 0.81, 0.62, 0.54,
    0.85, 0.8 , 0.8 , 0.73, 0.78, 0.71, 0.66, 0.82, 0.9 , 0.8 , 0.89, 0.98,
    0.74, 0.78, 0.85, 0.59, 0.86, 0.61, 0.83, 0.58, 0.42, 0.85, 0.56, 0.79,
    0.91, 0.75, 0.88, 0.71, 0.73, 0.79, 0.42, 0.82, 0.68, 0.87, 0.87, 0.8 ,
    0.56, 0.68, 0.8 , 0.75, 0.81, 0.72, 0.68, 0.76, 0.82, 0.73, 0.82, 0.87,
    0.98, 0.75, 0.95, 0.71, 0.74, 0.73, 0.56, 0.82, 0.84, 0.33, 0.81, 0.48,
    0.78, 0.88, 0.93, 0.78, 0.61, 0.83, 0.69, 0.75, 0.64, 0.53, 0.97, 0.86,
    0.81, 0.62, 0.54, 0.85, 0.8 , 0.8 , 0.73, 0.79, 0.71, 0.66, 0.82, 0.9 ,
    0.8 , 0.89, 0.98, 0.74, 0.78, 0.85, 0.59, 0.87, 0.61, 0.83, 0.58, 0.42,
    0.85, 0.56, 0.79, 0.91, 0.75, 0.88, 0.71, 0.73, 0.79, 0.42, 0.82, 0.68,
    0.87, 0.87, 0.8 , 0.56, 0.68, 0.8 , 0.75, 0.82, 0.72, 0.68, 0.76, 0.82,
    0.73, 0.82, 0.87]
class TestAnomalies(unittest.TestCase):
    """Integration test for a hyperstream anomaly-detection pipeline:
    dataset stream -> minibatch stream -> anomaly-detector stream.
    """

    def run(self, result=None):
        # Wrap every test execution in resource_manager() so a fresh
        # hyperstream instance (self.hs) is set up and torn down per run.
        with resource_manager() as resource:
            self.hs = resource
            super(TestAnomalies, self).run(result)

    def test_data_generators(self):
        """Run the full pipeline on the iris dataset and compare the
        per-minibatch mean anomaly probabilities against true_means.
        """
        M = self.hs.channel_manager.memory
        T = self.hs.plugins.sklearn.tools

        data = load_iris()
        epochs = 10
        seed = 42
        batchsize = 2

        # Shuffled iris data, repeated `epochs` times, fed as a stream.
        data_tool = T.dataset(data, shuffle=True, epochs=epochs, seed=seed)
        data_stream = M.get_or_create_stream('dataset')

        model = 'Gaussian'
        anomaly_detector_tool = T.anomaly_detector(model)
        anomaly_detector_stream = M.get_or_create_stream('anomaly_detector')

        # Time interval from the epoch up to one hour ago; the tools
        # timestamp the generated items inside this window.
        now = datetime.utcnow()
        now = (now - timedelta(hours=1))
        before = datetime.utcfromtimestamp(0)
        ti = TimeInterval(before, now)

        data_tool.execute(sources=[], sink=data_stream, interval=ti)

        print("Example of a data stream")
        key, value = next(iter(data_stream.window()))
        print('[%s]: %s' % (key, value))

        mini_batch_tool = T.minibatch(batchsize=batchsize)
        mini_batch_stream = M.get_or_create_stream('mini_batch')
        mini_batch_tool.execute(sources=[data_stream], sink=mini_batch_stream,
                                interval=ti)

        anomaly_detector_tool.execute(sources=[mini_batch_stream],
                                      sink=anomaly_detector_stream, interval=ti)

        probas = []
        for key, value in anomaly_detector_stream.window():
            probas.append(value['proba'])

        # The data is repeated the number of epochs. This makes the mini-batches to
        # cycle and contain data from the beginning and end of the dataset. This
        # makes possible that the number of scores is not divisible by epochs.
        probas = np.array(probas)
        print(probas.shape)
        # nanmean ignores warm-up batches that produced NaN probabilities.
        means = np.array([np.nanmean(aux) for aux in probas])
        np.testing.assert_almost_equal(true_means, means, decimal=2)
        print(means.shape)
        print("Test probabilities per minibatch (cyclic)")
        print(means.round(decimals=2))
| [
"sklearn.datasets.load_iris",
"datetime.datetime.utcfromtimestamp",
"datetime.datetime.utcnow",
"numpy.array",
"hyperstream.TimeInterval",
"numpy.testing.assert_almost_equal",
"numpy.nanmean",
"datetime.timedelta"
] | [((4352, 4363), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (4361, 4363), False, 'from sklearn.datasets import load_iris\n'), ((4735, 4752), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4750, 4752), False, 'from datetime import datetime, timedelta\n'), ((4811, 4839), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(0)'], {}), '(0)\n', (4836, 4839), False, 'from datetime import datetime, timedelta\n'), ((4853, 4878), 'hyperstream.TimeInterval', 'TimeInterval', (['before', 'now'], {}), '(before, now)\n', (4865, 4878), False, 'from hyperstream import TimeInterval\n'), ((5870, 5886), 'numpy.array', 'np.array', (['probas'], {}), '(probas)\n', (5878, 5886), True, 'import numpy as np\n'), ((5985, 6045), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['true_means', 'means'], {'decimal': '(2)'}), '(true_means, means, decimal=2)\n', (6015, 6045), True, 'import numpy as np\n'), ((4774, 4792), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (4783, 4792), False, 'from datetime import datetime, timedelta\n'), ((5941, 5956), 'numpy.nanmean', 'np.nanmean', (['aux'], {}), '(aux)\n', (5951, 5956), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import itertools
rng = np.random.RandomState(42)
def svdw(m):
    """Decompose a square matrix as m = (u @ diag(s) @ u.T) @ w.

    u is the orthogonal left-singular basis, s the singular values, and
    w = u @ vt is orthogonal.  All three identities are re-checked with
    assertions before returning (u, s, w).
    """
    n = m.shape[0]
    assert m.shape == (n, n)
    u, s, vt = np.linalg.svd(m)
    w = u.dot(vt)
    identity = np.eye(n)
    # Sanity-check orthogonality of both factors and the reconstruction.
    for q in (u, w):
        assert np.allclose(q.T.dot(q), identity)
    assert np.allclose(u.dot(np.diag(s)).dot(u.T).dot(w), m)
    return u, s, w
def check_eq(msg, a, b):
    """Assert that a and b agree elementwise to within 1e-5.

    On failure the AssertionError carries (msg, max_abs_difference).
    """
    worst = np.max(np.abs(a - b))
    assert worst < 1e-5, (msg, worst)
def svdw_jacobian(M, u, s, w):
    """Analytic Jacobians of the svdw decomposition at M.

    Given (u, s, w) = svdw(M), returns (dU/dM, dS/dM, dW/dM) as
    flattened matrices of shapes (n*n, n*n), (n, n*n), (n*n, n*n),
    where entry [i*n+j, k*n+l] is d(out[i, j]) / d(M[k, l]).

    NOTE(review): the formulas appear to come from perturbation theory of
    the SVD via intermediate variables x, y; dxdm/dydm divide by
    (s[j]**2 - s[i]**2) and (s[i] + s[j]), so they presumably assume
    distinct, non-degenerate singular values — confirm before using near
    repeated singular values.
    """
    n = M.shape[0]
    assert M.shape == u.shape == w.shape == (n, n)
    v = w.T @ u
    # Singular-value sensitivity: dS[i]/dM[j, k] = u.T[i, j] * v[k, i].
    dsdm = np.empty((n, n*n), dtype=M.dtype)
    for i in range(n):
        for j in range(n):
            for k in range(n):
                dsdm[i, j*n+k] = u.T[i, j] * v[k, i]
    # Chain-rule pieces: dW/dM = dW/dY @ dY/dM, dU/dM = dU/dX @ dX/dM.
    dwdy = np.empty((n*n, n*n), dtype=M.dtype)
    dydm = np.empty_like(dwdy)
    dudx = np.empty_like(dwdy)
    dxdm = np.empty_like(dwdy)
    for i, j, k, l in itertools.product(range(n), range(n), range(n), range(n)):
        cij = u.T[i, k] * v[l, j]
        cji = u.T[j, k] * v[l, i]
        # Diagonal (i == j) entries are zero: the intermediate variables
        # parameterize only off-diagonal rotations.
        dydm[i*n+j, k*n+l] = 0 if i == j else (cij - cji) / (s[i] + s[j])
        dwdy[i*n+j, k*n+l] = u[i, k] * v.T[l, j]
        dudx[i*n+j, k*n+l] = 0 if l != j else u[i, k]
        dxdm[i*n+j, k*n+l] = 0 if i == j else (
            cij * s[j] + cji * s[i]) / (s[j]**2 - s[i]**2)
    return dudx @ dxdm, dsdm, dwdy @ dydm
def svdw_jacobian_num(M, u, s, w, eps=1e-4):
    """Numerically estimate (dU/dM, dS/dM, dW/dM) by central differences.

    Each entry of M is perturbed by +/- eps in place (and restored), and
    svdw is re-run to difference the outputs.  The u/s/w arguments are
    unused; they are kept only so the signature mirrors svdw_jacobian.
    """
    n = M.shape[0]
    assert M.shape == (n, n)
    dudm = np.zeros((n * n, n * n), dtype=M.dtype)
    dsdm = np.zeros((n, n * n), dtype=M.dtype)
    dwdm = np.zeros((n * n, n * n), dtype=M.dtype)

    def central_diff(lo, hi):
        # Two-sided finite difference with step 2*eps.
        return (hi - lo).flatten() / (eps * 2)

    for i, j in itertools.product(range(n), range(n)):
        saved = M[i, j]
        M[i, j] = saved - eps
        u_lo, s_lo, w_lo = svdw(M)
        M[i, j] = saved + eps
        u_hi, s_hi, w_hi = svdw(M)
        M[i, j] = saved  # restore the perturbed entry
        col = i * n + j
        dudm[:, col] = central_diff(u_lo, u_hi)
        dsdm[:, col] = central_diff(s_lo, s_hi)
        dwdm[:, col] = central_diff(w_lo, w_hi)
    return dudm, dsdm, dwdm
def main():
    """Compare the analytic svdw Jacobians against numerical estimates
    on a random 8x8 matrix, printing determinants, ranks and errors.
    """
    np.set_printoptions(4, suppress=True)
    n = 8
    m = rng.normal(size=(n, n))
    u, s, w = svdw(m)
    print('det(m):', np.linalg.det(m))
    print('s:', s)
    # Pair each analytic Jacobian with its finite-difference counterpart.
    for gsym, gnum, name in zip(svdw_jacobian(m, u, s, w),
                                svdw_jacobian_num(m, u, s, w),
                                ['dU/dM', 'dS/dM', 'dW/dM']):
        print(f'====== {name}')
        if gsym.shape[0] == gsym.shape[1]:
            # det/rank only make sense for the square (n*n, n*n) Jacobians.
            print('grad det:', np.linalg.det(gsym))
            print('grad rank:', np.linalg.matrix_rank(gsym))
        # Mean absolute error, raw and relative to the numerical magnitude.
        diff = np.abs(gsym - gnum).mean()
        print(diff, diff / np.abs(gnum).mean())


if __name__ == '__main__':
    main()
| [
"numpy.abs",
"numpy.eye",
"numpy.linalg.matrix_rank",
"numpy.linalg.det",
"numpy.diag",
"numpy.zeros",
"numpy.empty_like",
"numpy.empty",
"numpy.linalg.svd",
"numpy.random.RandomState",
"numpy.set_printoptions"
] | [((91, 116), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (112, 116), True, 'import numpy as np\n'), ((194, 210), 'numpy.linalg.svd', 'np.linalg.svd', (['m'], {}), '(m)\n', (207, 210), True, 'import numpy as np\n'), ((605, 640), 'numpy.empty', 'np.empty', (['(n, n * n)'], {'dtype': 'M.dtype'}), '((n, n * n), dtype=M.dtype)\n', (613, 640), True, 'import numpy as np\n'), ((785, 824), 'numpy.empty', 'np.empty', (['(n * n, n * n)'], {'dtype': 'M.dtype'}), '((n * n, n * n), dtype=M.dtype)\n', (793, 824), True, 'import numpy as np\n'), ((832, 851), 'numpy.empty_like', 'np.empty_like', (['dwdy'], {}), '(dwdy)\n', (845, 851), True, 'import numpy as np\n'), ((863, 882), 'numpy.empty_like', 'np.empty_like', (['dwdy'], {}), '(dwdy)\n', (876, 882), True, 'import numpy as np\n'), ((894, 913), 'numpy.empty_like', 'np.empty_like', (['dwdy'], {}), '(dwdy)\n', (907, 913), True, 'import numpy as np\n'), ((1495, 1534), 'numpy.zeros', 'np.zeros', (['(n * n, n * n)'], {'dtype': 'M.dtype'}), '((n * n, n * n), dtype=M.dtype)\n', (1503, 1534), True, 'import numpy as np\n'), ((1542, 1577), 'numpy.zeros', 'np.zeros', (['(n, n * n)'], {'dtype': 'M.dtype'}), '((n, n * n), dtype=M.dtype)\n', (1550, 1577), True, 'import numpy as np\n'), ((1587, 1626), 'numpy.zeros', 'np.zeros', (['(n * n, n * n)'], {'dtype': 'M.dtype'}), '((n * n, n * n), dtype=M.dtype)\n', (1595, 1626), True, 'import numpy as np\n'), ((2087, 2124), 'numpy.set_printoptions', 'np.set_printoptions', (['(4)'], {'suppress': '(True)'}), '(4, suppress=True)\n', (2106, 2124), True, 'import numpy as np\n'), ((258, 267), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (264, 267), True, 'import numpy as np\n'), ((301, 310), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (307, 310), True, 'import numpy as np\n'), ((2210, 2226), 'numpy.linalg.det', 'np.linalg.det', (['m'], {}), '(m)\n', (2223, 2226), True, 'import numpy as np\n'), ((420, 433), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (426, 
433), True, 'import numpy as np\n'), ((2537, 2556), 'numpy.linalg.det', 'np.linalg.det', (['gsym'], {}), '(gsym)\n', (2550, 2556), True, 'import numpy as np\n'), ((2590, 2617), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['gsym'], {}), '(gsym)\n', (2611, 2617), True, 'import numpy as np\n'), ((2634, 2653), 'numpy.abs', 'np.abs', (['(gsym - gnum)'], {}), '(gsym - gnum)\n', (2640, 2653), True, 'import numpy as np\n'), ((339, 349), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (346, 349), True, 'import numpy as np\n'), ((2688, 2700), 'numpy.abs', 'np.abs', (['gnum'], {}), '(gnum)\n', (2694, 2700), True, 'import numpy as np\n')] |
import numpy as np
class TwoStepDom(object):
    '''Two-step dominance rankings computed from weekly matchup results.

    Blends the squared win matrix (strength of beaten opponents) with the
    linear win matrix, then normalises so the league-average rank is 1.
    '''

    def __init__(self, N_teams, week, sq_weight=0.25, decay_penalty=0.5):
        self.w_sq = sq_weight
        self.w_l = 1. - sq_weight
        self.win_matrix = np.zeros(shape=(N_teams, N_teams))
        self.week = week
        self.dp = decay_penalty

    def _calc_win_matrix(self, teams):
        '''Accumulate decay-weighted win credits into the win matrix.'''
        for row, team in enumerate(teams):
            margins = team.stats.mov[:self.week]
            opponents = team.stats.schedule[:self.week]
            for week_idx, (margin, opponent) in enumerate(zip(margins, opponents)):
                if margin <= 0:
                    continue  # only wins (positive margin of victory) count
                col = int(opponent.teamId) - 1
                # Older wins are worth less: the oldest game is weighted
                # (1 - decay_penalty); weights ramp up toward the newest week.
                credit = (1 - self.dp) + (self.dp * week_idx) / float(self.week)
                self.win_matrix[row][col] += credit

    def _calc_two_step_dom(self, teams):
        '''Blend linear and squared win matrices; store normalised ranks.'''
        blended = (self.w_sq * np.linalg.matrix_power(self.win_matrix, 2)
                   + self.w_l * self.win_matrix)
        # Each team's dominance score is its row sum of the blended matrix.
        for row, team in zip(blended, teams):
            team.rank.dom = sum(row)
        # Normalise so the average dominance rank across the league is 1.
        dom_values = [team.rank.dom for team in teams]
        mean_dom = float(sum(dom_values)) / len(dom_values)
        for team in teams:
            team.rank.dom /= mean_dom

    def get_ranks(self, teams):
        '''Populate team.rank.dom for every team from the 2SD matrix.'''
        self._calc_win_matrix(teams)
        self._calc_two_step_dom(teams)
| [
"numpy.zeros",
"numpy.linalg.matrix_power"
] | [((276, 310), 'numpy.zeros', 'np.zeros', ([], {'shape': '(N_teams, N_teams)'}), '(shape=(N_teams, N_teams))\n', (284, 310), True, 'import numpy as np\n'), ((1219, 1261), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['self.win_matrix', '(2)'], {}), '(self.win_matrix, 2)\n', (1241, 1261), True, 'import numpy as np\n')] |
import io
from logging import raiseExceptions
from typing import Any
import magic
import numpy as np
import pandas as pd
from icecream import ic
from PIL import Image
import cv2
from .file_management import get_buffer_category, get_buffer_type, get_mime_category
from pathlib import Path
def _open(input, btype=None, options=None) -> Any:
    """
    Convert input (an io.BytesIO, a file path / raw string, or bytes)
    into a decoded object: a numpy array or PIL image when btype asks
    for one, otherwise an inferred type (image -> PIL, tabular -> pandas,
    anything else -> the raw buffer).

    Parameters
    ----------
    input : io.BytesIO, str or bytes
    btype : "numpy", "pil", or None to infer from the buffer content
    options : reserved for future decoder options (currently unused)
    """
    if options is None:
        options = dict()  # avoid a shared mutable default argument
    output = None
    if isinstance(input, io.BytesIO):
        ic("Converting io.BytesIO to bytes object")
        buffer = input.read()
    elif isinstance(input, str):
        if Path(input).is_file():
            with open(input, "rb") as fh:
                buffer = io.BytesIO(fh.read())
            buffer = buffer.getvalue()
        else:
            buffer = input
    else:
        buffer = input
    if btype in ["numpy", "pil"]:
        if get_buffer_category(buffer) == "image":
            ic("pil/numpy-image")
            # np.fromstring is deprecated for binary data; np.frombuffer
            # is the supported (zero-copy) replacement.
            output = cv2.imdecode(np.frombuffer(buffer, np.uint8), cv2.IMREAD_COLOR)
        else:
            ic("pil/numpy-else")
            output = globals()[f"to_{btype}"](buffer)
    else:
        ic("infere type")
        btype = get_buffer_category(buffer)
        if btype == "image":
            ic("infere type image")
            output = to_pil(buffer)
        elif btype == "flat_structured_data":
            ic("infere type structured data")
            output = to_pandas(buffer)
        else:
            output = buffer
    return output
def to_numpy(buffer):
    """Decode an in-memory image byte buffer into a numpy pixel array."""
    stream = io.BytesIO(buffer)
    return np.array(Image.open(stream))
def to_pil(buffer):
data = to_numpy(buffer)
return Image.fromarray(np.uint8(data))
def to_pandas(buffer):
buffer_mime_type = get_buffer_type(buffer)
get_buffer_category = get_mime_category(buffer_mime_type)
output = None
if buffer_mime_type == "text/csv":
output = pd.read_csv(buffer)
elif buffer_mime_type == "json":
output = pd.read_json(buffer)
elif get_buffer_category == "excel":
output = pd.read_json(buffer)
elif get_buffer_category == "web_content":
output = pd.read_html(buffer)
elif buffer_mime_type == "hdf5":
raiseExceptions("Not implemented Yet")
elif buffer_mime_type == "orc":
raiseExceptions("Not implemented Yet")
elif buffer_mime_type == "parquet":
raiseExceptions("Not implemented Yet")
elif buffer_mime_type == "sas":
raiseExceptions("Not implemented Yet")
elif buffer_mime_type == "spss":
raiseExceptions("Not implemented Yet")
elif buffer_mime_type == "pickle":
raiseExceptions("Not implemented Yet")
return output
| [
"numpy.uint8",
"icecream.ic",
"pandas.read_csv",
"pathlib.Path",
"io.BytesIO",
"logging.raiseExceptions",
"pandas.read_html",
"numpy.fromstring",
"pandas.read_json"
] | [((485, 528), 'icecream.ic', 'ic', (['"""Converting io.BytesIO to bytes object"""'], {}), "('Converting io.BytesIO to bytes object')\n", (487, 528), False, 'from icecream import ic\n'), ((1157, 1174), 'icecream.ic', 'ic', (['"""infere type"""'], {}), "('infere type')\n", (1159, 1174), False, 'from icecream import ic\n'), ((1665, 1679), 'numpy.uint8', 'np.uint8', (['data'], {}), '(data)\n', (1673, 1679), True, 'import numpy as np\n'), ((1890, 1909), 'pandas.read_csv', 'pd.read_csv', (['buffer'], {}), '(buffer)\n', (1901, 1909), True, 'import pandas as pd\n'), ((930, 951), 'icecream.ic', 'ic', (['"""pil/numpy-image"""'], {}), "('pil/numpy-image')\n", (932, 951), False, 'from icecream import ic\n'), ((1063, 1083), 'icecream.ic', 'ic', (['"""pil/numpy-else"""'], {}), "('pil/numpy-else')\n", (1065, 1083), False, 'from icecream import ic\n'), ((1260, 1283), 'icecream.ic', 'ic', (['"""infere type image"""'], {}), "('infere type image')\n", (1262, 1283), False, 'from icecream import ic\n'), ((1567, 1585), 'io.BytesIO', 'io.BytesIO', (['buffer'], {}), '(buffer)\n', (1577, 1585), False, 'import io\n'), ((1964, 1984), 'pandas.read_json', 'pd.read_json', (['buffer'], {}), '(buffer)\n', (1976, 1984), True, 'import pandas as pd\n'), ((986, 1017), 'numpy.fromstring', 'np.fromstring', (['buffer', 'np.uint8'], {}), '(buffer, np.uint8)\n', (999, 1017), True, 'import numpy as np\n'), ((1378, 1411), 'icecream.ic', 'ic', (['"""infere type structured data"""'], {}), "('infere type structured data')\n", (1380, 1411), False, 'from icecream import ic\n'), ((2043, 2063), 'pandas.read_json', 'pd.read_json', (['buffer'], {}), '(buffer)\n', (2055, 2063), True, 'import pandas as pd\n'), ((603, 614), 'pathlib.Path', 'Path', (['input'], {}), '(input)\n', (607, 614), False, 'from pathlib import Path\n'), ((2128, 2148), 'pandas.read_html', 'pd.read_html', (['buffer'], {}), '(buffer)\n', (2140, 2148), True, 'import pandas as pd\n'), ((2194, 2232), 'logging.raiseExceptions', 'raiseExceptions', 
(['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2209, 2232), False, 'from logging import raiseExceptions\n'), ((2277, 2315), 'logging.raiseExceptions', 'raiseExceptions', (['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2292, 2315), False, 'from logging import raiseExceptions\n'), ((2364, 2402), 'logging.raiseExceptions', 'raiseExceptions', (['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2379, 2402), False, 'from logging import raiseExceptions\n'), ((2447, 2485), 'logging.raiseExceptions', 'raiseExceptions', (['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2462, 2485), False, 'from logging import raiseExceptions\n'), ((2531, 2569), 'logging.raiseExceptions', 'raiseExceptions', (['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2546, 2569), False, 'from logging import raiseExceptions\n'), ((2617, 2655), 'logging.raiseExceptions', 'raiseExceptions', (['"""Not implemented Yet"""'], {}), "('Not implemented Yet')\n", (2632, 2655), False, 'from logging import raiseExceptions\n')] |
import numpy as np
from topocalc.horizon import horizon
from smrf.envphys.constants import (GRAVITY, IR_MAX, IR_MIN, MOL_AIR,
SEA_LEVEL, STD_AIRTMP, STD_LAPSE,
VISIBLE_MAX, VISIBLE_MIN)
from smrf.envphys.solar.irradiance import direct_solar_irradiance
from smrf.envphys.solar.twostream import twostream
from smrf.envphys.thermal.topotherm import hysat
def check_wavelengths(wavelength_range):
if wavelength_range[0] >= VISIBLE_MIN and \
wavelength_range[1] <= VISIBLE_MAX:
wavelength_flag = 'vis'
elif wavelength_range[0] >= IR_MIN and wavelength_range[1] <= IR_MAX:
wavelength_flag = 'ir'
else:
raise ValueError(
'stoporad wavelength range not within visible or IR wavelengths')
return wavelength_flag
def stoporad(date_time, topo, cosz, azimuth, illum_ang, albedo_surface,
wavelength_range, tau_elevation=100, tau=0.2, omega=0.85,
scattering_factor=0.3):
"""[summary]
Args:
date_time ([type]): [description]
topo ([type]): [description]
cosz ([type]): [description]
azimuth ([type]): [description]
illum_ang ([type]): [description]
albedo_surface ([type]): [description]
wavelength_range ([type]): [description]
tau_elevation (int, optional): [description]. Defaults to 100.
tau (float, optional): [description]. Defaults to 0.2.
omega (float, optional): [description]. Defaults to 0.85.
scattering_factor (float, optional): [description]. Defaults to 0.3.
Returns:
[type]: [description]
"""
wavelength_flag = check_wavelengths(wavelength_range) # noqa
# check cosz if sun is down
if cosz < 0:
return np.zeros_like(topo.dem), np.zeros_like(topo.dem)
else:
solar_irradiance = direct_solar_irradiance(
date_time, w=wavelength_range)
# Run horizon to get sun-below-horizon mask
horizon_angles = horizon(azimuth, topo.dem, topo.dx)
thresh = np.tan(np.pi / 2 - np.arccos(cosz))
no_sun_mask = np.tan(np.abs(horizon_angles)) > thresh
# Run shade to get cosine local illumination angle
# mask by horizon mask using cosz=0 where the sun is not visible
illum_ang = np.copy(illum_ang)
illum_ang[no_sun_mask] = 0
R0 = np.mean(albedo_surface)
# Run elevrad to get beam & diffuse then toporad
evrad = Elevrad(
topo.dem,
solar_irradiance,
cosz,
tau_elevation=tau_elevation,
tau=tau,
omega=omega,
scattering_factor=scattering_factor,
surface_albedo=R0)
trad_beam, trad_diff = toporad(
evrad.beam,
evrad.diffuse,
illum_ang,
topo.sky_view_factor,
topo.terrain_config_factor,
cosz,
surface_albedo=albedo_surface)
return trad_beam, trad_diff
def toporad(beam, diffuse, illum_angle, sky_view_factor, terrain_config_factor,
cosz, surface_albedo=0.0):
"""Topographically-corrected solar radiation. Calculates the topographic
distribution of solar radiation at a single time, using input beam and
diffuse radiation calculates supplied by elevrad.
Args:
beam (np.array): beam radiation
diffuse (np.array): diffuse radiation
illum_angle (np.array): local illumination angles
sky_view_factor (np.array): sky view factor
terrain_config_factor (np.array): terrain configuraiton factor
cosz (float): cosine of the zenith
surface_albedo (float/np.array, optional): surface albedo.
Defaults to 0.0.
Returns:
tuple: beam and diffuse radiation corrected for terrain
"""
# adjust diffuse radiation accounting for sky view factor
drad = diffuse * sky_view_factor
# add reflection from adjacent terrain
drad = drad + (diffuse * (1 - sky_view_factor) +
beam * cosz) * terrain_config_factor * surface_albedo
# global radiation is diffuse + incoming_beam * cosine of local
# illumination * angle
rad = drad + beam * illum_angle
return rad, drad
class Elevrad():
"""Beam and diffuse radiation from elevation.
elevrad is essentially the spatial or grid version of the twostream
command.
Args:
elevation (np.array): DEM elevations in meters
solar_irradiance (float): from direct_solar_irradiance
cosz (float): cosine of zenith angle
tau_elevation (float, optional): Elevation [m] of optical depth
measurement. Defaults to 100.
tau (float, optional): optical depth at tau_elevation. Defaults to 0.2.
omega (float, optional): Single scattering albedo. Defaults to 0.85.
scattering_factor (float, optional): Scattering asymmetry parameter.
Defaults to 0.3.
surface_albedo (float, optional): Mean surface albedo. Defaults to 0.5.
"""
def __init__(self, elevation, solar_irradiance, cosz, **kwargs):
"""Initialize then run elevrad
Args:
elevation (np.array): DEM elevation in meters
solar_irradiance (float): from direct_solar_irradiance
cosz (float): cosine of zenith angle
kwargs: tau_elevation, tau, omega, scattering_factor,
surface_albedo
Returns:
radiation: dict with beam and diffuse radiation
"""
# defaults
self.tau_elevation = 100.0
self.tau = 0.2,
self.omega = 0.85
self.scattering_factor = 0.3
self.surface_albedo = 0.5
# set user specified values
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
self.elevation = elevation
self.solar_irradiance = solar_irradiance
self.cosz = cosz
self.calculate()
def calculate(self):
"""Perform the calculations
"""
# reference pressure (at reference elevation, in km)
reference_pressure = hysat(SEA_LEVEL, STD_AIRTMP, STD_LAPSE,
self.tau_elevation / 1000, GRAVITY, MOL_AIR)
# Convert each elevation in look-up table to pressure, then to optical
# depth over the modeling domain
pressure = hysat(SEA_LEVEL, STD_AIRTMP, STD_LAPSE,
self.elevation / 1000, GRAVITY, MOL_AIR)
tau_domain = self.tau * pressure / reference_pressure
# twostream over the optical depth of the domain
self.twostream = twostream(
self.cosz,
self.solar_irradiance,
tau=tau_domain,
omega=self.omega,
g=self.scattering_factor,
R0=self.surface_albedo)
# calculate beam and diffuse
self.beam = self.solar_irradiance * \
self.twostream['direct_transmittance']
self.diffuse = self.solar_irradiance * self.cosz * \
(self.twostream['transmittance'] -
self.twostream['direct_transmittance'])
| [
"numpy.copy",
"numpy.mean",
"numpy.abs",
"numpy.arccos",
"numpy.zeros_like",
"topocalc.horizon.horizon",
"smrf.envphys.thermal.topotherm.hysat",
"smrf.envphys.solar.twostream.twostream",
"smrf.envphys.solar.irradiance.direct_solar_irradiance"
] | [((1896, 1950), 'smrf.envphys.solar.irradiance.direct_solar_irradiance', 'direct_solar_irradiance', (['date_time'], {'w': 'wavelength_range'}), '(date_time, w=wavelength_range)\n', (1919, 1950), False, 'from smrf.envphys.solar.irradiance import direct_solar_irradiance\n'), ((2042, 2077), 'topocalc.horizon.horizon', 'horizon', (['azimuth', 'topo.dem', 'topo.dx'], {}), '(azimuth, topo.dem, topo.dx)\n', (2049, 2077), False, 'from topocalc.horizon import horizon\n'), ((2346, 2364), 'numpy.copy', 'np.copy', (['illum_ang'], {}), '(illum_ang)\n', (2353, 2364), True, 'import numpy as np\n'), ((2414, 2437), 'numpy.mean', 'np.mean', (['albedo_surface'], {}), '(albedo_surface)\n', (2421, 2437), True, 'import numpy as np\n'), ((6270, 6358), 'smrf.envphys.thermal.topotherm.hysat', 'hysat', (['SEA_LEVEL', 'STD_AIRTMP', 'STD_LAPSE', '(self.tau_elevation / 1000)', 'GRAVITY', 'MOL_AIR'], {}), '(SEA_LEVEL, STD_AIRTMP, STD_LAPSE, self.tau_elevation / 1000, GRAVITY,\n MOL_AIR)\n', (6275, 6358), False, 'from smrf.envphys.thermal.topotherm import hysat\n'), ((6530, 6615), 'smrf.envphys.thermal.topotherm.hysat', 'hysat', (['SEA_LEVEL', 'STD_AIRTMP', 'STD_LAPSE', '(self.elevation / 1000)', 'GRAVITY', 'MOL_AIR'], {}), '(SEA_LEVEL, STD_AIRTMP, STD_LAPSE, self.elevation / 1000, GRAVITY, MOL_AIR\n )\n', (6535, 6615), False, 'from smrf.envphys.thermal.topotherm import hysat\n'), ((6781, 6913), 'smrf.envphys.solar.twostream.twostream', 'twostream', (['self.cosz', 'self.solar_irradiance'], {'tau': 'tau_domain', 'omega': 'self.omega', 'g': 'self.scattering_factor', 'R0': 'self.surface_albedo'}), '(self.cosz, self.solar_irradiance, tau=tau_domain, omega=self.\n omega, g=self.scattering_factor, R0=self.surface_albedo)\n', (6790, 6913), False, 'from smrf.envphys.solar.twostream import twostream\n'), ((1809, 1832), 'numpy.zeros_like', 'np.zeros_like', (['topo.dem'], {}), '(topo.dem)\n', (1822, 1832), True, 'import numpy as np\n'), ((1834, 1857), 'numpy.zeros_like', 'np.zeros_like', (['topo.dem'], 
{}), '(topo.dem)\n', (1847, 1857), True, 'import numpy as np\n'), ((2114, 2129), 'numpy.arccos', 'np.arccos', (['cosz'], {}), '(cosz)\n', (2123, 2129), True, 'import numpy as np\n'), ((2160, 2182), 'numpy.abs', 'np.abs', (['horizon_angles'], {}), '(horizon_angles)\n', (2166, 2182), True, 'import numpy as np\n')] |
# Visualization function
import numpy as np
import matplotlib.pyplot as plt
from math import ceil
from PIL import Image
from scipy.ndimage.filters import gaussian_filter
def img_combine(img, ncols=5, size=1, path=False):
"""
Draw the images with array
img: image array to plot - size = n x im_w x im_h x 3
"""
nimg= img.shape[0]
nrows=int(ceil(nimg/ncols))
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=(ncols*size,nrows*size))
if nrows==0:
return
elif ncols == 1:
for r, ax in zip(np.arange(nrows), axes):
nth=r
if nth < nimg:
ax.imshow(img[nth])
ax.set_axis_off()
elif nrows==1:
for c, ax in zip(np.arange(ncols), axes):
nth=c
if nth < nimg:
ax.imshow(img[nth])
ax.set_axis_off()
else:
for r, row in zip(np.arange(nrows), axes):
for c, ax in zip(np.arange(ncols), row):
nth=r*ncols+c
if nth < nimg:
ax.imshow(img[nth])
ax.set_axis_off()
if path:
plt.tight_layout()
plt.savefig(path, dpi = 300)
plt.show()
def get_image_for_paper(original_image_object, prediction_map, IHC_map = None,
overlay_alpha = 0.6, sigma_filter = 128,
mix = False):
"""
Get paper used images (raw, overlay_only, raw+overlay, IHC responding region)
Args:
- original_image_object: PIL image obejct
- prediction_map: Array of prediction
- IHC_map: PIL object of IHC
- overlap_alpha: control overlay color (0. - 1.0)
- sigma_filter: Use a Gaussian filter to smooth the prediction map (prevent grid-like looking)
- mix: True/False, True: return combined map
Returns:
Tuple of PIL images
- (raw, overlay, raw+overlay, IHC)
"""
# Prediction map filtering
pred_smooth = gaussian_filter(prediction_map, sigma = sigma_filter)
# Create a overlap map
overlay = np.zeros((prediction_map.shape + (4,))) # (h,w) -> (h,w,4)
overlay[:, :, [0,1]] = 255 # RGB, [0,1] = Yellow
overlay[:, :, -1] = (pred_smooth * 255 * overlay_alpha)
overlay = overlay.astype('uint8')
overlay = Image.fromarray(overlay)
# Render overlay to original image
render = original_image_object.copy()
render.paste(im = overlay, box = (0,0), mask = overlay)
if not mix:
return (original_image_object, overlay, render, IHC_map)
else:
"""
raw | overlay
---------------------
raw+overlay | IHC
"""
sz = tuple([int(i/4) for i in original_image_object.size])
raw_arr = np.array(original_image_object.resize(sz)) # RGBA
overlay = np.array(overlay.resize(sz)) #RGBA
render = np.array(render.resize(sz)) # RGBA
IHC_map = np.array(IHC_map.resize(sz)) if IHC_map is not None else np.zeros((sz + (4,)))
r1 = np.hstack((raw_arr, overlay))
r2 = np.hstack((render, IHC_map))
mixed = np.vstack((r1, r2))
return Image.fromarray(mixed.astype('uint8')) | [
"PIL.Image.fromarray",
"math.ceil",
"matplotlib.pyplot.savefig",
"scipy.ndimage.filters.gaussian_filter",
"numpy.hstack",
"numpy.zeros",
"numpy.vstack",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((399, 506), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': 'nrows', 'ncols': 'ncols', 'sharex': '(True)', 'sharey': '(True)', 'figsize': '(ncols * size, nrows * size)'}), '(nrows=nrows, ncols=ncols, sharex=True, sharey=True, figsize=(\n ncols * size, nrows * size))\n', (411, 506), True, 'import matplotlib.pyplot as plt\n'), ((1226, 1236), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1234, 1236), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2079), 'scipy.ndimage.filters.gaussian_filter', 'gaussian_filter', (['prediction_map'], {'sigma': 'sigma_filter'}), '(prediction_map, sigma=sigma_filter)\n', (2043, 2079), False, 'from scipy.ndimage.filters import gaussian_filter\n'), ((2128, 2165), 'numpy.zeros', 'np.zeros', (['(prediction_map.shape + (4,))'], {}), '(prediction_map.shape + (4,))\n', (2136, 2165), True, 'import numpy as np\n'), ((2352, 2376), 'PIL.Image.fromarray', 'Image.fromarray', (['overlay'], {}), '(overlay)\n', (2367, 2376), False, 'from PIL import Image\n'), ((365, 383), 'math.ceil', 'ceil', (['(nimg / ncols)'], {}), '(nimg / ncols)\n', (369, 383), False, 'from math import ceil\n'), ((1166, 1184), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1182, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1193, 1219), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {'dpi': '(300)'}), '(path, dpi=300)\n', (1204, 1219), True, 'import matplotlib.pyplot as plt\n'), ((3084, 3113), 'numpy.hstack', 'np.hstack', (['(raw_arr, overlay)'], {}), '((raw_arr, overlay))\n', (3093, 3113), True, 'import numpy as np\n'), ((3127, 3155), 'numpy.hstack', 'np.hstack', (['(render, IHC_map)'], {}), '((render, IHC_map))\n', (3136, 3155), True, 'import numpy as np\n'), ((3181, 3200), 'numpy.vstack', 'np.vstack', (['(r1, r2)'], {}), '((r1, r2))\n', (3190, 3200), True, 'import numpy as np\n'), ((3040, 3059), 'numpy.zeros', 'np.zeros', (['(sz + (4,))'], {}), '(sz + (4,))\n', (3048, 3059), True, 'import numpy as 
np\n'), ((575, 591), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (584, 591), True, 'import numpy as np\n'), ((755, 771), 'numpy.arange', 'np.arange', (['ncols'], {}), '(ncols)\n', (764, 771), True, 'import numpy as np\n'), ((927, 943), 'numpy.arange', 'np.arange', (['nrows'], {}), '(nrows)\n', (936, 943), True, 'import numpy as np\n'), ((981, 997), 'numpy.arange', 'np.arange', (['ncols'], {}), '(ncols)\n', (990, 997), True, 'import numpy as np\n')] |
'''
May 2020 by <NAME>
<EMAIL>
https://www.github.com/sebbarb/
'''
import sys
sys.path.append('../lib/')
import numpy as np
import pandas as pd
from lifelines import CoxPHFitter
from utils import *
import feather
from hyperparameters import Hyperparameters
from pdb import set_trace as bp
def main():
# Load data
print('Load data...')
hp = Hyperparameters()
data = np.load(hp.data_pp_dir + 'data_arrays_' + hp.gender + '.npz')
print('Use all data for model fitting...')
x = data['x']
time = data['time']
event = data['event']
cols_list = load_obj(hp.data_pp_dir + 'cols_list.pkl')
df = pd.DataFrame(x, columns=cols_list)
df['TIME'] = time
df['EVENT'] = event
###################################################################
print('Fitting all data...')
cph = CoxPHFitter()
cph.fit(df, duration_col='TIME', event_col='EVENT', show_progress=True, step_size=0.5)
cph.print_summary()
print('Saving...')
df_summary = cph.summary
df_summary['PREDICTOR'] = cols_list
df_summary.to_csv(hp.results_dir + 'hr_' + hp.gender + '.csv', index=False)
###################################################################
print('Test on each fold (train on swapped)...')
for fold in range(hp.num_folds):
for swap in range(2):
print('Fold: {} Swap: {}'.format(fold, swap))
idx = (data['fold'][:, fold] == (1-swap))
x = data['x'][idx]
time = data['time'][idx]
event = data['event'][idx]
df = pd.DataFrame(x, columns=cols_list)
df['TIME'] = time
df['EVENT'] = event
print('Fitting all data...')
cph = CoxPHFitter()
cph.fit(df, duration_col='TIME', event_col='EVENT', show_progress=True, step_size=0.5)
print('done')
idx = (data['fold'][:, fold] == swap)
x = data['x'][idx]
df_cox = pd.DataFrame({'LPH': np.dot(x-cph._norm_mean.values, cph.params_)})
print('Saving log proportional hazards for fold...')
df_cox.to_feather(hp.results_dir + 'df_cox_' + hp.gender + '_fold_' + str(fold) + '_' + str(swap) + '.feather')
if __name__ == '__main__':
main()
| [
"hyperparameters.Hyperparameters",
"lifelines.CoxPHFitter",
"numpy.dot",
"pandas.DataFrame",
"numpy.load",
"sys.path.append"
] | [((86, 112), 'sys.path.append', 'sys.path.append', (['"""../lib/"""'], {}), "('../lib/')\n", (101, 112), False, 'import sys\n'), ((379, 396), 'hyperparameters.Hyperparameters', 'Hyperparameters', ([], {}), '()\n', (394, 396), False, 'from hyperparameters import Hyperparameters\n'), ((409, 470), 'numpy.load', 'np.load', (["(hp.data_pp_dir + 'data_arrays_' + hp.gender + '.npz')"], {}), "(hp.data_pp_dir + 'data_arrays_' + hp.gender + '.npz')\n", (416, 470), True, 'import numpy as np\n'), ((678, 712), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'cols_list'}), '(x, columns=cols_list)\n', (690, 712), True, 'import pandas as pd\n'), ((887, 900), 'lifelines.CoxPHFitter', 'CoxPHFitter', ([], {}), '()\n', (898, 900), False, 'from lifelines import CoxPHFitter\n'), ((1650, 1684), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {'columns': 'cols_list'}), '(x, columns=cols_list)\n', (1662, 1684), True, 'import pandas as pd\n'), ((1824, 1837), 'lifelines.CoxPHFitter', 'CoxPHFitter', ([], {}), '()\n', (1835, 1837), False, 'from lifelines import CoxPHFitter\n'), ((2104, 2150), 'numpy.dot', 'np.dot', (['(x - cph._norm_mean.values)', 'cph.params_'], {}), '(x - cph._norm_mean.values, cph.params_)\n', (2110, 2150), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from typing import Iterable, Union
import numpy as np
import pytrol.util.graphprocessor as gp
class Network:
def __init__(self, graph: np.ndarray, edges_to_vertices: np.ndarray,
edges_lgts: np.ndarray, edge_activations,
vertices: dict, edges: dict, locations: np.ndarray):
r"""The network, i.e. the graph, to patrol.
Args:
graph (np.ndarray):
edges_to_vertices (np.ndarray):
edges_lgts (np.ndarray):
edge_activations:
vertices (dict):
edges (dict):
locations (np.ndarray):
"""
self.graph = graph
self.edges_to_vertices = edges_to_vertices
self.edges_lgts = edges_lgts
self.max_units_of_edge = self.edges_lgts.max()
self.vertices = vertices
self.edges = edges
self.locations = locations
self.edge_activations = np.array(edge_activations, dtype=np.int16)
# Neighbours, distances and paths
self.ngbrs = gp.build_tc_neighbours(self.graph)
self.v_dists, self.v_paths = gp.fw_distances(graph, edges_lgts)
self.paths = gp.v_to_v_tc_paths(graph, edges_lgts,
edges_to_vertices, self.v_paths)
self.min_dist, self.max_dist = gp.min_and_max_dists(self.v_dists)
def target(self, pos: Union[int, Iterable], e: int = -1) -> int:
r"""The heading target.
Args:
pos (int | Iterable): the current position from which the target is
required, can be a node id (int) or a position vector (3D
vector)
e (int): the current edge
"""
if len(pos) == 3:
s = pos[0]
e = pos[1]
else:
s = pos
if e < 0:
raise ValueError("Value -1 forbidden. Edge's id must be higher "
"than -1 to determine a target")
return gp.target(s, e, self.edges_to_vertices)
def edge(self, s: Union[Iterable, int], t: int = -1) -> int:
r"""The edge corresponding to `s` if `s` is a position vecteur,
or `{s, t}` if `s` and `t` are node ids.
Args:
s (Iterable | int): the position vector or source node
t: the target node if `s` is a node id
"""
if t != -1:
return gp.edge(s, t, self.graph)
else:
return self.edge(s[0], s[1])
""""
def dist(self, pos1: tuple, pos2: tuple) -> int:
if pos1[1] == -1:
pos1 = (pos1[0], self.graph[pos1[0]][self.graph[pos1[0]] > -1][0],
pos1[2])
if pos2[1] == -1:
pos2 = (pos2[0], self.graph[pos2[0]][self.graph[pos2[0]] > -1][0],
pos2[2])
return self.dists[pos1[0]][pos1[1]][pos1[2]][pos2[0]][pos2[1]][pos2[2]]
"""
def v_dist(self, pos1: Iterable, pos2: Iterable) -> int:
r"""Distance between `pos1` and `pos2`.
Args:
pos1:
pos2:
"""
return self.v_dists[pos1[0]][pos2[0]]
def eucl_dist(self, pos1: Iterable, pos2: Iterable) -> float:
r"""Euclidean distance between `pos1` and `pos2`.
Args:
pos1:
pos2:
"""
vec1 = np.array([self.locations[pos1[0]][0]
- self.locations[self.target(pos1)][0],
self.locations[pos1[0]][1]
- self.locations[self.target(pos1)][1],
self.locations[pos1[0]][2]
- self.locations[self.target(pos1)][2]]) \
if pos1[1] > -1 \
else np.zeros(3)
vec2 = np.array([self.locations[pos2[0]][0]
- self.locations[self.target(pos2)][0],
self.locations[pos2[0]][1]
- self.locations[self.target(pos2)][1],
self.locations[pos2[0]][2]
- self.locations[self.target(pos2)][2]]) \
if pos2[1] > -1 \
else np.zeros(3)
unit1 = pos1[2] if pos1[2] != -1 else 0
unit2 = pos2[2] if pos2[2] != -1 else 0
coords1 = self.locations[pos1[0]] \
+ (unit1 / self.edges_lgts[pos1[1]]) * vec1
coords2 = self.locations[pos2[0]] \
+ (unit2 / self.edges_lgts[pos2[1]]) * vec2
return np.linalg.norm(coords1 - coords2)
def path(self, pos1: Iterable, pos2: Iterable) -> list:
r"""Shortest path between `pos1` and `pos2`.
Args:
pos1:
pos2:
"""
if pos1[1] == -1 and pos1[2] != 0 \
or pos2[1] == -1 and pos2[2] != 0:
raise ValueError(
"A vector of the the 3D space of positions with an edge "
"coordinate (2nd coordinate) valued to -1 cannot have a unit "
"coordinate non equal to 0.")
if pos1[1] == -1:
pos1 = (pos1[0], self.graph[pos1[0]][self.graph[pos1[0]] > -1][0],
pos1[2])
else:
if pos1[1] not in self.graph[pos1[0]]:
raise ValueError("A vector of the the 3D space of positions "
"cannot have an edge non connected to the "
"vertex of its first coordinate.")
if pos2[1] == -1:
pos2 = (pos2[0], self.graph[pos2[0]][self.graph[pos2[0]] > -1][
0], pos2[2])
else:
if pos2[1] not in self.graph[pos2[0]]:
raise ValueError("A vector of the the 3D space of positions "
"cannot have an edge non connected to the "
"vertex of its first coordinate.")
return self.paths[pos1[0]][pos2[0]]
def neighbours(self, p: Iterable) -> list:
r"""The neighbours of `p`
Args:
p: the position as a 3D vector
Returns:
The list of the neighbours of `p`.
"""
return self.ngbrs[p[0]]
| [
"pytrol.util.graphprocessor.v_to_v_tc_paths",
"pytrol.util.graphprocessor.edge",
"pytrol.util.graphprocessor.build_tc_neighbours",
"pytrol.util.graphprocessor.target",
"numpy.array",
"pytrol.util.graphprocessor.fw_distances",
"pytrol.util.graphprocessor.min_and_max_dists",
"numpy.zeros",
"numpy.lina... | [((956, 998), 'numpy.array', 'np.array', (['edge_activations'], {'dtype': 'np.int16'}), '(edge_activations, dtype=np.int16)\n', (964, 998), True, 'import numpy as np\n'), ((1064, 1098), 'pytrol.util.graphprocessor.build_tc_neighbours', 'gp.build_tc_neighbours', (['self.graph'], {}), '(self.graph)\n', (1086, 1098), True, 'import pytrol.util.graphprocessor as gp\n'), ((1137, 1171), 'pytrol.util.graphprocessor.fw_distances', 'gp.fw_distances', (['graph', 'edges_lgts'], {}), '(graph, edges_lgts)\n', (1152, 1171), True, 'import pytrol.util.graphprocessor as gp\n'), ((1194, 1264), 'pytrol.util.graphprocessor.v_to_v_tc_paths', 'gp.v_to_v_tc_paths', (['graph', 'edges_lgts', 'edges_to_vertices', 'self.v_paths'], {}), '(graph, edges_lgts, edges_to_vertices, self.v_paths)\n', (1212, 1264), True, 'import pytrol.util.graphprocessor as gp\n'), ((1347, 1381), 'pytrol.util.graphprocessor.min_and_max_dists', 'gp.min_and_max_dists', (['self.v_dists'], {}), '(self.v_dists)\n', (1367, 1381), True, 'import pytrol.util.graphprocessor as gp\n'), ((2009, 2048), 'pytrol.util.graphprocessor.target', 'gp.target', (['s', 'e', 'self.edges_to_vertices'], {}), '(s, e, self.edges_to_vertices)\n', (2018, 2048), True, 'import pytrol.util.graphprocessor as gp\n'), ((4478, 4511), 'numpy.linalg.norm', 'np.linalg.norm', (['(coords1 - coords2)'], {}), '(coords1 - coords2)\n', (4492, 4511), True, 'import numpy as np\n'), ((2421, 2446), 'pytrol.util.graphprocessor.edge', 'gp.edge', (['s', 't', 'self.graph'], {}), '(s, t, self.graph)\n', (2428, 2446), True, 'import pytrol.util.graphprocessor as gp\n'), ((3731, 3742), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3739, 3742), True, 'import numpy as np\n'), ((4140, 4151), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4148, 4151), True, 'import numpy as np\n')] |
from os.path import join
import cv2
import numpy as np
from sklearn.utils import shuffle
import config as cfg
import random
def trans_image(image, steer, trans_range):
# Translation
tr_x = trans_range * np.random.uniform() - trans_range / 2
steer_ang = steer + tr_x / trans_range * 0.4
tr_y = 40 * np.random.uniform() - 40 / 2
Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])
image_tr = cv2.warpAffine(image, Trans_M, (320, 160))
return image_tr, steer_ang
def batch_data_gen(data, data_dir='data'):
num_samples = len(data)
while 1:
imgs = np.zeros((cfg.batch_size, cfg.h, cfg.w, 3), dtype=np.float32)
steer_val = np.zeros((cfg.batch_size,), dtype=np.float32)
shuffled_data = shuffle(data)
for offset in range(0, num_samples, cfg.batch_size):
batch_samples = shuffled_data[offset:offset + cfg.batch_size]
for batch_sample in range(len(batch_samples)):
center_img, left_img, right_img, steering, throttle, brake, speed = batch_samples[batch_sample]
steering = np.float32(steering)
# randomly select one of the three images (left, center, right)
img_choice = random.choice(['left', 'center', 'right'])
if 'left' == img_choice:
img = cv2.imread(join(data_dir, left_img.strip()))
steering += cfg.steering_corr
elif 'right' == img_choice:
img = cv2.imread(join(data_dir, right_img.strip()))
steering -= cfg.steering_corr
else: # center
img = cv2.imread(join(data_dir, center_img.strip()))
# horizontal and vertical shifts
img, steering = trans_image(img, steering, 100)
img_cropped = img[cfg.crop_height, :, :]
img_resized = cv2.resize(img_cropped, dsize=(cfg.w, cfg.h))
# randomly mirror the images
if True == random.choice([True, False]):
img_resized = img_resized[:, ::-1, :]
steering *= -1.
imgs[batch_sample] = img_resized
steer_val[batch_sample] = steering
yield imgs, steer_val
| [
"cv2.warpAffine",
"random.choice",
"sklearn.utils.shuffle",
"numpy.zeros",
"numpy.random.uniform",
"cv2.resize",
"numpy.float32"
] | [((359, 399), 'numpy.float32', 'np.float32', (['[[1, 0, tr_x], [0, 1, tr_y]]'], {}), '([[1, 0, tr_x], [0, 1, tr_y]])\n', (369, 399), True, 'import numpy as np\n'), ((415, 457), 'cv2.warpAffine', 'cv2.warpAffine', (['image', 'Trans_M', '(320, 160)'], {}), '(image, Trans_M, (320, 160))\n', (429, 457), False, 'import cv2\n'), ((589, 650), 'numpy.zeros', 'np.zeros', (['(cfg.batch_size, cfg.h, cfg.w, 3)'], {'dtype': 'np.float32'}), '((cfg.batch_size, cfg.h, cfg.w, 3), dtype=np.float32)\n', (597, 650), True, 'import numpy as np\n'), ((671, 716), 'numpy.zeros', 'np.zeros', (['(cfg.batch_size,)'], {'dtype': 'np.float32'}), '((cfg.batch_size,), dtype=np.float32)\n', (679, 716), True, 'import numpy as np\n'), ((741, 754), 'sklearn.utils.shuffle', 'shuffle', (['data'], {}), '(data)\n', (748, 754), False, 'from sklearn.utils import shuffle\n'), ((213, 232), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (230, 232), True, 'import numpy as np\n'), ((316, 335), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (333, 335), True, 'import numpy as np\n'), ((1088, 1108), 'numpy.float32', 'np.float32', (['steering'], {}), '(steering)\n', (1098, 1108), True, 'import numpy as np\n'), ((1219, 1261), 'random.choice', 'random.choice', (["['left', 'center', 'right']"], {}), "(['left', 'center', 'right'])\n", (1232, 1261), False, 'import random\n'), ((1899, 1944), 'cv2.resize', 'cv2.resize', (['img_cropped'], {'dsize': '(cfg.w, cfg.h)'}), '(img_cropped, dsize=(cfg.w, cfg.h))\n', (1909, 1944), False, 'import cv2\n'), ((2018, 2046), 'random.choice', 'random.choice', (['[True, False]'], {}), '([True, False])\n', (2031, 2046), False, 'import random\n')] |
import numpy
def predict_salary(payment_from, payment_to):
if not payment_from and not payment_to:
return None
elif payment_from and payment_to:
return numpy.mean([payment_from, payment_to])
elif payment_from:
return payment_from * 1.2
else:
return payment_to * 0.8
| [
"numpy.mean"
] | [((178, 216), 'numpy.mean', 'numpy.mean', (['[payment_from, payment_to]'], {}), '([payment_from, payment_to])\n', (188, 216), False, 'import numpy\n')] |
#!/usr/bin/env python
import roslib; roslib.load_manifest('numpy_eigen'); roslib.load_manifest('rostest');
import numpy_eigen
import numpy_eigen.test as npe
import numpy
import sys
# http://docs.python.org/library/unittest.html#test-cases
import unittest
class TestEigen(unittest.TestCase):
    """Round-trip tests for the numpy <-> Eigen conversion layer.

    The generated functions ``test_<type>_<rows>_<cols>`` in
    :mod:`numpy_eigen.test` pass a matrix through Eigen and return it;
    these tests check that compatible shapes round-trip unchanged and
    that incompatible shapes raise TypeError ('D' denotes a dynamic
    dimension).
    """
    def assertMatrixClose(self,numpyM,eigenM,testType):
        """Assert ``eigenM`` equals ``numpyM`` within tolerance 1e-10.

        Accounts for the converter collapsing 1xN / Nx1 numpy matrices
        into one-dimensional Eigen vectors.
        """
        self.assertEqual(numpyM.size,eigenM.size)
        if eigenM.ndim == 1:
            # The eigen conversion code will take a 1xN or Nx1 and turn
            # it into a single dimension matrix.
            if numpyM.ndim == 1:
                # The original matrix was 1d...compare the 1d types.
                self.assertEqual(numpyM.shape[0],eigenM.shape[0], testType)
                self.assertTrue(numpy.max(numpy.abs(numpyM - eigenM)) < 1e-10, testType)
            elif numpyM.ndim == 2:
                # The original matrix was 2d...compare the 1d dimension
                # with the eigen matrix
                if numpyM.shape[0] == 1:
                    # Row vector
                    self.assertEqual(numpyM.shape[1],eigenM.shape[0], testType)
                    if eigenM.shape[0] > 0:
                        self.assertTrue(numpy.max(numpy.abs(numpyM[0,:] - eigenM)) < 1e-10, testType)
                elif numpyM.shape[1] == 1:
                    # column vector
                    self.assertEqual(numpyM.shape[0],eigenM.shape[0], testType)
                    if eigenM.shape[0] > 0:
                        self.assertTrue(numpy.max(numpy.abs(numpyM[:,0] - eigenM)) < 1e-10, testType)
                else:
                    self.fail('%s: The output matrix is a vector but none of the input matrix dimensions are 1: %s' % (testType,numpyM.shape))
            else:
                self.fail('%s: Unexpected number of dimensions in the numpy input matrix: %d' % (testType,numpyM.ndim))
        elif eigenM.ndim == 2:
            self.assertEqual(numpyM.shape[0],eigenM.shape[0], testType)
            self.assertEqual(numpyM.shape[1],eigenM.shape[1], testType)
            if numpyM.shape[0] > 0 and numpyM.shape[1] > 0:
                self.assertTrue(numpy.max(numpy.abs(numpyM - eigenM)) < 1e-10, testType)
        else:
            self.fail('%s: Unexpected number of dimensions in the numpy output matrix: %d' % (testType, eigenM.ndim))
    def matrixTests(self,t,i,j):
        """Try every input shape up to 50x50 (and its transpose) against
        the pass-through function for scalar type *t* and dimension
        spec (*i*, *j*); a TypeError is expected exactly when the input
        shape is incompatible with the fixed dimensions.
        """
        row_limit = 50
        col_limit = 50
        rows_dim_is_dynamic = (i == 'D')
        cols_dim_is_dynamic = (j == 'D')
        ii = i;
        jj = j;
        if not rows_dim_is_dynamic:
            ii = int(ii)
        if not cols_dim_is_dynamic:
            jj = int(jj)
        fname = 'test_%s_%s_%s' % (t,i,j)
        for R in range(0,row_limit):
            for C in range(0,col_limit):
                testType = 'Testing %s with input array[%d,%d]' % (fname, R, C)
                try:
                    # Create a random matrix.
                    numpyM = numpy.random.random([R,C])
                    # Try to pass it in to the pass-through function
                    eigenM = npe.__dict__[fname](numpyM)
                    # There was no error...check that this was okay.
                    self.assertTrue(rows_dim_is_dynamic or R == ii, testType)
                    self.assertTrue(cols_dim_is_dynamic or C == jj, testType)
                    # Check that the matrices are the same.
                    self.assertMatrixClose(numpyM,eigenM, testType)
                except TypeError as inst:
                    # There was a type error. Check that this was expected.
                    self.assertFalse( (rows_dim_is_dynamic or R == ii) and (cols_dim_is_dynamic or C == jj), testType)
                try:
                    # Create a random matrix and take the transpose.
                    numpyM = numpy.random.random([C,R]).T
                    # Try to pass it in to the pass-through function
                    eigenM = npe.__dict__[fname](numpyM)
                    # There was no error...check that this was okay.
                    self.assertTrue(rows_dim_is_dynamic or R == ii, testType)
                    self.assertTrue(cols_dim_is_dynamic or C == jj, testType)
                    # Check that the matrices are the same.
                    self.assertMatrixClose(numpyM,eigenM, testType)
                except TypeError as inst:
                    # There was a type error. Check that this was expected.
                    self.assertFalse( (rows_dim_is_dynamic or R == ii) and (cols_dim_is_dynamic or C == jj), testType)
    def vectorTests(self,t,i,j):
        """Placeholder for vector-specific tests (not implemented)."""
        x = 1
        # um...
    def test_eigen(self):
        """Run matrixTests for every scalar type and dimension spec."""
        T = ['double']
        #N = ('01','02','03','04','05','06','07','08','09','10','11','12','13','14','15','16','D')
        N = ('1','2','3','4','5','6','D')
        #N = (1,2,3,4,'dynamic')
        for t in T:
            for i in N:
                for j in N:
                    self.matrixTests(t,i,j)
# Run the suite through rostest so results integrate with the ROS CI tooling.
if __name__ == '__main__':
    import rostest
    rostest.rosrun('numpy_eigen', 'test_eigen', TestEigen)
| [
"numpy.random.random",
"numpy.abs",
"rostest.rosrun",
"roslib.load_manifest"
] | [((37, 72), 'roslib.load_manifest', 'roslib.load_manifest', (['"""numpy_eigen"""'], {}), "('numpy_eigen')\n", (57, 72), False, 'import roslib\n'), ((74, 105), 'roslib.load_manifest', 'roslib.load_manifest', (['"""rostest"""'], {}), "('rostest')\n", (94, 105), False, 'import roslib\n'), ((5140, 5194), 'rostest.rosrun', 'rostest.rosrun', (['"""numpy_eigen"""', '"""test_eigen"""', 'TestEigen'], {}), "('numpy_eigen', 'test_eigen', TestEigen)\n", (5154, 5194), False, 'import rostest\n'), ((2959, 2986), 'numpy.random.random', 'numpy.random.random', (['[R, C]'], {}), '([R, C])\n', (2978, 2986), False, 'import numpy\n'), ((3859, 3886), 'numpy.random.random', 'numpy.random.random', (['[C, R]'], {}), '([C, R])\n', (3878, 3886), False, 'import numpy\n'), ((772, 798), 'numpy.abs', 'numpy.abs', (['(numpyM - eigenM)'], {}), '(numpyM - eigenM)\n', (781, 798), False, 'import numpy\n'), ((2151, 2177), 'numpy.abs', 'numpy.abs', (['(numpyM - eigenM)'], {}), '(numpyM - eigenM)\n', (2160, 2177), False, 'import numpy\n'), ((1214, 1246), 'numpy.abs', 'numpy.abs', (['(numpyM[0, :] - eigenM)'], {}), '(numpyM[0, :] - eigenM)\n', (1223, 1246), False, 'import numpy\n'), ((1519, 1551), 'numpy.abs', 'numpy.abs', (['(numpyM[:, 0] - eigenM)'], {}), '(numpyM[:, 0] - eigenM)\n', (1528, 1551), False, 'import numpy\n')] |
import numpy as np
from numpy.random import choice, uniform
import json
from enum import Enum
import os
import math
from gtts import gTTS
# Direction qualifiers for the ACTION verbs: translation, rotation sense,
# and size change (see gen_verb below).
DIR = Enum('DIR', 'right left up down clock anticlock bigger smaller')
# Kinds of motion a shape can perform.
ACTION = Enum('ACTION', 'shift rotate roll jump grow circle')
SPEED = Enum('SPEED', 'slow fast')
SHAPE = Enum('SHAPE', 'triangle square pentagon hexagon circle ellipse')
# Foreground (shape) colours.
FGCOLOR = Enum('FGCOLOR', 'red magenta orange brown green cyan blue black')
# FGCOLOR = Enum('FGCOLORS', 'red blue')
# Background colours.
BGCOLOR = Enum('BGCOLOR', 'white pink beige aquamarine yellow')
# BGCOLOR = Enum('BGCOLORS', 'white pink')
# Presumably the spatial relation between two shapes -- TODO confirm usage.
TYPE = Enum('TYPE', 'disjoint overlap subset same')
# English accents; mapped to gTTS language/TLD pairs by accent_to_args.
ACCENT = Enum('ACCENT', 'au ca ind uk')
# Shape groups: polygons drawn from vertex counts vs. curves drawn from radii.
regular_polygons = [SHAPE.triangle, SHAPE.square, SHAPE.pentagon, SHAPE.hexagon]
circular_shapes = [SHAPE.circle, SHAPE.ellipse]
def perror(msg):
    """Print an error message and terminate the program with exit code 1.

    Parameters
    ----------
    msg : str
        Human-readable description of the error.
    """
    print(f'Error: {msg}')
    # ``exit`` is injected by the ``site`` module and is not guaranteed to
    # exist (e.g. when Python runs with -S); raising SystemExit directly
    # is the equivalent, always-available way to stop with status 1.
    raise SystemExit(1)
def speed_to_num(speed):
    """Translate a SPEED member into its numeric step size.

    Exits via :func:`perror` when *speed* is not a known member.
    """
    step_sizes = {SPEED.slow: 5e-3, SPEED.fast: 1e-2}
    if speed not in step_sizes:
        perror(f'speed_to_num invalid speed: {speed}')
    return step_sizes[speed]
def speed_to_adverb(speed):
    """Translate a SPEED member into an English adverb.

    Exits via :func:`perror` when *speed* is not a known member.
    """
    adverbs = {SPEED.slow: 'slowly', SPEED.fast: 'quickly'}
    if speed not in adverbs:
        perror(f'speed_to_adverb invalid speed: {speed}')
    return adverbs[speed]
def gen_verb(action, dir=None):
    """Generate an English verb phrase describing *action*.

    Parameters
    ----------
    action : ACTION
        The kind of motion being described.
    dir : DIR, optional
        Direction qualifier; required for shift, rotate and grow.

    Returns
    -------
    str
        A present-participle phrase such as ``'moving right'``.

    Exits via :func:`perror` on an invalid action/direction combination.
    """
    if action == ACTION.shift:
        if dir == DIR.right: return 'moving right'
        elif dir == DIR.left: return 'moving left'
        elif dir == DIR.up: return 'moving up'
        elif dir == DIR.down: return 'moving down'
        else: perror(f'gen verb shift invalid dir: {dir}')
    elif action == ACTION.rotate:
        if dir == DIR.clock: return 'rotating clockwise'
        elif dir == DIR.anticlock: return 'rotating anticlockwise'
        else: perror(f'gen verb rotate invalid dir: {dir}')
    elif action == ACTION.roll: return 'rolling'
    elif action == ACTION.grow:
        if dir == DIR.bigger: return 'growing in size'
        elif dir == DIR.smaller: return 'shrinking in size'
        # BUG FIX: previously an invalid direction for grow fell through
        # and silently returned None; report it like the sibling branches.
        else: perror(f'gen verb grow invalid dir: {dir}')
    elif action == ACTION.jump: return 'jumping up and down'
    elif action == ACTION.circle: return 'going around in a circle'
    else: perror(f'gen verb invalid action: {action}')
def setup_dirs(data_path, remove_old):
    """Ensure the ``audio``/``video`` directories and ``texts.csv`` exist.

    Missing directories and the text file are created. When *remove_old*
    is true, files inside existing directories are deleted and an
    existing ``texts.csv`` is removed.
    """
    print(f'SETUP_DIRS: remove_old is set to {remove_old}')
    for sub in ('audio', 'video'):
        sub_path = os.path.join(data_path, sub)
        if os.path.isdir(sub_path):
            if remove_old:
                for entry in os.listdir(sub_path):
                    os.remove(os.path.join(sub_path, entry))
        else:
            os.makedirs(sub_path)
    text_file = os.path.join(data_path, 'texts.csv')
    if os.path.isfile(text_file):
        if remove_old:
            os.remove(text_file)
    else:
        open(text_file, 'a').close()
def circle_sampler():
    """Return the centre and radius of a random circle.

    The circle (x, y, r) lies in the unit grid and stays clear of the
    grid edges; centres are drawn on a 0.05 grid in [0.3, 0.7] and the
    radius from [0.1, max_r) in 0.05 steps, where max_r keeps the
    circle inside. Roughly 10*10 centre positions are possible, with
    the admissible radii depending on the chosen centre.
    """
    grid = np.arange(0.3, 0.75, 0.05)
    x = choice(grid)
    y = choice(grid)
    # Largest radius that keeps the circle inside the unit square.
    max_r = min(min(x, 1.0 - x), min(y, 1.0 - y))
    r = choice(np.arange(0.1, max_r, 0.05))
    return [x, y, r]
def regular_polygon_sampler():
    """Sample a regular polygon as centre, circumradius and orientation."""
    centre_x, centre_y, radius = circle_sampler()
    orientation = uniform(0, 2 * math.pi)  # in radians
    return [centre_x, centre_y, radius, orientation]
def ellipse_sampler(circle=False):
    """Sample an ellipse as (x, y, a, b, theta).

    The semi-major axis comes from :func:`circle_sampler`; when *circle*
    is true a degenerate ellipse with equal axes and zero rotation is
    returned instead.
    """
    x, y, a = circle_sampler()
    b = uniform(a/3, 2*a/3)
    theta = uniform(0, 360)  # somehow this is in degrees in plt
    return [x, y, a, a, 0] if circle else [x, y, a, b, theta]
def get_numpts(shape):
    """Return the vertex count of a regular-polygon SHAPE member.

    Exits via :func:`perror` for shapes without a vertex count.
    """
    vertex_counts = {
        SHAPE.triangle: 3,
        SHAPE.square: 4,
        SHAPE.pentagon: 5,
        SHAPE.hexagon: 6,
    }
    if shape not in vertex_counts:
        perror(f'get numpts undefined shape: {shape}')
    return vertex_counts[shape]
def jump_update(xy, t, speed):
    """Return the position of point *xy* at time step *t* of a jump.

    Models a point thrown upwards with initial speed ``u`` under
    gravity ``g``; the time index wraps around one flight period so the
    point rebounds from the ground indefinitely.
    """
    if speed == SPEED.slow:
        g, u = 0.005, 0.05
    else:
        g, u = 0.01, 0.05
    # Wrap t into a single flight period (rebound from the ground).
    t = (t-1) % int(2*u/g) + 1
    # From s = ut + (1/2)at^2, the per-step displacement is
    # ds = s(t) - s(t-1) = u - (1/2)g(2t-1).
    y = xy[1] + u - (1/2) * g * (2*t - 1)
    return (xy[0], y)
def accent_to_args(accent):
    """Translate an ACCENT member into gTTS keyword arguments.

    Returns ``None`` for unknown members (mirrors the original
    fall-through behaviour).
    """
    tlds = {
        ACCENT.au: 'com.au',
        ACCENT.ca: 'ca',
        ACCENT.ind: 'co.in',
        ACCENT.uk: 'co.uk',
    }
    if accent in tlds:
        return {'lang': 'en', 'tld': tlds[accent]}
| [
"os.listdir",
"os.makedirs",
"numpy.random.choice",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"enum.Enum",
"numpy.random.uniform",
"numpy.arange",
"os.remove"
] | [((145, 209), 'enum.Enum', 'Enum', (['"""DIR"""', '"""right left up down clock anticlock bigger smaller"""'], {}), "('DIR', 'right left up down clock anticlock bigger smaller')\n", (149, 209), False, 'from enum import Enum\n'), ((219, 271), 'enum.Enum', 'Enum', (['"""ACTION"""', '"""shift rotate roll jump grow circle"""'], {}), "('ACTION', 'shift rotate roll jump grow circle')\n", (223, 271), False, 'from enum import Enum\n'), ((280, 306), 'enum.Enum', 'Enum', (['"""SPEED"""', '"""slow fast"""'], {}), "('SPEED', 'slow fast')\n", (284, 306), False, 'from enum import Enum\n'), ((315, 379), 'enum.Enum', 'Enum', (['"""SHAPE"""', '"""triangle square pentagon hexagon circle ellipse"""'], {}), "('SHAPE', 'triangle square pentagon hexagon circle ellipse')\n", (319, 379), False, 'from enum import Enum\n'), ((390, 455), 'enum.Enum', 'Enum', (['"""FGCOLOR"""', '"""red magenta orange brown green cyan blue black"""'], {}), "('FGCOLOR', 'red magenta orange brown green cyan blue black')\n", (394, 455), False, 'from enum import Enum\n'), ((507, 560), 'enum.Enum', 'Enum', (['"""BGCOLOR"""', '"""white pink beige aquamarine yellow"""'], {}), "('BGCOLOR', 'white pink beige aquamarine yellow')\n", (511, 560), False, 'from enum import Enum\n'), ((611, 655), 'enum.Enum', 'Enum', (['"""TYPE"""', '"""disjoint overlap subset same"""'], {}), "('TYPE', 'disjoint overlap subset same')\n", (615, 655), False, 'from enum import Enum\n'), ((665, 695), 'enum.Enum', 'Enum', (['"""ACCENT"""', '"""au ca ind uk"""'], {}), "('ACCENT', 'au ca ind uk')\n", (669, 695), False, 'from enum import Enum\n'), ((2580, 2616), 'os.path.join', 'os.path.join', (['data_path', '"""texts.csv"""'], {}), "(data_path, 'texts.csv')\n", (2592, 2616), False, 'import os\n'), ((3031, 3057), 'numpy.arange', 'np.arange', (['(0.3)', '(0.75)', '(0.05)'], {}), '(0.3, 0.75, 0.05)\n', (3040, 3057), True, 'import numpy as np\n'), ((3066, 3092), 'numpy.arange', 'np.arange', (['(0.3)', '(0.75)', '(0.05)'], {}), '(0.3, 0.75, 0.05)\n', 
(3075, 3092), True, 'import numpy as np\n'), ((3225, 3254), 'numpy.arange', 'np.arange', (['min_r', 'max_r', '(0.05)'], {}), '(min_r, max_r, 0.05)\n', (3234, 3254), True, 'import numpy as np\n'), ((3260, 3270), 'numpy.random.choice', 'choice', (['rs'], {}), '(rs)\n', (3266, 3270), False, 'from numpy.random import choice, uniform\n'), ((3436, 3459), 'numpy.random.uniform', 'uniform', (['(0)', '(2 * math.pi)'], {}), '(0, 2 * math.pi)\n', (3443, 3459), False, 'from numpy.random import choice, uniform\n'), ((3569, 3594), 'numpy.random.uniform', 'uniform', (['(a / 3)', '(2 * a / 3)'], {}), '(a / 3, 2 * a / 3)\n', (3576, 3594), False, 'from numpy.random import choice, uniform\n'), ((3598, 3613), 'numpy.random.uniform', 'uniform', (['(0)', '(360)'], {}), '(0, 360)\n', (3605, 3613), False, 'from numpy.random import choice, uniform\n'), ((2399, 2425), 'os.path.join', 'os.path.join', (['data_path', 'x'], {}), '(data_path, x)\n', (2411, 2425), False, 'import os\n'), ((2625, 2650), 'os.path.isfile', 'os.path.isfile', (['text_file'], {}), '(text_file)\n', (2639, 2650), False, 'import os\n'), ((3102, 3112), 'numpy.random.choice', 'choice', (['xs'], {}), '(xs)\n', (3108, 3112), False, 'from numpy.random import choice, uniform\n'), ((3114, 3124), 'numpy.random.choice', 'choice', (['ys'], {}), '(ys)\n', (3120, 3124), False, 'from numpy.random import choice, uniform\n'), ((2435, 2454), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (2448, 2454), False, 'import os\n'), ((2461, 2478), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2472, 2478), False, 'import os\n'), ((2703, 2723), 'os.remove', 'os.remove', (['text_file'], {}), '(text_file)\n', (2712, 2723), False, 'import os\n'), ((2510, 2526), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2520, 2526), False, 'import os\n'), ((2542, 2563), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (2554, 2563), False, 'import os\n')] |
import cv2
import numpy as np
import pyrealsense2 as rs
class DepthFiltering():
    """Bundle of RealSense post-processing filters for depth frames."""
    def __init__(self, temporal_smoothing=5):
        # NOTE(review): temporal_smoothing is stored but never used below;
        # confirm whether it should configure the temporal filter.
        self.temporal_smoothing = temporal_smoothing
        # NOTE(review): dec_filter is created but never applied in
        # apply_filters -- verify whether decimation was intended.
        self.dec_filter = rs.decimation_filter()  # Decimation - reduces depth frame density
        self.spat_filter = rs.spatial_filter()
        self.spat_filter.set_option(rs.option.holes_fill, 3)
        self.temp_filter = rs.temporal_filter()  # Temporal - reduces temporal noise
        # Disparity transforms: presumably True = depth->disparity and
        # False = disparity->depth (per the pyrealsense2 API).
        self.depth_to_disparity = rs.disparity_transform(True)
        self.disparity_to_depth = rs.disparity_transform(False)
        self.hole_filling = rs.hole_filling_filter()
    def apply_filters(self, depth_frame):
        """Run the configured filter chain over one depth frame.

        The frame is converted to disparity space, spatially and
        temporally filtered, converted back to depth, and finally
        hole-filled.
        """
        depth_frame = self.depth_to_disparity.process(depth_frame)
        depth_frame = self.spat_filter.process(depth_frame)
        depth_frame = self.temp_filter.process(depth_frame)
        depth_frame = self.disparity_to_depth.process(depth_frame)
        depth_frame = self.hole_filling.process(depth_frame)
        return depth_frame
class Visualizer():
    """Static helpers for rendering RealSense depth/color data with OpenCV."""

    @staticmethod
    def _as_array(frame):
        # Accept either a numpy array or a pyrealsense2 frame object.
        if isinstance(frame, (np.ndarray, np.generic)):
            return frame
        return np.asanyarray(frame.get_data())

    @staticmethod
    def visualize_depth(depth):
        """Show a depth frame as a JET-colormapped image.

        ``depth`` may be a pyrealsense2 depth frame or a numpy array.
        """
        # BUG FIX: the previous one-liner parsed as
        # ``(np.asanyarray(...) is depth) if not isinstance(...) else depth``,
        # so frame objects were turned into the boolean False instead of
        # the converted image.
        depth_image = Visualizer._as_array(depth)
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        cv2.namedWindow('depth_colormap', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('depth_colormap', depth_colormap)

    @staticmethod
    def visualize_img(img):
        """Show a color image; accepts a frame object or a numpy array."""
        # BUG FIX: same operator-precedence bug as visualize_depth.
        img = Visualizer._as_array(img)
        cv2.namedWindow('img', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('img', img)

    @staticmethod
    def visualize_aligned_depth_to_image(aligned_depth_frame, color_frame, profile):
        """Render color and aligned depth side by side, greying out
        everything farther than one meter.

        Parameters
        ----------
        aligned_depth_frame, color_frame
            pyrealsense2 frames (or numpy arrays) of the aligned pair.
        profile
            The pipeline profile, used to query the depth scale.
        """
        depth_sensor = profile.get_device().first_depth_sensor()
        depth_scale = depth_sensor.get_depth_scale()
        # print("Depth Scale is: ", depth_scale)
        # We will be removing the background of objects more than
        # clipping_distance_in_meters meters away
        clipping_distance_in_meters = 1  # 1 meter
        clipping_distance = clipping_distance_in_meters / depth_scale
        depth_image = Visualizer._as_array(aligned_depth_frame)
        color_image = Visualizer._as_array(color_frame)
        # Remove background - Set pixels further than clipping_distance to grey
        grey_color = 153
        depth_image_3d = np.dstack(
            (depth_image, depth_image, depth_image))  # depth image is 1 channel, color is 3 channels
        bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
        # Render images
        depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
        images = np.hstack((bg_removed, depth_colormap))
        cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Align Example', images)
| [
"pyrealsense2.temporal_filter",
"numpy.dstack",
"pyrealsense2.decimation_filter",
"pyrealsense2.hole_filling_filter",
"cv2.convertScaleAbs",
"numpy.hstack",
"numpy.where",
"pyrealsense2.disparity_transform",
"cv2.imshow",
"pyrealsense2.spatial_filter",
"cv2.namedWindow"
] | [((207, 229), 'pyrealsense2.decimation_filter', 'rs.decimation_filter', ([], {}), '()\n', (227, 229), True, 'import pyrealsense2 as rs\n'), ((301, 320), 'pyrealsense2.spatial_filter', 'rs.spatial_filter', ([], {}), '()\n', (318, 320), True, 'import pyrealsense2 as rs\n'), ((409, 429), 'pyrealsense2.temporal_filter', 'rs.temporal_filter', ([], {}), '()\n', (427, 429), True, 'import pyrealsense2 as rs\n'), ((503, 531), 'pyrealsense2.disparity_transform', 'rs.disparity_transform', (['(True)'], {}), '(True)\n', (525, 531), True, 'import pyrealsense2 as rs\n'), ((566, 595), 'pyrealsense2.disparity_transform', 'rs.disparity_transform', (['(False)'], {}), '(False)\n', (588, 595), True, 'import pyrealsense2 as rs\n'), ((624, 648), 'pyrealsense2.hole_filling_filter', 'rs.hole_filling_filter', ([], {}), '()\n', (646, 648), True, 'import pyrealsense2 as rs\n'), ((1427, 1481), 'cv2.namedWindow', 'cv2.namedWindow', (['"""depth_colormap"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('depth_colormap', cv2.WINDOW_AUTOSIZE)\n", (1442, 1481), False, 'import cv2\n'), ((1490, 1534), 'cv2.imshow', 'cv2.imshow', (['"""depth_colormap"""', 'depth_colormap'], {}), "('depth_colormap', depth_colormap)\n", (1500, 1534), False, 'import cv2\n'), ((1699, 1742), 'cv2.namedWindow', 'cv2.namedWindow', (['"""img"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('img', cv2.WINDOW_AUTOSIZE)\n", (1714, 1742), False, 'import cv2\n'), ((1751, 1773), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (1761, 1773), False, 'import cv2\n'), ((2720, 2770), 'numpy.dstack', 'np.dstack', (['(depth_image, depth_image, depth_image)'], {}), '((depth_image, depth_image, depth_image))\n', (2729, 2770), True, 'import numpy as np\n'), ((2854, 2953), 'numpy.where', 'np.where', (['((depth_image_3d > clipping_distance) | (depth_image_3d <= 0))', 'grey_color', 'color_image'], {}), '((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),\n grey_color, color_image)\n', (2862, 2953), True, 'import numpy as np\n'), 
((3099, 3138), 'numpy.hstack', 'np.hstack', (['(bg_removed, depth_colormap)'], {}), '((bg_removed, depth_colormap))\n', (3108, 3138), True, 'import numpy as np\n'), ((3147, 3200), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Align Example"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('Align Example', cv2.WINDOW_AUTOSIZE)\n", (3162, 3200), False, 'import cv2\n'), ((3209, 3244), 'cv2.imshow', 'cv2.imshow', (['"""Align Example"""', 'images'], {}), "('Align Example', images)\n", (3219, 3244), False, 'import cv2\n'), ((1355, 1399), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['depth_image'], {'alpha': '(0.03)'}), '(depth_image, alpha=0.03)\n', (1374, 1399), False, 'import cv2\n'), ((3018, 3062), 'cv2.convertScaleAbs', 'cv2.convertScaleAbs', (['depth_image'], {'alpha': '(0.03)'}), '(depth_image, alpha=0.03)\n', (3037, 3062), False, 'import cv2\n')] |
# -*- coding: utf-8 -*-
import os
import copy
import odl
import torch
import numpy as np
from math import ceil
from tqdm import tqdm
from warnings import warn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.optim.lr_scheduler import CyclicLR, OneCycleLR
from dival import LearnedReconstructor
from dival.measure import PSNR
# Enable cuDNN and let it benchmark/select kernels for the encountered
# input sizes (beneficial when input shapes are fixed across iterations).
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
# Project base directory: two levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
class BaseLearnedReconstructor(LearnedReconstructor):
    """Base class for learned reconstructors trained with an MSE loss.

    Subclasses implement :meth:`init_model`; training, evaluation,
    checkpointing of the best validation-PSNR weights and optional
    tensorboard logging are provided here.
    """
    # Hyper parameters; ``'retrain': True`` marks parameters whose change
    # requires retraining the model.
    HYPER_PARAMS = {
        'epochs': {
            'default': 20,
            'retrain': True
        },
        'batch_size': {
            'default': 64,
            'retrain': True
        },
        'lr': {
            'default': 0.01,
            'retrain': True
        },
        'normalize_by_opnorm': {
            'default': False,
            'retrain': True
        }
    }

    def __init__(self, ray_trafo, epochs=None, batch_size=None, lr=None,
                 normalize_by_opnorm=None,
                 num_data_loader_workers=8, use_cuda=True, show_pbar=True,
                 fbp_impl='astra_cuda', hyper_params=None, log_dir=None,
                 log_num_validation_samples=0,
                 save_best_learned_params_path=None,
                 **kwargs):
        """
        Parameters
        ----------
        ray_trafo : :class:`odl.tomo.RayTransform`
            Ray transform from which the FBP operator is constructed.
        epochs : int, optional
            Number of epochs to train (a hyper parameter).
        batch_size : int, optional
            Batch size (a hyper parameter).
        lr : float, optional
            Base learning rate (a hyper parameter).
        normalize_by_opnorm : bool, optional
            Whether to normalize :attr:`ray_trafo` by its operator norm.
        num_data_loader_workers : int, optional
            Number of parallel workers to use for loading data.
        use_cuda : bool, optional
            Whether to use cuda for the U-Net.
        show_pbar : bool, optional
            Whether to show tqdm progress bars during the epochs.
        fbp_impl : str, optional
            The backend implementation passed to
            :class:`odl.tomo.RayTransform` in case no `ray_trafo` is specified.
            Then ``dataset.get_ray_trafo(impl=fbp_impl)`` is used to get the
            ray transform and FBP operator.
        log_dir : str, optional
            Tensorboard log directory (name of sub-directory in utils/logs).
            If `None`, no logs are written.
        log_num_validation_samples : int, optional
            Number of validation images to store in tensorboard logs.
            This option only takes effect if ``log_dir is not None``.
        save_best_learned_params_path : str, optional
            Save best model weights during training under the specified path by
            calling :meth:`save_learned_params`.
        """
        super().__init__(reco_space=ray_trafo.domain,
                         observation_space=ray_trafo.range,
                         hyper_params=hyper_params, **kwargs)
        self.ray_trafo = ray_trafo
        self.num_data_loader_workers = num_data_loader_workers
        self.use_cuda = use_cuda
        self.show_pbar = show_pbar
        self.fbp_impl = fbp_impl
        self.log_dir = log_dir
        self.log_num_validation_samples = log_num_validation_samples
        self.save_best_learned_params_path = save_best_learned_params_path
        self.model = None
        if epochs is not None:
            self.epochs = epochs
            # NOTE(review): ``hyper_params`` is an explicit parameter, so
            # ``kwargs`` never contains it and these override warnings can
            # never fire -- verify the intended check.
            if kwargs.get('hyper_params', {}).get('epochs') is not None:
                warn("hyper parameter 'epochs' overridden by constructor "
                     "argument")
        if batch_size is not None:
            self.batch_size = batch_size
            if kwargs.get('hyper_params', {}).get('batch_size') is not None:
                warn("hyper parameter 'batch_size' overridden by constructor "
                     "argument")
        if lr is not None:
            self.lr = lr
            if kwargs.get('hyper_params', {}).get('lr') is not None:
                warn("hyper parameter 'lr' overridden by constructor argument")
        if normalize_by_opnorm is not None:
            self.normalize_by_opnorm = normalize_by_opnorm
            if (kwargs.get('hyper_params', {}).get('normalize_by_opnorm')
                    is not None):
                warn("hyper parameter 'normalize_by_opnorm' overridden by "
                     "constructor argument")
        if self.normalize_by_opnorm:
            # Scale the forward operator so its operator norm is ~1.
            self.opnorm = odl.power_method_opnorm(self.ray_trafo)
            self.ray_trafo = (1./self.opnorm) * self.ray_trafo
        self.device = (torch.device('cuda:0')
                       if self.use_cuda and torch.cuda.is_available() else
                       torch.device('cpu'))

    def eval(self, test_data):
        """Return the mean PSNR of reconstructions over *test_data* pairs."""
        self.model.eval()
        running_psnr = 0.0
        with tqdm(test_data, desc='test ',
                  disable=not self.show_pbar) as pbar:
            for obs, gt in pbar:
                rec = self.reconstruct(obs)
                running_psnr += PSNR(rec, gt)
        return running_psnr / len(test_data)

    def train(self, dataset):
        """Train :attr:`model` on *dataset*, keeping the best weights.

        Runs ``self.epochs`` epochs of MSE training with gradient
        clipping, tracks validation PSNR, optionally logs scalars and
        sample reconstructions to tensorboard, and finally loads the
        weights of the epoch with the highest validation PSNR.
        """
        torch.random.manual_seed(1)
        # create PyTorch datasets
        dataset_train = dataset.create_torch_dataset(
            part='train', reshape=((1,) + dataset.space[0].shape,
                                   (1,) + dataset.space[1].shape))
        dataset_validation = dataset.create_torch_dataset(
            part='validation', reshape=((1,) + dataset.space[0].shape,
                                        (1,) + dataset.space[1].shape))
        # reset model before training
        self.init_model()
        criterion = torch.nn.MSELoss()
        self.init_optimizer(dataset_train=dataset_train)
        # create PyTorch dataloaders
        data_loaders = {'train': DataLoader(
            dataset_train, batch_size=self.batch_size,
            num_workers=self.num_data_loader_workers, shuffle=True,
            pin_memory=True),
            'validation': DataLoader(
                dataset_validation, batch_size=self.batch_size,
                num_workers=self.num_data_loader_workers,
                shuffle=True, pin_memory=True)}
        dataset_sizes = {'train': len(dataset_train),
                         'validation': len(dataset_validation)}
        self.init_scheduler(dataset_train=dataset_train)
        if self.scheduler is not None:
            # Cyclic schedulers step once per batch, others once per epoch.
            schedule_every_batch = isinstance(
                self.scheduler, (CyclicLR, OneCycleLR))
        best_model_wts = copy.deepcopy(self.model.state_dict())
        best_psnr = 0
        if self.log_dir is not None:
            writer = SummaryWriter(
                log_dir=os.path.join(BASE_DIR, 'utils/logs', self.log_dir),
                max_queue=0)
            validation_samples = dataset.get_data_pairs(
                'validation', self.log_num_validation_samples)
        self.model.to(self.device)
        self.model.train()
        for epoch in range(self.epochs):
            # Each epoch has a training and validation phase
            for phase in ['train', 'validation']:
                if phase == 'train':
                    self.model.train()  # Set model to training mode
                else:
                    self.model.eval()  # Set model to evaluate mode
                running_psnr = 0.0
                running_loss = 0.0
                running_size = 0
                with tqdm(data_loaders[phase],
                          desc='epoch {:d}'.format(epoch + 1),
                          disable=not self.show_pbar) as pbar:
                    for inputs, labels in pbar:
                        if self.normalize_by_opnorm:
                            inputs = (1./self.opnorm) * inputs
                        inputs = inputs.to(self.device)
                        labels = labels.to(self.device)
                        # zero the parameter gradients
                        self.optimizer.zero_grad()
                        # forward
                        # track gradients only if in train phase
                        with torch.set_grad_enabled(phase == 'train'):
                            outputs = self.model(inputs)
                            loss = criterion(outputs, labels)
                            # backward + optimize only if in training phase
                            if phase == 'train':
                                loss.backward()
                                torch.nn.utils.clip_grad_norm_(
                                    self.model.parameters(), max_norm=1)
                                self.optimizer.step()
                                if (self.scheduler is not None and
                                        schedule_every_batch):
                                    self.scheduler.step()
                                    # lrs.append(self.scheduler.get_lr())
                                    # losses.append(loss.item())
                        for i in range(outputs.shape[0]):
                            labels_ = labels[i, 0].detach().cpu().numpy()
                            outputs_ = outputs[i, 0].detach().cpu().numpy()
                            running_psnr += PSNR(outputs_, labels_)
                        # statistics
                        running_loss += loss.item() * outputs.shape[0]
                        running_size += outputs.shape[0]
                        pbar.set_postfix({'phase': phase,
                                          'loss': running_loss/running_size,
                                          'psnr': running_psnr/running_size})
                        if self.log_dir is not None and phase == 'train':
                            step = (epoch * ceil(dataset_sizes['train']
                                                 / self.batch_size)
                                    + ceil(running_size / self.batch_size))
                            writer.add_scalar('loss/{}'.format(phase),
                                              torch.tensor(running_loss/running_size), step)
                            writer.add_scalar('psnr/{}'.format(phase),
                                              torch.tensor(running_psnr/running_size), step)
                if self.scheduler is not None and not schedule_every_batch:
                    self.scheduler.step()
                epoch_loss = running_loss / dataset_sizes[phase]
                epoch_psnr = running_psnr / dataset_sizes[phase]
                if self.log_dir is not None and phase == 'validation':
                    step = (epoch+1) * ceil(dataset_sizes['train']
                                            / self.batch_size)
                    writer.add_scalar('loss/{}'.format(phase),
                                      epoch_loss, step)
                    writer.add_scalar('psnr/{}'.format(phase),
                                      epoch_psnr, step)
                # deep copy the model (if it is the best one seen so far)
                if phase == 'validation' and epoch_psnr > best_psnr:
                    best_psnr = epoch_psnr
                    best_model_wts = copy.deepcopy(self.model.state_dict())
                    if self.save_best_learned_params_path is not None:
                        self.save_learned_params(
                            self.save_best_learned_params_path)
                # import matplotlib.pyplot as plt
                # plt.plot(lrs)
                # plt.show()
                # plt.plot(lrs, losses)
                # plt.show()
                if (phase == 'validation' and self.log_dir is not None and
                        self.log_num_validation_samples > 0):
                    # Log (reconstruction, ground truth) image pairs for a
                    # few fixed validation samples.
                    with torch.no_grad():
                        val_images = []
                        for (y, x) in validation_samples:
                            y = torch.from_numpy(
                                np.asarray(y))[None, None].to(self.device)
                            x = torch.from_numpy(
                                np.asarray(x))[None, None].to(self.device)
                            reco = self.model(y)
                            # Normalize the reconstruction to [0, 1] for display.
                            reco -= torch.min(reco)
                            reco /= torch.max(reco)
                            val_images += [reco, x]
                        writer.add_images(
                            'validation_samples', torch.cat(val_images),
                            (epoch + 1) * (ceil(dataset_sizes['train'] /
                                                self.batch_size)),
                            dataformats='NCWH')
        print('Best val psnr: {:4f}'.format(best_psnr))
        self.model.load_state_dict(best_model_wts)

    def init_model(self):
        """
        Initialize :attr:`model`.
        Called in :meth:`train` at the beginning.
        """
        raise NotImplementedError

    def init_optimizer(self, dataset_train):
        """
        Initialize the optimizer.
        Called in :meth:`train`, after calling :meth:`init_model` and before
        calling :meth:`init_scheduler`.
        Parameters
        ----------
        dataset_train : :class:`torch.utils.data.Dataset`
            The training (torch) dataset constructed in :meth:`train`.
        """
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)

    def init_scheduler(self, dataset_train):
        """
        Initialize the learning rate scheduler.
        Called in :meth:`train`, after calling :meth:`init_optimizer`.
        Parameters
        ----------
        dataset_train : :class:`torch.utils.data.Dataset`
            The training (torch) dataset constructed in :meth:`train`.
        """
        self.scheduler = torch.optim.lr_scheduler.OneCycleLR(
            self.optimizer, max_lr=self.lr,
            steps_per_epoch=ceil(len(dataset_train) / self.batch_size),
            epochs=self.epochs)

    def _reconstruct(self, observation):
        """Reconstruct a single observation with the trained model."""
        self.model.eval()
        with torch.set_grad_enabled(False):
            obs_tensor = torch.from_numpy(
                np.asarray(observation)[None, None])
            if self.normalize_by_opnorm:
                obs_tensor = obs_tensor / self.opnorm
            obs_tensor = obs_tensor.to(self.device)
            reco_tensor = self.model(obs_tensor)
            reconstruction = reco_tensor.cpu().detach().numpy()[0, 0]
        return self.reco_space.element(reconstruction)

    def save_learned_params(self, path):
        """Save :attr:`model` weights to *path* (appending '.pt' if absent)."""
        path = path if path.endswith('.pt') else path + '.pt'
        torch.save(self.model.state_dict(), path)

    def load_learned_params(self, path, force_parallel=False):
        """Load model weights from *path*.

        When *force_parallel* is true and the checkpoint was saved
        without ``torch.nn.DataParallel``, keys are prefixed with
        ``'module.'`` so they match a data-parallel model.
        """
        path = path if path.endswith('.pt') else path + '.pt'
        self.init_model()
        map_location = ('cuda:0' if self.use_cuda and torch.cuda.is_available()
                        else 'cpu')
        state_dict = torch.load(path, map_location=map_location)
        # backwards-compatibility with non-data_parallel weights
        data_parallel = list(state_dict.keys())[0].startswith('module.')
        if force_parallel and not data_parallel:
            state_dict = {('module.' + k): v for k, v in state_dict.items()}
        self.model.load_state_dict(state_dict)
| [
"dival.measure.PSNR",
"torch.max",
"torch.min",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.random.manual_seed",
"torch.set_grad_enabled",
"numpy.asarray",
"warnings.warn",
"odl.power_method_opnorm",
"torch.cat",
"torch.device",
"math.ceil",
"torch.load",
"tqdm.tqdm",
"os.pat... | [((499, 524), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (514, 524), False, 'import os\n'), ((5460, 5487), 'torch.random.manual_seed', 'torch.random.manual_seed', (['(1)'], {}), '(1)\n', (5484, 5487), False, 'import torch\n'), ((5998, 6016), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (6014, 6016), False, 'import torch\n'), ((15236, 15279), 'torch.load', 'torch.load', (['path'], {'map_location': 'map_location'}), '(path, map_location=map_location)\n', (15246, 15279), False, 'import torch\n'), ((4799, 4838), 'odl.power_method_opnorm', 'odl.power_method_opnorm', (['self.ray_trafo'], {}), '(self.ray_trafo)\n', (4822, 4838), False, 'import odl\n'), ((4926, 4948), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4938, 4948), False, 'import torch\n'), ((5047, 5066), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5059, 5066), False, 'import torch\n'), ((5167, 5224), 'tqdm.tqdm', 'tqdm', (['test_data'], {'desc': '"""test """', 'disable': '(not self.show_pbar)'}), "(test_data, desc='test ', disable=not self.show_pbar)\n", (5171, 5224), False, 'from tqdm import tqdm\n'), ((6145, 6276), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_train'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_data_loader_workers', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset_train, batch_size=self.batch_size, num_workers=self.\n num_data_loader_workers, shuffle=True, pin_memory=True)\n', (6155, 6276), False, 'from torch.utils.data import DataLoader\n'), ((6336, 6472), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_validation'], {'batch_size': 'self.batch_size', 'num_workers': 'self.num_data_loader_workers', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(dataset_validation, batch_size=self.batch_size, num_workers=self\n .num_data_loader_workers, shuffle=True, pin_memory=True)\n', (6346, 6472), False, 'from torch.utils.data import DataLoader\n'), ((14345, 
14374), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (14367, 14374), False, 'import torch\n'), ((3842, 3909), 'warnings.warn', 'warn', (['"""hyper parameter \'epochs\' overridden by constructor argument"""'], {}), '("hyper parameter \'epochs\' overridden by constructor argument")\n', (3846, 3909), False, 'from warnings import warn\n'), ((4104, 4175), 'warnings.warn', 'warn', (['"""hyper parameter \'batch_size\' overridden by constructor argument"""'], {}), '("hyper parameter \'batch_size\' overridden by constructor argument")\n', (4108, 4175), False, 'from warnings import warn\n'), ((4338, 4401), 'warnings.warn', 'warn', (['"""hyper parameter \'lr\' overridden by constructor argument"""'], {}), '("hyper parameter \'lr\' overridden by constructor argument")\n', (4342, 4401), False, 'from warnings import warn\n'), ((4630, 4715), 'warnings.warn', 'warn', (['"""hyper parameter \'normalize_by_opnorm\' overridden by constructor argument"""'], {}), '("hyper parameter \'normalize_by_opnorm\' overridden by constructor argument"\n )\n', (4634, 4715), False, 'from warnings import warn\n'), ((4993, 5018), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5016, 5018), False, 'import torch\n'), ((5361, 5374), 'dival.measure.PSNR', 'PSNR', (['rec', 'gt'], {}), '(rec, gt)\n', (5365, 5374), False, 'from dival.measure import PSNR\n'), ((15153, 15178), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15176, 15178), False, 'import torch\n'), ((7022, 7072), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""utils/logs"""', 'self.log_dir'], {}), "(BASE_DIR, 'utils/logs', self.log_dir)\n", (7034, 7072), False, 'import os\n'), ((14435, 14458), 'numpy.asarray', 'np.asarray', (['observation'], {}), '(observation)\n', (14445, 14458), True, 'import numpy as np\n'), ((12078, 12093), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12091, 12093), False, 'import torch\n'), ((8424, 8464), 
'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'train')"], {}), "(phase == 'train')\n", (8446, 8464), False, 'import torch\n'), ((9522, 9545), 'dival.measure.PSNR', 'PSNR', (['outputs_', 'labels_'], {}), '(outputs_, labels_)\n', (9526, 9545), False, 'from dival.measure import PSNR\n'), ((10929, 10975), 'math.ceil', 'ceil', (["(dataset_sizes['train'] / self.batch_size)"], {}), "(dataset_sizes['train'] / self.batch_size)\n", (10933, 10975), False, 'from math import ceil\n'), ((12528, 12543), 'torch.min', 'torch.min', (['reco'], {}), '(reco)\n', (12537, 12543), False, 'import torch\n'), ((12580, 12595), 'torch.max', 'torch.max', (['reco'], {}), '(reco)\n', (12589, 12595), False, 'import torch\n'), ((12741, 12762), 'torch.cat', 'torch.cat', (['val_images'], {}), '(val_images)\n', (12750, 12762), False, 'import torch\n'), ((10178, 10214), 'math.ceil', 'ceil', (['(running_size / self.batch_size)'], {}), '(running_size / self.batch_size)\n', (10182, 10214), False, 'from math import ceil\n'), ((10333, 10374), 'torch.tensor', 'torch.tensor', (['(running_loss / running_size)'], {}), '(running_loss / running_size)\n', (10345, 10374), False, 'import torch\n'), ((10497, 10538), 'torch.tensor', 'torch.tensor', (['(running_psnr / running_size)'], {}), '(running_psnr / running_size)\n', (10509, 10538), False, 'import torch\n'), ((12807, 12853), 'math.ceil', 'ceil', (["(dataset_sizes['train'] / self.batch_size)"], {}), "(dataset_sizes['train'] / self.batch_size)\n", (12811, 12853), False, 'from math import ceil\n'), ((10044, 10090), 'math.ceil', 'ceil', (["(dataset_sizes['train'] / self.batch_size)"], {}), "(dataset_sizes['train'] / self.batch_size)\n", (10048, 10090), False, 'from math import ceil\n'), ((12275, 12288), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (12285, 12288), True, 'import numpy as np\n'), ((12400, 12413), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (12410, 12413), True, 'import numpy as np\n')] |
import numpy as np
import csv
from collections import namedtuple
import json
import torch
import torch.nn.utils.rnn as rnn_utils
import torch.nn as nn
import torch.optim as optim
from torchsummary import summary
from data_index import get_dataset, get_batch
import data_index
import math
#import data_index.info as info
import os
# Shorthand for the dataset metadata module (feature sizes etc.).
info = data_index.info
# Global hyper-parameter configuration; keys like "hidden_size",
# "batch_size", "lr" are read throughout this module.
config = json.load(open("config.json"))
# Seed PyTorch's RNG so training runs are reproducible.
torch.manual_seed(config['seed'])
class Rnn(nn.Module):
    """LSTM sequence model that predicts the next time-step's features.

    Each time step is a flattened ``second_split * feature_size`` frame.
    The frames are run through a multi-layer LSTM and projected back to
    frame space by a small two-layer MLP, so the output at step ``t`` is
    the prediction for the input at step ``t + 1``.
    """
    # NOTE: a self-correcting module could later be added as another nn.Module.

    def __init__(self, device="cuda"):
        super().__init__()
        frame_size = info.feature_size * config["second_split"]
        self.x_size = frame_size
        # Recurrent core (projection size equals the hidden size).
        self.rnn = nn.LSTM(
            frame_size,
            hidden_size=config["hidden_size"],
            num_layers=config["num_layers"],
            dropout=config["dropout"],
            batch_first=True,
        )
        # MLP mapping hidden states back to the input frame dimension.
        self.proj_net = nn.Sequential(
            nn.ReLU(),
            nn.Linear(config["hidden_size"], config["proj_hidden_size"]),
            nn.ReLU(),
            nn.Linear(config["proj_hidden_size"], frame_size),
        )
        self.to(torch.device(device))

    def forward(self, X, lens):
        """Run full (padded, unsorted) sequences and return all predictions.

        X: tensor of shape (batch, seq_len, second_split, feature_size);
        lens: true sequence lengths.  The prediction for the final step has
        no target and is simply ignored by ``loss``.
        """
        n_batch, n_steps, ss, fs = X.size()
        flat = X.view(n_batch, n_steps, ss * fs)
        packed = rnn_utils.pack_padded_sequence(
            flat, lens, batch_first=True, enforce_sorted=False)
        rnn_out, self.hidden = self.rnn(packed)
        # Unpack back to a padded tensor, then project to frame space.
        padded, _ = rnn_utils.pad_packed_sequence(rnn_out, batch_first=True)
        projected = self.proj_net(padded).contiguous()
        return projected.view((n_batch, n_steps, -1))

    def loss(self, X, Y_hat, mask):
        """Masked next-step MSE between predictions and shifted targets.

        Prediction at step ``t`` is compared with the input at ``t + 1``;
        the last prediction (which has no target) is dropped.  ``mask``
        zeroes out padded positions; the mean is rescaled by the frame
        size and ``config["loss_factor"]``.
        """
        n_batch, n_steps, ss, fs = X.size()
        flat = X.view(n_batch, n_steps, ss * fs)
        preds = Y_hat[:, :-1, :].view(n_batch, n_steps - 1, ss, fs)
        targets = flat[:, 1:, :].view(n_batch, n_steps - 1, ss, fs)
        valid = mask[:, :-1, :, :]
        sq_err = (targets - preds) ** 2 * valid
        return sq_err.sum() / valid.sum() * (ss * fs) * config["loss_factor"]
def summary(model, name):
    """Print the number of trainable parameters in ``model``.

    NOTE: intentionally shadows ``torchsummary.summary`` imported above.
    """
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    print("Number of parameters in model %s:" % name, sum(trainable))
def loop(model, optimizer, dataset, data_mask, data_lens, TOT, type_=0, update=True):
    """Run ``TOT`` batches through ``model``; return per-batch losses.

    ``type_`` is forwarded to ``get_batch`` to select the split (0 is used
    for training, 1 for validation in this file).  When ``update`` is True
    an optimizer step is taken after every batch.
    """
    batch_losses = []
    for idx in range(TOT):
        batch, mask, lens = get_batch(
            idx, dataset, data_mask, data_lens, type=type_, last=False)
        preds = model(batch, lens)
        batch_loss = model.loss(batch, preds, mask)
        # Weight by the actual batch size so a short final batch counts less.
        batch_losses.append(batch_loss.item() * preds.size()[0] / config["batch_size"])
        if idx % config["print_every"] == 0 and type_ == 0:
            # Periodic progress output (training split only).
            print("\t\ttrain loss (%d): %.4f" % (idx, batch_loss.item()))
        if update:
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()
    return np.array(batch_losses)
def train_sequence_model(model, optimizer, dataset, data_mask, data_lens, epochs):
    """Train for ``epochs`` epochs, validating after each one.

    Returns two lists: the mean training loss and the mean validation
    loss per epoch.
    """
    batches_per_epoch = math.ceil(data_index.TRAIN / config["batch_size"])
    summary(model, "rnn")
    train_curve = []
    val_curve = []
    for epoch in range(epochs):
        print("Epoch %d" % epoch)
        train_losses = loop(model, optimizer, dataset, data_mask, data_lens,
                            TOT=batches_per_epoch, type_=0, update=True)
        print("Epoch %d over, average loss" % epoch, train_losses.mean())
        val_losses = validate(model, dataset, data_mask, data_lens, final=True)
        print("\t\tVal loss: %.4f" % (val_losses.mean()))
        val_curve.append(val_losses.mean())
        train_curve.append(train_losses.mean())
    return train_curve, val_curve
def validate(model, dataset, data_mask, data_lens, final=False):
    """Evaluate on the validation split; return the per-batch losses."""
    n_batches = data_index.VAL // config["batch_size"]
    losses = loop(model, None, dataset, data_mask, data_lens,
                  TOT=n_batches, type_=1, update=False)
    if final:
        print("Final validation")
        print("\tMean validation loss:", losses.mean())
    return losses
def main():
    """Entry point: build the model, optionally resume, train and validate.

    Loads the preprocessed tensors from ``data/`` and reads optimizer /
    training hyper-parameters from the module-level ``config``.
    """
    load = False
    epochs = 10
    rnn = Rnn()
    optimizer = optim.Adam(rnn.parameters(), lr=config["lr"],
                           weight_decay=config["l2_reg"])
    dataset = torch.load("data/dataset.pt")
    data_mask = torch.load("data/data_mask.pt")
    lens = torch.load("data/lens.pt")
    checkpoint_path = "checkpoints/" + config["checkpoint_dir"]
    if load and os.path.exists(checkpoint_path):
        # BUG FIX: nn.Module has no ``load`` method (the original
        # ``rnn.load(path)`` would raise AttributeError); restore the
        # weights through ``load_state_dict`` instead.
        rnn.load_state_dict(torch.load(checkpoint_path))
    plt_t, plt_v = train_sequence_model(rnn, optimizer, dataset, data_mask,
                                         lens, epochs)
    print(plt_t)
    print(plt_v)
    validate(rnn, dataset, data_mask, lens, final=True)


if __name__ == "__main__":
    main()
| [
"torch.manual_seed",
"os.path.exists",
"torch.nn.ReLU",
"math.ceil",
"torch.nn.LSTM",
"torch.load",
"numpy.array",
"torch.nn.utils.rnn.pack_padded_sequence",
"data_index.get_batch",
"torch.nn.Linear",
"torchsummary.summary",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.device"
] | [((394, 427), 'torch.manual_seed', 'torch.manual_seed', (["config['seed']"], {}), "(config['seed'])\n", (411, 427), False, 'import torch\n'), ((3415, 3431), 'numpy.array', 'np.array', (['losses'], {}), '(losses)\n', (3423, 3431), True, 'import numpy as np\n'), ((3527, 3577), 'math.ceil', 'math.ceil', (["(data_index.TRAIN / config['batch_size'])"], {}), "(data_index.TRAIN / config['batch_size'])\n", (3536, 3577), False, 'import math\n'), ((3675, 3696), 'torchsummary.summary', 'summary', (['model', '"""rnn"""'], {}), "(model, 'rnn')\n", (3682, 3696), False, 'from torchsummary import summary\n'), ((4750, 4779), 'torch.load', 'torch.load', (['"""data/dataset.pt"""'], {}), "('data/dataset.pt')\n", (4760, 4779), False, 'import torch\n'), ((4796, 4827), 'torch.load', 'torch.load', (['"""data/data_mask.pt"""'], {}), "('data/data_mask.pt')\n", (4806, 4827), False, 'import torch\n'), ((4839, 4865), 'torch.load', 'torch.load', (['"""data/lens.pt"""'], {}), "('data/lens.pt')\n", (4849, 4865), False, 'import torch\n'), ((705, 843), 'torch.nn.LSTM', 'nn.LSTM', (['self.x_size'], {'hidden_size': "config['hidden_size']", 'num_layers': "config['num_layers']", 'dropout': "config['dropout']", 'batch_first': '(True)'}), "(self.x_size, hidden_size=config['hidden_size'], num_layers=config[\n 'num_layers'], dropout=config['dropout'], batch_first=True)\n", (712, 843), True, 'import torch.nn as nn\n'), ((1488, 1567), 'torch.nn.utils.rnn.pack_padded_sequence', 'rnn_utils.pack_padded_sequence', (['X', 'lens'], {'batch_first': '(True)', 'enforce_sorted': '(False)'}), '(X, lens, batch_first=True, enforce_sorted=False)\n', (1518, 1567), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((1647, 1699), 'torch.nn.utils.rnn.pad_packed_sequence', 'rnn_utils.pad_packed_sequence', (['out'], {'batch_first': '(True)'}), '(out, batch_first=True)\n', (1676, 1699), True, 'import torch.nn.utils.rnn as rnn_utils\n'), ((2831, 2906), 'data_index.get_batch', 'get_batch', (['batch_idx', 'dataset', 
'data_mask', 'data_lens'], {'type': 'type_', 'last': '(False)'}), '(batch_idx, dataset, data_mask, data_lens, type=type_, last=False)\n', (2840, 2906), False, 'from data_index import get_dataset, get_batch\n'), ((4882, 4939), 'os.path.exists', 'os.path.exists', (["('checkpoints/' + config['checkpoint_dir'])"], {}), "('checkpoints/' + config['checkpoint_dir'])\n", (4896, 4939), False, 'import os\n'), ((917, 926), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (924, 926), True, 'import torch.nn as nn\n'), ((940, 1000), 'torch.nn.Linear', 'nn.Linear', (["config['hidden_size']", "config['proj_hidden_size']"], {}), "(config['hidden_size'], config['proj_hidden_size'])\n", (949, 1000), True, 'import torch.nn as nn\n'), ((1014, 1023), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1021, 1023), True, 'import torch.nn as nn\n'), ((1037, 1123), 'torch.nn.Linear', 'nn.Linear', (["config['proj_hidden_size']", "(info.feature_size * config['second_split'])"], {}), "(config['proj_hidden_size'], info.feature_size * config[\n 'second_split'])\n", (1046, 1123), True, 'import torch.nn as nn\n'), ((1167, 1187), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (1179, 1187), False, 'import torch\n')] |
import pytest
import numpy as np
import discretisedfield as df
import micromagneticmodel as mm
class TestZeeman:
    """Integration tests for the ``mm.Zeeman`` energy term.

    Each test builds a small cuboid system, drives it with the injected
    ``calculator`` backend and, for the static cases, asserts that the
    relaxed magnetisation aligns with the applied field.  The
    time-dependent variants (``wave='sin'``/``'sinc'``, a Python
    ``time_dependence`` callable, and raw ``tcl_strings``) are only run
    to completion, i.e. they act as smoke tests.
    """

    @pytest.fixture(autouse=True)
    def _setup_calculator(self, calculator):
        # ``calculator`` is a fixture supplied by the surrounding test
        # suite (the micromagnetic calculator backend under test).
        self.calculator = calculator

    def setup(self):
        # NOTE(review): nose-style ``setup`` hook; recent pytest versions
        # prefer ``setup_method`` -- confirm against the pytest in use.
        p1 = (-10e-9, -5e-9, -3e-9)
        p2 = (10e-9, 5e-9, 3e-9)
        self.region = df.Region(p1=p1, p2=p2)
        self.cell = (1e-9, 1e-9, 1e-9)
        # Two half-spaces split at y=0; used by ``test_dict``.
        self.subregions = {'r1': df.Region(p1=(-10e-9, -5e-9, -3e-9),
                                            p2=(10e-9, 0, 3e-9)),
                           'r2': df.Region(p1=(-10e-9, 0, -3e-9),
                                            p2=(10e-9, 5e-9, 3e-9))}

    def test_vector(self):
        """Uniform field specified as a plain 3-vector."""
        name = 'zeeman_vector'
        H = (0, 0, 1e6)
        Ms = 1e6
        system = mm.System(name=name)
        # time-independent
        system.energy = mm.Zeeman(H=H)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        md = self.calculator.MinDriver()
        md.drive(system)
        # After energy minimisation m should point along H (+z).
        value = system.m(mesh.region.random_point())
        assert np.linalg.norm(np.subtract(value, (0, 0, Ms))) < 1e-3
        # time-dependent - sin
        system.energy = mm.Zeeman(H=H, wave='sin', f=1e9, t0=1e-12)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - sinc
        system.energy = mm.Zeeman(H=H, wave='sinc', f=1e9, t0=0)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        self.calculator.delete(system)
        # time-dependent - function
        def t_func(t):
            # Piecewise profile: constant, linear ramp down, then zero.
            if t < 1e-10:
                return 1
            elif t < 5e-10:
                return (5e-10 - t) / 4e-10
            else:
                return 0
        system.energy = mm.Zeeman(H=H, time_dependence=t_func, tstep=1e-13)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - tcl strings (passed verbatim to OOMMF)
        tcl_strings = {}
        tcl_strings['proc'] = '''proc TimeFunction { total_time } {
          set Hx [expr {sin($total_time * 1e10)}]
          set dHx [expr {1e10 * cos($total_time * 1e10)}]
          return [list $Hx 0 0 $dHx 0 0]
        }
        '''
        tcl_strings['energy'] = 'Oxs_ScriptUZeeman'
        tcl_strings['script_args'] = 'total_time'
        tcl_strings['script'] = 'TimeFunction'
        system.energy = mm.Zeeman(H=H, tcl_strings=tcl_strings)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        self.calculator.delete(system)

    def test_dict(self):
        """Region-dependent field given as a dict keyed by subregion name."""
        name = 'zeeman_dict'
        H = {'r1': (1e5, 0, 0), 'r2': (0, 0, 1e5)}
        Ms = 1e6
        system = mm.System(name=name)
        system.energy = mm.Zeeman(H=H)
        mesh = df.Mesh(region=self.region, cell=self.cell,
                       subregions=self.subregions)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        md = self.calculator.MinDriver()
        md.drive(system)
        # Each subregion should relax along its own field direction.
        assert np.linalg.norm(np.subtract(system.m['r1'].average,
                                         (Ms, 0, 0))) < 1
        assert np.linalg.norm(np.subtract(system.m['r2'].average,
                                         (0, 0, Ms))) < 1
        # time-dependent - sin
        system.energy = mm.Zeeman(H=H, wave='sin', f=1e9, t0=1e-12)
        mesh = df.Mesh(region=self.region, cell=self.cell,
                       subregions=self.subregions)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - sinc
        system.energy = mm.Zeeman(H=H, wave='sinc', f=1e9, t0=0)
        mesh = df.Mesh(region=self.region, cell=self.cell,
                       subregions=self.subregions)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - function
        def t_func(t):
            # Piecewise profile: constant, linear ramp down, then zero.
            if t < 1e-10:
                return 1
            elif t < 5e-10:
                return (5e-10 - t) / 4e-10
            else:
                return 0
        system.energy = mm.Zeeman(H=H, time_dependence=t_func, tstep=1e-13)
        mesh = df.Mesh(region=self.region, cell=self.cell,
                       subregions=self.subregions)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        self.calculator.delete(system)

    def test_field(self):
        """Spatially varying field given as a ``df.Field``."""
        name = 'zeeman_field'
        def value_fun(pos):
            # Field points along +x for x <= 0 and along +z for x > 0.
            x, y, z = pos
            if x <= 0:
                return (1e6, 0, 0)
            else:
                return (0, 0, 1e6)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        H = df.Field(mesh, dim=3, value=value_fun)
        Ms = 1e6
        system = mm.System(name=name)
        system.energy = mm.Zeeman(H=H)
        system.m = df.Field(mesh, dim=3, value=(0, 1, 0), norm=Ms)
        md = self.calculator.MinDriver()
        md.drive(system)
        # Sample one point in each half-space and check alignment with H.
        value = system.m((-2e-9, -2e-9, -2e-9))
        assert np.linalg.norm(np.subtract(value, (Ms, 0, 0))) < 1e-3
        value = system.m((2e-9, 2e-9, 2e-9))
        assert np.linalg.norm(np.subtract(value, (0, 0, Ms))) < 1e-3
        # time-dependent - sin
        system.energy = mm.Zeeman(H=H, wave='sin', f=1e9, t0=1e-12)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - sinc
        system.energy = mm.Zeeman(H=H, wave='sinc', f=1e9, t0=0)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - function (rotation matrix about z at 1 GHz)
        def t_func(t):
            omega = 2*np.pi * 1e9
            return [np.cos(omega * t), -np.sin(omega * t), 0,
                    np.sin(omega * t), np.cos(omega * t), 0,
                    0, 0, 1]
        system.energy = mm.Zeeman(H=H, time_dependence=t_func, tstep=1e-13)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        # time-dependent - tcl strings (rotation matrix and its derivative)
        tcl_strings = {}
        tcl_strings['proc'] = '''proc TimeFunction { total_time } {
            set PI [expr {4*atan(1.)}]
            set w [expr {1e9*2*$PI}]
            set ct [expr {cos($w*$total_time)}]
            set mct [expr {-1*$ct}] ;# "mct" is "minus cosine (w)t"
            set st [expr {sin($w*$total_time)}]
            set mst [expr {-1*$st}] ;# "mst" is "minus sine (w)t"
            return [list $ct $mst 0 \
                    $st $ct 0 \
                    0 0 1 \
                    [expr {$w*$mst}] [expr {$w*$mct}] 0 \
                    [expr {$w*$ct}] [expr {$w*$mst}] 0 \
                    0 0 0]
         }'''
        tcl_strings['energy'] = 'Oxs_TransformZeeman'
        tcl_strings['type'] = 'general'
        tcl_strings['script_args'] = 'total_time'
        tcl_strings['script'] = 'TimeFunction'
        system.energy = mm.Zeeman(H=H, tcl_strings=tcl_strings)
        mesh = df.Mesh(region=self.region, cell=self.cell)
        system.m = df.Field(mesh, dim=3, value=(1, 1, 1), norm=Ms)
        td = self.calculator.TimeDriver()
        td.drive(system, t=0.1e-9, n=20)
        self.calculator.delete(system)
| [
"micromagneticmodel.System",
"numpy.sin",
"discretisedfield.Field",
"numpy.subtract",
"numpy.cos",
"pytest.fixture",
"discretisedfield.Region",
"micromagneticmodel.Zeeman",
"discretisedfield.Mesh"
] | [((120, 148), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (134, 148), False, 'import pytest\n'), ((344, 367), 'discretisedfield.Region', 'df.Region', ([], {'p1': 'p1', 'p2': 'p2'}), '(p1=p1, p2=p2)\n', (353, 367), True, 'import discretisedfield as df\n'), ((795, 815), 'micromagneticmodel.System', 'mm.System', ([], {'name': 'name'}), '(name=name)\n', (804, 815), True, 'import micromagneticmodel as mm\n'), ((868, 882), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H'}), '(H=H)\n', (877, 882), True, 'import micromagneticmodel as mm\n'), ((899, 942), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (906, 942), True, 'import discretisedfield as df\n'), ((962, 1009), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (970, 1009), True, 'import discretisedfield as df\n'), ((1256, 1308), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sin"""', 'f': '(1000000000.0)', 't0': '(1e-12)'}), "(H=H, wave='sin', f=1000000000.0, t0=1e-12)\n", (1265, 1308), True, 'import micromagneticmodel as mm\n'), ((1316, 1359), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (1323, 1359), True, 'import discretisedfield as df\n'), ((1379, 1426), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (1387, 1426), True, 'import discretisedfield as df\n'), ((1568, 1617), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sinc"""', 'f': '(1000000000.0)', 't0': '(0)'}), "(H=H, wave='sinc', f=1000000000.0, t0=0)\n", (1577, 1617), True, 'import micromagneticmodel as mm\n'), ((1625, 1668), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 
'self.cell'}), '(region=self.region, cell=self.cell)\n', (1632, 1668), True, 'import discretisedfield as df\n'), ((1688, 1735), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (1696, 1735), True, 'import discretisedfield as df\n'), ((2110, 2161), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'time_dependence': 't_func', 'tstep': '(1e-13)'}), '(H=H, time_dependence=t_func, tstep=1e-13)\n', (2119, 2161), True, 'import micromagneticmodel as mm\n'), ((2178, 2221), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (2185, 2221), True, 'import discretisedfield as df\n'), ((2241, 2288), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (2249, 2288), True, 'import discretisedfield as df\n'), ((2857, 2896), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'tcl_strings': 'tcl_strings'}), '(H=H, tcl_strings=tcl_strings)\n', (2866, 2896), True, 'import micromagneticmodel as mm\n'), ((2913, 2956), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (2920, 2956), True, 'import discretisedfield as df\n'), ((2976, 3023), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (2984, 3023), True, 'import discretisedfield as df\n'), ((3290, 3310), 'micromagneticmodel.System', 'mm.System', ([], {'name': 'name'}), '(name=name)\n', (3299, 3310), True, 'import micromagneticmodel as mm\n'), ((3335, 3349), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H'}), '(H=H)\n', (3344, 3349), True, 'import micromagneticmodel as mm\n'), ((3366, 3437), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 
'self.cell', 'subregions': 'self.subregions'}), '(region=self.region, cell=self.cell, subregions=self.subregions)\n', (3373, 3437), True, 'import discretisedfield as df\n'), ((3480, 3527), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (3488, 3527), True, 'import discretisedfield as df\n'), ((3903, 3955), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sin"""', 'f': '(1000000000.0)', 't0': '(1e-12)'}), "(H=H, wave='sin', f=1000000000.0, t0=1e-12)\n", (3912, 3955), True, 'import micromagneticmodel as mm\n'), ((3963, 4034), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell', 'subregions': 'self.subregions'}), '(region=self.region, cell=self.cell, subregions=self.subregions)\n', (3970, 4034), True, 'import discretisedfield as df\n'), ((4077, 4124), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (4085, 4124), True, 'import discretisedfield as df\n'), ((4266, 4315), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sinc"""', 'f': '(1000000000.0)', 't0': '(0)'}), "(H=H, wave='sinc', f=1000000000.0, t0=0)\n", (4275, 4315), True, 'import micromagneticmodel as mm\n'), ((4323, 4394), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell', 'subregions': 'self.subregions'}), '(region=self.region, cell=self.cell, subregions=self.subregions)\n', (4330, 4394), True, 'import discretisedfield as df\n'), ((4437, 4484), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (4445, 4484), True, 'import discretisedfield as df\n'), ((4819, 4870), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'time_dependence': 't_func', 'tstep': '(1e-13)'}), '(H=H, time_dependence=t_func, 
tstep=1e-13)\n', (4828, 4870), True, 'import micromagneticmodel as mm\n'), ((4887, 4958), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell', 'subregions': 'self.subregions'}), '(region=self.region, cell=self.cell, subregions=self.subregions)\n', (4894, 4958), True, 'import discretisedfield as df\n'), ((5001, 5048), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (5009, 5048), True, 'import discretisedfield as df\n'), ((5412, 5455), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (5419, 5455), True, 'import discretisedfield as df\n'), ((5469, 5507), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': 'value_fun'}), '(mesh, dim=3, value=value_fun)\n', (5477, 5507), True, 'import discretisedfield as df\n'), ((5543, 5563), 'micromagneticmodel.System', 'mm.System', ([], {'name': 'name'}), '(name=name)\n', (5552, 5563), True, 'import micromagneticmodel as mm\n'), ((5588, 5602), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H'}), '(H=H)\n', (5597, 5602), True, 'import micromagneticmodel as mm\n'), ((5622, 5669), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(0, 1, 0)', 'norm': 'Ms'}), '(mesh, dim=3, value=(0, 1, 0), norm=Ms)\n', (5630, 5669), True, 'import discretisedfield as df\n'), ((6026, 6078), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sin"""', 'f': '(1000000000.0)', 't0': '(1e-12)'}), "(H=H, wave='sin', f=1000000000.0, t0=1e-12)\n", (6035, 6078), True, 'import micromagneticmodel as mm\n'), ((6086, 6129), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (6093, 6129), True, 'import discretisedfield as df\n'), ((6149, 6196), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 
'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (6157, 6196), True, 'import discretisedfield as df\n'), ((6338, 6387), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'wave': '"""sinc"""', 'f': '(1000000000.0)', 't0': '(0)'}), "(H=H, wave='sinc', f=1000000000.0, t0=0)\n", (6347, 6387), True, 'import micromagneticmodel as mm\n'), ((6395, 6438), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (6402, 6438), True, 'import discretisedfield as df\n'), ((6458, 6505), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (6466, 6505), True, 'import discretisedfield as df\n'), ((6861, 6912), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'time_dependence': 't_func', 'tstep': '(1e-13)'}), '(H=H, time_dependence=t_func, tstep=1e-13)\n', (6870, 6912), True, 'import micromagneticmodel as mm\n'), ((6929, 6972), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (6936, 6972), True, 'import discretisedfield as df\n'), ((6992, 7039), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), norm=Ms)\n', (7000, 7039), True, 'import discretisedfield as df\n'), ((8108, 8147), 'micromagneticmodel.Zeeman', 'mm.Zeeman', ([], {'H': 'H', 'tcl_strings': 'tcl_strings'}), '(H=H, tcl_strings=tcl_strings)\n', (8117, 8147), True, 'import micromagneticmodel as mm\n'), ((8164, 8207), 'discretisedfield.Mesh', 'df.Mesh', ([], {'region': 'self.region', 'cell': 'self.cell'}), '(region=self.region, cell=self.cell)\n', (8171, 8207), True, 'import discretisedfield as df\n'), ((8227, 8274), 'discretisedfield.Field', 'df.Field', (['mesh'], {'dim': '(3)', 'value': '(1, 1, 1)', 'norm': 'Ms'}), '(mesh, dim=3, value=(1, 1, 1), 
norm=Ms)\n', (8235, 8274), True, 'import discretisedfield as df\n'), ((440, 500), 'discretisedfield.Region', 'df.Region', ([], {'p1': '(-1e-08, -5e-09, -3e-09)', 'p2': '(1e-08, 0, 3e-09)'}), '(p1=(-1e-08, -5e-09, -3e-09), p2=(1e-08, 0, 3e-09))\n', (449, 500), True, 'import discretisedfield as df\n'), ((575, 634), 'discretisedfield.Region', 'df.Region', ([], {'p1': '(-1e-08, 0, -3e-09)', 'p2': '(1e-08, 5e-09, 3e-09)'}), '(p1=(-1e-08, 0, -3e-09), p2=(1e-08, 5e-09, 3e-09))\n', (584, 634), True, 'import discretisedfield as df\n'), ((1161, 1191), 'numpy.subtract', 'np.subtract', (['value', '(0, 0, Ms)'], {}), '(value, (0, 0, Ms))\n', (1172, 1191), True, 'import numpy as np\n'), ((3626, 3673), 'numpy.subtract', 'np.subtract', (["system.m['r1'].average", '(Ms, 0, 0)'], {}), "(system.m['r1'].average, (Ms, 0, 0))\n", (3637, 3673), True, 'import numpy as np\n'), ((3752, 3799), 'numpy.subtract', 'np.subtract', (["system.m['r2'].average", '(0, 0, Ms)'], {}), "(system.m['r2'].average, (0, 0, Ms))\n", (3763, 3799), True, 'import numpy as np\n'), ((5816, 5846), 'numpy.subtract', 'np.subtract', (['value', '(Ms, 0, 0)'], {}), '(value, (Ms, 0, 0))\n', (5827, 5846), True, 'import numpy as np\n'), ((5931, 5961), 'numpy.subtract', 'np.subtract', (['value', '(0, 0, Ms)'], {}), '(value, (0, 0, Ms))\n', (5942, 5961), True, 'import numpy as np\n'), ((6704, 6721), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (6710, 6721), True, 'import numpy as np\n'), ((6766, 6783), 'numpy.sin', 'np.sin', (['(omega * t)'], {}), '(omega * t)\n', (6772, 6783), True, 'import numpy as np\n'), ((6785, 6802), 'numpy.cos', 'np.cos', (['(omega * t)'], {}), '(omega * t)\n', (6791, 6802), True, 'import numpy as np\n'), ((6724, 6741), 'numpy.sin', 'np.sin', (['(omega * t)'], {}), '(omega * t)\n', (6730, 6741), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_recoverstats.py
#
# Copyright 2016 <NAME> <<EMAIL>>
#
import os
import shlex
import subprocess
import sys
sys.path.insert(0, os.path.abspath('..'))
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import stats
import sep
from astropy.convolution import convolve
from astropy.convolution import convolve_fft
from astropy.time import Time
from astropy.io import fits
from astropy.stats import sigma_clipped_stats
from astropy.stats import signal_to_noise_oir_ccd
from astropy.table import Table
from photutils import psf
from photutils import daofind
from imsim import simtools
import propercoadd as pc
def residuals_psf_sub_table(outtab, title=None):
    """Plot PSF-photometry residuals (flux %, dx, dy) versus source index.

    ``outtab`` must provide the columns ``flux_fit``/``flux_0``,
    ``x_fit``/``x_0`` and ``y_fit``/``y_0``.  An optional ``title`` is
    placed above the first panel.
    """
    n_src = len(outtab)
    src_index = np.arange(n_src) + 1
    flux_resid_pct = (outtab['flux_fit'] - outtab['flux_0']) * 100. / outtab['flux_0']
    dx = outtab['x_fit'] - outtab['x_0']
    dy = outtab['y_fit'] - outtab['y_0']

    panels = [(flux_resid_pct, 'fluxperc'), (dx, 'dx'), (dy, 'dy')]
    for row, (resid, label) in enumerate(panels, start=1):
        plt.subplot(3, 1, row)
        if row == 1 and title is not None:
            plt.title(title)
        plt.scatter(src_index, resid)
        plt.axhline(0, ls='--', c='k')
        plt.ylabel(label)
        plt.xlim(0.5, n_src + .5)
    plt.tight_layout()
    plt.show()
def compare_psf_sub(subim, pim, im, kw1s=None, kw2s=None, kw3s=None, kw4s=None):
    """Show a 2x2 comparison: base image, PSF-subtracted, truth, difference.

    Parameters
    ----------
    subim : 2-D array
        PSF-subtracted image.
    pim : 2-D array
        Base (observed) image.
    im : 2-D array
        Noise-free reference image.
    kw1s, kw2s, kw3s, kw4s : dict, optional
        Extra keyword arguments forwarded to the corresponding
        ``plt.imshow`` call (panels 1-4 respectively).

    Notes
    -----
    The original signature used mutable ``{}`` defaults; ``None`` defaults
    are used instead (same behavior, no shared-state pitfall).  The unused
    local ``subps`` was removed.
    """
    cborient = 'vertical'
    panels = [
        (pim, kw1s, 'Base image'),
        (subim, kw2s, 'PSF subtracted image'),
        (im, kw3s, 'Real noise-free images'),
        (pim - subim, kw4s, 'PSF images'),
    ]
    for pos, (img, kws, label) in enumerate(panels, start=1):
        plt.subplot(2, 2, pos)
        plt.imshow(img, **(kws or {}))
        plt.colorbar(orientation=cborient)
        plt.title(label)
    plt.show()
# Simulation parameters: image side length and stellar FWHM, in pixels.
N = 1024 # side
FWHM = 10
test_dir = os.path.abspath('./test_images/recover_stats')
# Regular 10x10 grid of source positions, kept 5*FWHM away from the borders.
x = np.linspace(5*FWHM, N-5*FWHM, 10)
y = np.linspace(5*FWHM, N-5*FWHM, 10)
xy = simtools.cartesian_product([x, y])
t = Time.now()
SN = 100. # SN high enough to measure the PSF (orig.: "SN para poder medir psf")
# One linearly increasing flux weight per grid position.
weights = list(np.linspace(1, 10000, len(xy)))
m = simtools.delta_point(N, center=False, xy=xy, weights=weights)
im = simtools.image(m, N, t_exp=1, FWHM=FWHM, SN=SN, bkg_pdf='poisson')
# Background subtraction and source extraction with SEP.
sim = pc.SingleImage(im)
sim.subtract_back()
srcs = sep.extract(sim.bkg_sub_img, thresh=12*sim.bkg.globalrms)
posflux = srcs[['x','y', 'flux']]
# Gaussian PRF model with free flux and centroid.
psf_guess = psf.IntegratedGaussianPRF(flux=1, sigma=8)
psf_guess.flux.fixed = False
psf_guess.x_0.fixed = False
psf_guess.y_0.fixed = False
psf_guess.x_0.sigma = True
fitshape = (64,64)
# PSF photometry at the SEP-detected positions.
intab = Table(names=['x_0', 'y_0', 'flux_0'], data=posflux)
#subimi = psf.subtract_psf(sim.bkg_sub_img, psf_guess, posflux)
outtabi = psf.psf_photometry(sim.bkg_sub_img, intab, psf_guess, fitshape,
                            store_fit_info=True)
outtabi['flux_input'] = intab['flux_0']
# Repeat using DAOFIND detections; with daofind there are lots of garbage
found = daofind(sim.bkg_sub_img, threshold=5*sim.bkg.globalrms, fwhm=10,
                exclude_border=True)
intab2 = Table(names=['x_0', 'y_0', 'flux_0'], data=[found['xcentroid'],
                found['ycentroid'], found['flux']])
outtabi2 = psf.psf_photometry(sim.bkg_sub_img, intab2, psf_guess, fitshape,
                             store_fit_info=True)
outtabi2['flux_input'] = intab2['flux_0']
| [
"propercoadd.SingleImage",
"imsim.simtools.delta_point",
"astropy.table.Table",
"matplotlib.pyplot.ylabel",
"photutils.daofind",
"matplotlib.pyplot.imshow",
"imsim.simtools.cartesian_product",
"photutils.psf.IntegratedGaussianPRF",
"matplotlib.pyplot.axhline",
"numpy.linspace",
"imsim.simtools.i... | [((2203, 2249), 'os.path.abspath', 'os.path.abspath', (['"""./test_images/recover_stats"""'], {}), "('./test_images/recover_stats')\n", (2218, 2249), False, 'import os\n'), ((2255, 2294), 'numpy.linspace', 'np.linspace', (['(5 * FWHM)', '(N - 5 * FWHM)', '(10)'], {}), '(5 * FWHM, N - 5 * FWHM, 10)\n', (2266, 2294), True, 'import numpy as np\n'), ((2293, 2332), 'numpy.linspace', 'np.linspace', (['(5 * FWHM)', '(N - 5 * FWHM)', '(10)'], {}), '(5 * FWHM, N - 5 * FWHM, 10)\n', (2304, 2332), True, 'import numpy as np\n'), ((2332, 2366), 'imsim.simtools.cartesian_product', 'simtools.cartesian_product', (['[x, y]'], {}), '([x, y])\n', (2358, 2366), False, 'from imsim import simtools\n'), ((2372, 2382), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (2380, 2382), False, 'from astropy.time import Time\n'), ((2473, 2534), 'imsim.simtools.delta_point', 'simtools.delta_point', (['N'], {'center': '(False)', 'xy': 'xy', 'weights': 'weights'}), '(N, center=False, xy=xy, weights=weights)\n', (2493, 2534), False, 'from imsim import simtools\n'), ((2540, 2606), 'imsim.simtools.image', 'simtools.image', (['m', 'N'], {'t_exp': '(1)', 'FWHM': 'FWHM', 'SN': 'SN', 'bkg_pdf': '"""poisson"""'}), "(m, N, t_exp=1, FWHM=FWHM, SN=SN, bkg_pdf='poisson')\n", (2554, 2606), False, 'from imsim import simtools\n'), ((2614, 2632), 'propercoadd.SingleImage', 'pc.SingleImage', (['im'], {}), '(im)\n', (2628, 2632), True, 'import propercoadd as pc\n'), ((2661, 2720), 'sep.extract', 'sep.extract', (['sim.bkg_sub_img'], {'thresh': '(12 * sim.bkg.globalrms)'}), '(sim.bkg_sub_img, thresh=12 * sim.bkg.globalrms)\n', (2672, 2720), False, 'import sep\n'), ((2766, 2808), 'photutils.psf.IntegratedGaussianPRF', 'psf.IntegratedGaussianPRF', ([], {'flux': '(1)', 'sigma': '(8)'}), '(flux=1, sigma=8)\n', (2791, 2808), False, 'from photutils import psf\n'), ((2950, 3001), 'astropy.table.Table', 'Table', ([], {'names': "['x_0', 'y_0', 'flux_0']", 'data': 'posflux'}), "(names=['x_0', 'y_0', 
'flux_0'], data=posflux)\n", (2955, 3001), False, 'from astropy.table import Table\n'), ((3077, 3165), 'photutils.psf.psf_photometry', 'psf.psf_photometry', (['sim.bkg_sub_img', 'intab', 'psf_guess', 'fitshape'], {'store_fit_info': '(True)'}), '(sim.bkg_sub_img, intab, psf_guess, fitshape,\n store_fit_info=True)\n', (3095, 3165), False, 'from photutils import psf\n'), ((3256, 3347), 'photutils.daofind', 'daofind', (['sim.bkg_sub_img'], {'threshold': '(5 * sim.bkg.globalrms)', 'fwhm': '(10)', 'exclude_border': '(True)'}), '(sim.bkg_sub_img, threshold=5 * sim.bkg.globalrms, fwhm=10,\n exclude_border=True)\n', (3263, 3347), False, 'from photutils import daofind\n'), ((3355, 3459), 'astropy.table.Table', 'Table', ([], {'names': "['x_0', 'y_0', 'flux_0']", 'data': "[found['xcentroid'], found['ycentroid'], found['flux']]"}), "(names=['x_0', 'y_0', 'flux_0'], data=[found['xcentroid'], found[\n 'ycentroid'], found['flux']])\n", (3360, 3459), False, 'from astropy.table import Table\n'), ((3470, 3559), 'photutils.psf.psf_photometry', 'psf.psf_photometry', (['sim.bkg_sub_img', 'intab2', 'psf_guess', 'fitshape'], {'store_fit_info': '(True)'}), '(sim.bkg_sub_img, intab2, psf_guess, fitshape,\n store_fit_info=True)\n', (3488, 3559), False, 'from photutils import psf\n'), ((184, 205), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (199, 205), False, 'import os\n'), ((738, 758), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(1)'], {}), '(3, 1, 1)\n', (749, 758), True, 'import matplotlib.pyplot as plt\n'), ((923, 953), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'ls': '"""--"""', 'c': '"""k"""'}), "(0, ls='--', c='k')\n", (934, 953), True, 'import matplotlib.pyplot as plt\n'), ((958, 980), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""fluxperc"""'], {}), "('fluxperc')\n", (968, 980), True, 'import matplotlib.pyplot as plt\n'), ((1020, 1040), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(2)'], {}), '(3, 1, 2)\n', 
(1031, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1146), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'ls': '"""--"""', 'c': '"""k"""'}), "(0, ls='--', c='k')\n", (1127, 1146), True, 'import matplotlib.pyplot as plt\n'), ((1151, 1167), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dx"""'], {}), "('dx')\n", (1161, 1167), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(3)', '(1)', '(3)'], {}), '(3, 1, 3)\n', (1218, 1227), True, 'import matplotlib.pyplot as plt\n'), ((1303, 1333), 'matplotlib.pyplot.axhline', 'plt.axhline', (['(0)'], {'ls': '"""--"""', 'c': '"""k"""'}), "(0, ls='--', c='k')\n", (1314, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1338, 1354), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""dy"""'], {}), "('dy')\n", (1348, 1354), True, 'import matplotlib.pyplot as plt\n'), ((1394, 1412), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1410, 1412), True, 'import matplotlib.pyplot as plt\n'), ((1417, 1427), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1425, 1427), True, 'import matplotlib.pyplot as plt\n'), ((1553, 1573), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (1564, 1573), True, 'import matplotlib.pyplot as plt\n'), ((1576, 1599), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pim'], {}), '(pim, **kw1s)\n', (1586, 1599), True, 'import matplotlib.pyplot as plt\n'), ((1604, 1638), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': 'cborient'}), '(orientation=cborient)\n', (1616, 1638), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1666), 'matplotlib.pyplot.title', 'plt.title', (['"""Base image"""'], {}), "('Base image')\n", (1652, 1666), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1692), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (1683, 1692), True, 'import matplotlib.pyplot as plt\n'), ((1695, 1720), 
'matplotlib.pyplot.imshow', 'plt.imshow', (['subim'], {}), '(subim, **kw2s)\n', (1705, 1720), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1759), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': 'cborient'}), '(orientation=cborient)\n', (1737, 1759), True, 'import matplotlib.pyplot as plt\n'), ((1764, 1797), 'matplotlib.pyplot.title', 'plt.title', (['"""PSF subtracted image"""'], {}), "('PSF subtracted image')\n", (1773, 1797), True, 'import matplotlib.pyplot as plt\n'), ((1898, 1918), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (1909, 1918), True, 'import matplotlib.pyplot as plt\n'), ((1921, 1943), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im, **kw3s)\n', (1931, 1943), True, 'import matplotlib.pyplot as plt\n'), ((1948, 1982), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': 'cborient'}), '(orientation=cborient)\n', (1960, 1982), True, 'import matplotlib.pyplot as plt\n'), ((1987, 2022), 'matplotlib.pyplot.title', 'plt.title', (['"""Real noise-free images"""'], {}), "('Real noise-free images')\n", (1996, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2028, 2048), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (2039, 2048), True, 'import matplotlib.pyplot as plt\n'), ((2051, 2082), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(pim - subim)'], {}), '(pim - subim, **kw4s)\n', (2061, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2085, 2119), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'orientation': 'cborient'}), '(orientation=cborient)\n', (2097, 2119), True, 'import matplotlib.pyplot as plt\n'), ((2124, 2147), 'matplotlib.pyplot.title', 'plt.title', (['"""PSF images"""'], {}), "('PSF images')\n", (2133, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2152, 2162), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2160, 2162), True, 'import matplotlib.pyplot as plt\n'), ((791, 807), 
'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (800, 807), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import os.path as osp
from unittest import TestCase
from datumaro.components.project import Project
from datumaro.components.extractor import Extractor, DatasetItem
from datumaro.util.test_utils import TestDir
from datumaro.util.image import save_image
class ImageDirFormatTest(TestCase):
    """Round-trip test for the 'image_dir' dataset format: save items as
    images, re-import the directory, and compare with the source dataset."""
    class TestExtractor(Extractor):
        # Minimal in-memory dataset: two items with dummy image arrays.
        def __iter__(self):
            return iter([
                DatasetItem(id=1, image=np.ones((10, 6, 3))),
                DatasetItem(id=2, image=np.ones((5, 4, 3))),
            ])

    def test_can_load(self):
        with TestDir() as test_dir:
            source_dataset = self.TestExtractor()

            # Materialize the items as .jpg files on disk.
            for item in source_dataset:
                save_image(osp.join(test_dir.path, '%s.jpg' % item.id),
                    item.image)

            # Re-import the directory as an 'image_dir' project.
            project = Project.import_from(test_dir.path, 'image_dir')
            parsed_dataset = project.make_dataset()

            self.assertListEqual(
                sorted(source_dataset.subsets()),
                sorted(parsed_dataset.subsets()),
            )
            self.assertEqual(len(source_dataset), len(parsed_dataset))

            # Items must match pairwise, per subset and in order.
            for subset_name in source_dataset.subsets():
                source_subset = source_dataset.get_subset(subset_name)
                parsed_subset = parsed_dataset.get_subset(subset_name)
                self.assertEqual(len(source_subset), len(parsed_subset))

                for idx, (item_a, item_b) in enumerate(
                        zip(source_subset, parsed_subset)):
                    self.assertEqual(item_a, item_b, str(idx))

            self.assertEqual(
                source_dataset.categories(),
                parsed_dataset.categories())
parsed_dataset.categories()) | [
"datumaro.util.test_utils.TestDir",
"datumaro.components.project.Project.import_from",
"os.path.join",
"numpy.ones"
] | [((583, 592), 'datumaro.util.test_utils.TestDir', 'TestDir', ([], {}), '()\n', (590, 592), False, 'from datumaro.util.test_utils import TestDir\n'), ((824, 871), 'datumaro.components.project.Project.import_from', 'Project.import_from', (['test_dir.path', '"""image_dir"""'], {}), "(test_dir.path, 'image_dir')\n", (843, 871), False, 'from datumaro.components.project import Project\n'), ((724, 767), 'os.path.join', 'osp.join', (['test_dir.path', "('%s.jpg' % item.id)"], {}), "(test_dir.path, '%s.jpg' % item.id)\n", (732, 767), True, 'import os.path as osp\n'), ((442, 461), 'numpy.ones', 'np.ones', (['(10, 6, 3)'], {}), '((10, 6, 3))\n', (449, 461), True, 'import numpy as np\n'), ((504, 522), 'numpy.ones', 'np.ones', (['(5, 4, 3)'], {}), '((5, 4, 3))\n', (511, 522), True, 'import numpy as np\n')] |
import corner as triangle
import numpy as np
from matplotlib import rcParams
run_name='Mtheory_nax20_DM_run1'
chain=np.load(run_name+'.npy')
nwalkers, nsteps,ndim = np.shape(chain)
burnin = nsteps/4
# Make sample chain removing burnin
combinedUSE=chain[:,burnin:,:].reshape((-1,ndim))
# Priors
lFL3min,lFL3max=100.,115.
sminl,sminu=10.,30.
smaxl,smaxu=30.,100.
Nmin,Nmax=0.5,1.
betamin,betamax=0.,1.
#################################
# Plotting fonts
###############################
F1 = 20 # Axes font size
F2 = 20 # Legend font size
line = 1.5 # Line width
# Setting the font structure
rc = rcParams # Font structure is called rc now
rc['text.usetex'] = True # Tex fonts
rc['font.family'] = 'serif'
rc['font.serif'].insert(0,'cm') # Default font is computer modern for latex
rc['font.size'] = F1
rc['xtick.labelsize'] = 'small'
rc['ytick.labelsize'] = 'small'
rc['legend.fontsize'] = F2
##############################
# Binning
#############################
bins=20
# Linear binning for linear prior
lFbins=np.linspace(lFL3min,lFL3max,num=bins)
sminbins=np.linspace(sminl,sminu,num=bins)
smaxbins=np.linspace(smaxl,smaxu,num=bins)
Nbins=np.linspace(Nmin,Nmax,num=bins)
betabins=np.linspace(betamin,betamax,num=bins)
#############################################
# Triangle plot: show 1 and 2 sigma levels following triangle documentation
###########################################
combinedCOL='#7E1946'
fig2 = triangle.corner(combinedUSE, labels=[r'$\log_{10}F\Lambda^3$',r'$s_{\rm min}$',r'$s_{\rm max}$',r'$\widetilde{N}$',r'$\beta_\mathcal{M}$'],
color=combinedCOL,smooth1d=2,smooth=2.,plot_datapoints=False,
levels=(1-np.exp(-0.5),1-np.exp(-2.)),
density=True,range=[[lFL3min,lFL3max],[sminl,sminu],[smaxl,smaxu],[Nmin,Nmax],[betamin,betamax]],
bins=[lFbins,sminbins,smaxbins,Nbins,betabins])
fig2.savefig('Plots/'+run_name+"_triangle.pdf")
fig2.savefig('Plots/'+run_name+"_triangle.png") | [
"numpy.exp",
"numpy.shape",
"numpy.load",
"numpy.linspace"
] | [((117, 143), 'numpy.load', 'np.load', (["(run_name + '.npy')"], {}), "(run_name + '.npy')\n", (124, 143), True, 'import numpy as np\n'), ((166, 181), 'numpy.shape', 'np.shape', (['chain'], {}), '(chain)\n', (174, 181), True, 'import numpy as np\n'), ((1021, 1060), 'numpy.linspace', 'np.linspace', (['lFL3min', 'lFL3max'], {'num': 'bins'}), '(lFL3min, lFL3max, num=bins)\n', (1032, 1060), True, 'import numpy as np\n'), ((1068, 1103), 'numpy.linspace', 'np.linspace', (['sminl', 'sminu'], {'num': 'bins'}), '(sminl, sminu, num=bins)\n', (1079, 1103), True, 'import numpy as np\n'), ((1111, 1146), 'numpy.linspace', 'np.linspace', (['smaxl', 'smaxu'], {'num': 'bins'}), '(smaxl, smaxu, num=bins)\n', (1122, 1146), True, 'import numpy as np\n'), ((1151, 1184), 'numpy.linspace', 'np.linspace', (['Nmin', 'Nmax'], {'num': 'bins'}), '(Nmin, Nmax, num=bins)\n', (1162, 1184), True, 'import numpy as np\n'), ((1192, 1231), 'numpy.linspace', 'np.linspace', (['betamin', 'betamax'], {'num': 'bins'}), '(betamin, betamax, num=bins)\n', (1203, 1231), True, 'import numpy as np\n'), ((1643, 1655), 'numpy.exp', 'np.exp', (['(-0.5)'], {}), '(-0.5)\n', (1649, 1655), True, 'import numpy as np\n'), ((1658, 1670), 'numpy.exp', 'np.exp', (['(-2.0)'], {}), '(-2.0)\n', (1664, 1670), True, 'import numpy as np\n')] |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def slidingCoefficient(longSlipValue, latSlipValue, asymptoteValue, asymptoteSlipLong, asymptoteLatSlip):
    """Sliding-friction contribution for a combined longitudinal/lateral slip.

    Ramps linearly from 0 up to `asymptoteValue` while the combined slip is
    inside the asymptote ellipse defined by (asymptoteSlipLong,
    asymptoteLatSlip); saturates at `asymptoteValue` beyond it.
    """
    totalSlip = np.sqrt(latSlipValue ** 2 + longSlipValue ** 2)
    if totalSlip == 0:
        return 0
    if longSlipValue > 0:
        # Intersect the current slip direction with the asymptote ellipse.
        ratio = latSlipValue / longSlipValue
        boundX = (asymptoteSlipLong * asymptoteLatSlip) / np.sqrt(
            asymptoteLatSlip ** 2 + asymptoteSlipLong ** 2 * ratio ** 2)
        boundY = ratio * boundX
        boundTotal = np.sqrt(boundX ** 2 + boundY ** 2)
        if totalSlip < boundTotal:
            return (totalSlip / boundTotal) * asymptoteValue
        return asymptoteValue
    # Pure lateral slip: ramp linearly up to the lateral asymptote slip.
    if latSlipValue < asymptoteLatSlip:
        return (latSlipValue / asymptoteLatSlip) * asymptoteValue
    return asymptoteValue
def adhesion(slipValue, extremumValue, extremumSlip, asymptoteValue, asymptoteSlip):
    """Adhesion-friction contribution: triangular profile that peaks where the
    extremum friction exceeds the sliding line, and vanishes past the
    asymptote slip."""
    if slipValue > asymptoteSlip:
        return 0.0
    # Peak adhesion = extremum friction minus the sliding line at the extremum.
    slidingAtExtremum = (asymptoteValue / asymptoteSlip) * extremumSlip
    peak = extremumValue - slidingAtExtremum
    if slipValue < extremumSlip:
        # Rising edge: linear from 0 at zero slip up to `peak`.
        return (peak / extremumSlip) * slipValue
    # Falling edge: linear from `peak` down to 0 at the asymptote slip.
    return (-peak / (asymptoteSlip - extremumSlip)) * (slipValue - extremumSlip) + peak
# Sample the combined friction surface over a grid of (longitudinal, lateral)
# slip values and scatter-plot it in 3D.
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(0.0, 0.9, 0.01)
Y = np.arange(0.0, 90, 1)
xs = np.zeros(len(X)*len(Y))
ys = np.zeros(len(X)*len(Y))
zs = np.zeros(len(X)*len(Y))
c = ["" for x in range(len(X)*len(Y))]
Z = np.zeros((len(X),len(Y)))
for x in range(len(X)):
    for y in range(len(Y)):
        xs[x*len(Y)+y] = X[x]
        ys[x*len(Y)+y] = Y[y]
        adhesionlong = adhesion(X[x], 1.0, 0.2, 0.75, 0.4)
        adhesionlat = adhesion(Y[y], 1.0, 20, 0.75, 40)
        # Total friction = sliding term + vector sum of the two adhesion terms.
        value = slidingCoefficient(X[x], Y[y], 0.75, 0.4, 40) + np.sqrt(adhesionlong**2+adhesionlat**2)
        zs[x*len(Y)+y] = value
        # Colour by whether the point exceeds the sliding asymptote (0.75).
        c[x*len(Y)+y] = 'b' if value < 0.75 else 'r'
ax.scatter(xs, ys, zs, s = 1, c = c)
plt.show()
| [
"matplotlib.pyplot.figure",
"numpy.sqrt",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1559, 1571), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1569, 1571), True, 'import matplotlib.pyplot as plt\n'), ((1607, 1632), 'numpy.arange', 'np.arange', (['(0.0)', '(0.9)', '(0.01)'], {}), '(0.0, 0.9, 0.01)\n', (1616, 1632), True, 'import numpy as np\n'), ((1637, 1658), 'numpy.arange', 'np.arange', (['(0.0)', '(90)', '(1)'], {}), '(0.0, 90, 1)\n', (1646, 1658), True, 'import numpy as np\n'), ((2269, 2279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2277, 2279), True, 'import matplotlib.pyplot as plt\n'), ((307, 354), 'numpy.sqrt', 'np.sqrt', (['(latSlipValue ** 2 + longSlipValue ** 2)'], {}), '(latSlipValue ** 2 + longSlipValue ** 2)\n', (314, 354), True, 'import numpy as np\n'), ((638, 672), 'numpy.sqrt', 'np.sqrt', (['(limitx ** 2 + limity ** 2)'], {}), '(limitx ** 2 + limity ** 2)\n', (645, 672), True, 'import numpy as np\n'), ((524, 588), 'numpy.sqrt', 'np.sqrt', (['(asymptoteLatSlip ** 2 + asymptoteSlipLong ** 2 * k ** 2)'], {}), '(asymptoteLatSlip ** 2 + asymptoteSlipLong ** 2 * k ** 2)\n', (531, 588), True, 'import numpy as np\n'), ((2107, 2152), 'numpy.sqrt', 'np.sqrt', (['(adhesionlong ** 2 + adhesionlat ** 2)'], {}), '(adhesionlong ** 2 + adhesionlat ** 2)\n', (2114, 2152), True, 'import numpy as np\n')] |
from __future__ import print_function
from __future__ import division
from . import _C
import numpy as np
import random
from scipy.stats import t
from copy import copy, deepcopy
from fuzzytools import numba as ftnumba
from . import flux_magnitude as flux_magnitude
OBS_NOISE_RANGE = 1
CHECK = _C.CHECK
MIN_POINTS_LIGHTCURVE_DEFINITION = _C.MIN_POINTS_LIGHTCURVE_DEFINITION
CADENCE_THRESHOLD = _C.CADENCE_THRESHOLD
EPS = _C.EPS
RESET_TIME_OFFSET = True
SERIAL_CHAR = _C.SERIAL_CHAR
BYPASS_PROB_WINDOW = 0
BYPASS_PROB_DROPOUT = 0
BYPASS_PROB_OBS = 0
DS_MODE = {'random':1.}
MIN_WINDOW_LENGTH_FRAC = 75/100
DF = 2 # 1 2 5 np.inf
OBSE_STD_SCALE = 1/2
DS_PROB = 10/100
BYPASS_PROB = .0
###################################################################################################################################################
def diff_vector(x,
    uses_prepend=True,
    prepended_value=None,
    ):
    """First differences of `x` along axis 0.

    With `uses_prepend`, the series is prepended with `prepended_value`
    (default: its own first element) so the output keeps the input length
    and the first difference is measured against that prepended value.
    Empty inputs are returned unchanged.
    """
    if len(x) == 0:
        return x
    if not uses_prepend:
        return x[1:] - x[:-1]
    head = x[0] if prepended_value is None else prepended_value
    padded = np.concatenate([head[None], x], axis=0)
    return padded[1:] - padded[:-1]
def get_new_noisy_obs(obs, obse, obs_min_lim,
    std_scale=OBSE_STD_SCALE,
    df=DF,
    obs_noise_range=OBS_NOISE_RANGE,
    ):
    """Resample observations with noise proportional to their reported errors.

    Draws Student-t perturbations (Gaussian when df=inf) scaled by
    `obse*std_scale`, clips them to a window of `obs_noise_range` times the
    95% error bar around the original values, and floors at `obs_min_lim`.
    """
    assert df >= 0
    dtype = obs.dtype
    scale = obse * std_scale
    n = len(obs)
    if df == np.inf:
        noise = np.random.standard_normal(size=n).astype(dtype)
    else:
        noise = np.random.standard_t(df, size=n).astype(dtype)
    perturbed = noise * scale + obs
    half_window = (1.645 * 2) * obse * obs_noise_range / 2  # .95-percentile bar, as drawn in plots
    perturbed = np.clip(perturbed, obs - half_window, obs + half_window)
    return np.clip(perturbed, obs_min_lim, None)
###################################################################################################################################################
class SubLCO():
    '''
    Dataclass object used to store an astronomical light curve
    (single band): observation days, values and value errors.
    '''
    def __init__(self, days, obs, obse,
        y:int=None,
        dtype=np.float32,
        flux_type=True,
        ):
        # flux_type=True means `obs` holds fluxes; False means magnitudes.
        self.days = days
        self.obs = obs
        self.obse = obse
        self.y = y
        self.dtype = dtype
        self.flux_type = flux_type
        self.reset()

    def reset(self):
        # Re-validates/casts the arrays and clears any synthetic flag.
        self.set_values(self.days, self.obs, self.obse)
        self.set_synthetic_mode(None)

    def convert_to_magnitude(self):
        # In-place flux -> magnitude conversion; no-op if already magnitudes.
        if self.flux_type:
            flux = copy(self.obs)
            flux_error = copy(self.obse)
            self._set_obs(flux_magnitude.get_magnitude_from_flux(flux))
            self._set_obse(flux_magnitude.get_magnitude_error_from_flux(flux, flux_error))
            self.flux_type = False
        else:
            pass

    def convert_to_flux(self):
        # In-place magnitude -> flux conversion; no-op if already fluxes.
        if self.flux_type:
            pass
        else:
            mag = copy(self.obs)
            mag_error = copy(self.obse)
            self._set_obs(flux_magnitude.get_flux_from_magnitude(mag))
            self._set_obse(flux_magnitude.get_flux_error_from_magnitude(mag, mag_error))
            self.flux_type = True

    def get_synthetic_mode(self):
        return self.synthetic_mode

    def set_synthetic_mode(self, synthetic_mode):
        # None marks a real/observed curve; anything else marks it synthetic.
        self.synthetic_mode = synthetic_mode

    def is_synthetic(self):
        return not self.synthetic_mode is None
    def set_values(self, days, obs, obse):
        '''
        Always use this method to set new values!
        Casts inputs to `self.dtype` (copying ndarrays) and validates them
        through the _set_* setters.
        '''
        assert len(days)==len(obs)
        assert len(days)==len(obse)
        tdays = copy(days).astype(self.dtype) if isinstance(days, np.ndarray) else np.array(days, dtype=self.dtype)
        tobs = copy(obs).astype(self.dtype) if isinstance(obs, np.ndarray) else np.array(obs, dtype=self.dtype)
        tobse = copy(obse).astype(self.dtype) if isinstance(obse, np.ndarray) else np.array(obse, dtype=self.dtype)
        self._set_days(tdays)
        self._set_obs(tobs)
        self._set_obse(tobse)

    def _set_days(self, days):
        # Days must be a strictly increasing 1-d sequence.
        assert len(days.shape)==1
        if CHECK:
            assert np.all((diff_vector(days, uses_prepend=False)>0)) # check if obs-days are in order
        self.days = days

    def _set_obs(self, obs):
        assert len(obs.shape)==1
        if CHECK:
            assert np.all(obs>=0) # check all obs-levels are positive
        self.obs = obs

    def _set_obse(self, obse):
        assert len(obse.shape)==1
        if CHECK:
            assert np.all(obse>=0) # check all obs-errors are positive
        self.obse = obse
def add_day_values(self, values):
'''
This method overrides information!
Always use this method to add values
calcule d_days again
'''
assert len(self)==len(values)
new_days = self.days+values
self.days = new_days # bypass _set_days() because non-sorted asumption
valid_indexs = np.argsort(new_days) # must sort before the values to mantain sequenciality
self.apply_valid_indexs_to_attrs(valid_indexs) # apply valid indexs to all
def add_obs_values(self, values):
'''
This method overrides information!
Always use this method to add values
calcule d_obs again
'''
assert len(self)==len(values)
new_obs = self.obs+values
self._set_obs(new_obs)
    def apply_data_augmentation(self,
        ds_mode=DS_MODE,
        ds_prob=DS_PROB,
        obs_min_lim=0,
        min_valid_length=MIN_POINTS_LIGHTCURVE_DEFINITION,
        min_window_length_frac=MIN_WINDOW_LENGTH_FRAC,
        bypass_prob_window=BYPASS_PROB_WINDOW,
        bypass_prob_dropout=BYPASS_PROB_DROPOUT,
        std_scale=OBSE_STD_SCALE,
        df=DF,
        obs_noise_range=OBS_NOISE_RANGE,
        bypass_prob_obs=BYPASS_PROB_OBS,
        bypass_prob=BYPASS_PROB,
        ):
        # Stochastic in-place augmentation: with prob (1-bypass_prob) apply a
        # random observation window + dropout, then resample the observations
        # with error-scaled noise.
        if random.random()>bypass_prob:
            self.apply_downsampling_window(
                ds_mode=ds_mode,
                ds_prob=ds_prob,
                min_valid_length=min_valid_length,
                min_window_length_frac=min_window_length_frac,
                bypass_prob_window=bypass_prob_window,
                bypass_prob_dropout=bypass_prob_dropout,
                )
            self.add_obs_noise_gaussian(obs_min_lim,
                std_scale=std_scale,
                df=df,
                obs_noise_range=obs_noise_range,
                bypass_prob_obs=bypass_prob_obs,
                )
        return
def apply_downsampling_window(self,
ds_mode=DS_MODE,
ds_prob=DS_PROB,
min_valid_length=MIN_POINTS_LIGHTCURVE_DEFINITION,
min_window_length_frac=MIN_WINDOW_LENGTH_FRAC,
bypass_prob_window=BYPASS_PROB_WINDOW,
bypass_prob_dropout=BYPASS_PROB_DROPOUT,
):
if len(self)<=min_valid_length:
return
valid_mask = np.ones((len(self)), dtype=np.bool)
### mask
if random.random()>bypass_prob_window:
if ds_mode is None or len(ds_mode)==0:
ds_mode = {'none':1}
keys = list(ds_mode.keys())
mode = np.random.choice(keys, p=[ds_mode[k] for k in keys])
window_length = max(min_valid_length, int(min_window_length_frac*len(self)))
if mode=='none':
pass
elif mode=='left':
valid_mask[:] = False
new_length = random.randint(window_length, len(self)) # [a,b]
valid_mask[:new_length] = True
elif mode=='random':
valid_mask[:] = False
new_length = random.randint(window_length, len(self)) # [a,b]
index = random.randint(0, len(self)-new_length) # [a,b]
valid_mask[index:index+new_length] = True
else:
raise Exception(f'no mode {mode}')
### random dropout
if random.random()>bypass_prob_dropout:
assert ds_prob>=0 and ds_prob<=1
if ds_prob>0:
ber_valid_mask = ftnumba.bernoulli(1-ds_prob, len(self))
valid_mask = valid_mask & ber_valid_mask
# print(valid_mask, ber_valid_mask)
if valid_mask.sum()<min_valid_length: # extra case. If by change the mask implies a very short curve
valid_mask = np.zeros((len(self)), dtype=np.bool)
valid_mask[:min_valid_length] = True
valid_mask = valid_mask[np.random.permutation(len(valid_mask))]
### calcule again as the original values changed
self.apply_valid_indexs_to_attrs(valid_mask)
return
    def add_obs_noise_gaussian(self, obs_min_lim:float,
        std_scale=OBSE_STD_SCALE,
        df=DF,
        obs_noise_range=OBS_NOISE_RANGE,
        ):
        '''
        This method overrides information!
        Resamples the observations with error-scaled noise (see
        get_new_noisy_obs); no-op when std_scale==0 or when bypassed.
        '''
        if std_scale==0:
            return
        if random.random()>bypass_prob_obs:
            obs_values = get_new_noisy_obs(self.obs, self.obse, obs_min_lim,
                std_scale,
                df,
                obs_noise_range,
                )
            self.add_obs_values(obs_values-self.obs)
        return

    def apply_valid_indexs_to_attrs(self, valid_indexs):
        '''
        Be careful, this method can remove info!
        Applies the same index selection/permutation to every 1-d ndarray
        attribute (days, obs, obse, ...), keeping them aligned.
        fixme: this function is not opimized... specially due the d_days and that kind of variables
        '''
        original_len = len(self)
        for key in self.__dict__.keys():
            x = self.__dict__[key]
            if isinstance(x, np.ndarray): # apply same mask to all arrays in the object
                assert len(x.shape)==1 # 1D
                assert original_len==len(x), f'{key} {original_len}=={len(x)}'
                new_x = x[valid_indexs]
                setattr(self, key, new_x)
def get_valid_indexs_max_day(self, max_day):
return self.days<=max_day
def clip_attrs_given_max_day(self, max_day):
'''
Be careful, this method remove info!
'''
valid_indexs = self.get_valid_indexs_max_day(max_day)
self.apply_valid_indexs_to_attrs(valid_indexs)
def get_valid_indexs_max_duration(self, max_duration):
return self.days-self.get_first_day()<=max_duration
def clip_attrs_given_max_duration(self, max_duration):
'''
Be careful, this method remove info!
'''
valid_indexs = self.get_valid_indexs_max_duration(max_duration)
self.apply_valid_indexs_to_attrs(valid_indexs)
def get_x(self):
attrs = ['days', 'obs', 'obse']
return self.get_custom_x(attrs)
def get_attr(self, attr:str):
return getattr(self, attr)
def get_custom_x(self, attrs:list):
values = [self.get_attr(attr)[...,None] for attr in attrs]
x = np.concatenate(values, axis=-1)
return x
def get_first_day(self):
return self.days[0]
def get_last_day(self):
return self.days[-1]
def get_days_duration(self):
if len(self)==0:
return None
first_day = self.get_first_day()
last_day = self.get_last_day()
assert last_day>=first_day
return last_day-first_day
def copy(self):
return copy(self)
def __copy__(self):
new_sublco = SubLCO(
copy(self.days),
copy(self.obs),
copy(self.obse),
self.y,
self.dtype,
)
new_sublco.set_synthetic_mode(self.get_synthetic_mode())
for key in self.__dict__.keys():
if key in ['days', 'obs', 'obse']:
continue
v = self.__dict__[key]
if isinstance(v, np.ndarray):
setattr(new_sublco, key, copy(v))
return new_sublco
def __len__(self):
l = len(self.days)
assert l==len(self.obs)
assert l==len(self.obse)
return l
def __repr__(self):
txt = f'[d={self.days}{self.days.dtype}'
txt += f'; o={self.obs}{self.obs.dtype}'
txt += f'; oe={self.obse}{self.obse.dtype}]'
return txt
    def clean_small_cadence(self,
        dt=CADENCE_THRESHOLD,
        mode='expectation',
        ):
        '''
        Collapses groups of observations closer than `dt` days into a single
        point. mode:
          - 'mean': average days/obs/obse within each group
          - 'min_obse': keep the most precise (smallest obse) observation
          - 'expectation': weighted average with weights ~ 1/(obse+EPS)
        '''
        # Greedy grouping: each group spans [day, day+dt) from its first point.
        ddict = {}
        i = 0
        while i<len(self.days):
            day = self.days[i]
            valid_indexs = np.where((self.days>=day) & (self.days<day+dt))[0]
            ddict[day] = valid_indexs
            i += len(valid_indexs)

        new_days = []
        new_obs = []
        new_obse = []
        for k in ddict.keys():
            if mode=='mean':
                new_days.append(np.mean(self.days[ddict[k]]))
                new_obs.append(np.mean(self.obs[ddict[k]]))
                new_obse.append(np.mean(self.obse[ddict[k]]))
            elif mode=='min_obse':
                i = np.argmin(self.obse[ddict[k]])
                new_days.append(self.days[ddict[k]][i])
                new_obs.append(self.obs[ddict[k]][i])
                new_obse.append(self.obse[ddict[k]][i])
            elif mode=='expectation':
                # weights proportional to 1/(obse+EPS), normalized to sum to 1
                obse_exp = np.exp(-np.log(self.obse[ddict[k]]+EPS))
                assert len(np.where(obse_exp==np.inf)[0])==0
                dist = obse_exp/obse_exp.sum()
                new_days.append(np.sum(self.days[ddict[k]]*dist))
                new_obs.append(np.sum(self.obs[ddict[k]]*dist))
                new_obse.append(np.sum(self.obse[ddict[k]]*dist))
            else:
                raise Exception(f'no mode {mode}')
        self.set_values(new_days, new_obs, new_obse)
def get_snr(self,
alpha=10,
beta=1e-10,
max_len=None,
):
if len(self)==0:
return np.nan
else:
max_len = len(self) if max_len is None else max_len
snr = (self.obs[:max_len]**2)/(alpha*self.obse[:max_len]**2+beta)
return np.mean(snr)
def get_min_brightness(self,
return_idx=False,
):
idx = None,
min_brightness = np.nan
if len(self)>0:
if self.flux_type:
idx = np.argmin(self.obs)
else:
idx = np.argmax(self.obs)
min_brightness = self.obs[idx]
if return_idx:
return min_brightness, idx
else:
return min_brightness
def get_max_brightness(self,
return_idx=False,
):
idx = None,
max_brightness = np.nan
if len(self)>0:
if self.flux_type:
idx = np.argmax(self.obs)
else:
idx = np.argmin(self.obs)
max_brightness = self.obs[idx]
if return_idx:
return max_brightness, idx
else:
return max_brightness
def get_mean_brightness(self):
if len(self)==0:
return np.nan
else:
return np.mean(self.obs)
def get_max_brightness_time(self):
if len(self)==0:
return np.nan
else:
_, idx = self.get_max_brightness(return_idx=True)
tmax = self.days[idx]
return tmax
    def __add__(self, other):
        # Merge two single-band curves into a new time-sorted curve.
        # Accepting None/0 lets `sum([...])` work over lists of SubLCO.
        if self is None or self==0:
            return copy(other)
        if other is None or other==0:
            return copy(self)
        if type(self)==SubLCO and type(other)==SubLCO:
            new_days = np.concatenate([self.days, other.days], axis=0)
            new_obs = np.concatenate([self.obs, other.obs], axis=0)
            new_obse = np.concatenate([self.obse, other.obse], axis=0)
            valid_indexs = np.argsort(new_days) # keep chronological order
            new_lco = SubLCO(
                new_days[valid_indexs],
                new_obs[valid_indexs],
                new_obse[valid_indexs],
                self.y,
                self.dtype,
                )
            return new_lco
        assert 0

    def __radd__(self, other):
        return self+other

    def astype(self, dtype):
        # In-place dtype conversion of every ndarray attribute; returns self.
        self.dtype = dtype
        for key in self.__dict__.keys():
            x = self.__dict__[key]
            if isinstance(x, np.ndarray): # convert every per-point array
                new_x = x.astype(self.dtype)
                setattr(self, key, new_x)
        return self
###################################################################################################################################################
class LCO():
	'''
	Dataclass object used to store a multiband astronomical light curve.

	Each photometric band is stored as a SubLCO attribute (added via add_b);
	self.bands keeps the list of band names. "Serial" methods operate over
	the concatenation of all bands sorted by time.
	'''
	def __init__(self,
		is_flux:bool=True,
		y:int=None,
		ra:float=None,
		dec:float=None,
		z:float=None,
		):
		'''
		is_flux: True when observations are fluxes (False presumably magnitudes).
		y: integer class label (None if unlabeled).
		ra/dec: sky coordinates. z: redshift.
		'''
		self.is_flux = is_flux
		self.set_y(y)
		self.ra = ra
		self.dec = dec
		self.z = z
		self.reset()
	def reset(self):
		# drop all band bookkeeping; bands must be re-added with add_b()
		self.bands = []
	def convert_to_magnitude(self):
		# delegate the conversion to every per-band sub light curve
		for b in self.bands:
			self.get_b(b).convert_to_magnitude()
	def convert_to_flux(self):
		# delegate the conversion to every per-band sub light curve
		for b in self.bands:
			self.get_b(b).convert_to_flux()
	def add_bands(self, band_dict,
		reset_time_offset=RESET_TIME_OFFSET,
		):
		'''
		Add several bands at once. band_dict maps band name -> (days, obs, obse).
		Optionally shifts all days so the earliest observation is at day 0.
		'''
		bands = band_dict.keys()
		for b in bands:
			args = band_dict[b]
			self.add_b(b, *args)
		if reset_time_offset:
			self.reset_day_offset_serial()
	def add_b(self, b:str, days, obs, obse):
		'''
		Always use this method to add a band (wraps the arrays in a SubLCO).
		'''
		sublcobj = SubLCO(days, obs, obse,
			y=self.y,
			)
		self.add_sublcobj_b(b, sublcobj)
	def add_sublcobj_b(self, b:str, sublcobj):
		# the serial pseudo-band name is reserved and cannot be a real band
		assert not b==SERIAL_CHAR
		setattr(self, b, sublcobj)
		if not b in self.bands:
			self.bands += [b]
	def copy_only_metadata(self):
		# new LCO with the same metadata but no band data
		new_lco = LCO(
			is_flux=self.is_flux,
			y=self.y,
			ra=self.ra,
			dec=self.dec,
			z=self.z,
			)
		return new_lco
	def copy(self):
		return copy(self)
	def __copy__(self):
		# deep-ish copy: metadata plus a copy of every SubLCO
		new_lco = LCO(
			is_flux=self.is_flux,
			y=self.y,
			ra=self.ra,
			dec=self.dec,
			z=self.z,
			)
		for b in self.bands:
			new_sublcobj = copy(self.get_b(b))
			new_lco.add_sublcobj_b(b, new_sublcobj)
		return new_lco
	def set_y(self, y:int):
		'''
		Always use this method to set the label (normalizes it to int or None).
		'''
		self.y = None if y is None else int(y)
	def __repr__(self):
		txt = ''
		for b in self.bands:
			obj = self.get_b(b)
			txt += f'({b}:{len(obj)}) - {str(obj)}\n'
		return txt
	def __len__(self):
		# total number of observations across all bands
		return sum([len(self.get_b(b)) for b in self.bands])
	### serial/multi-band important methods
	def add_first_day(self, first_day):
		# shift all bands' days by first_day (sign included by the caller)
		for b in self.bands:
			self.get_b(b).days = self.get_b(b).days+first_day
	def compute_global_first_day(self):
		# earliest day across all non-empty bands; asserts at least one exists
		first_days = [self.get_b(b).get_first_day() for b in self.get_bands() if len(self.get_b(b))>0]
		assert len(first_days)>0
		global_first_day = min(first_days)
		return global_first_day
	def reset_day_offset_serial(self):
		'''
		delete day offset acording to the first day along any band!
		(i.e. shift all days so the global first observation lands at day 0)
		'''
		global_first_day = self.compute_global_first_day()
		self.add_first_day(-global_first_day)
		return self
	def get_sorted_days_indexs_serial(self):
		# indices that sort the band-concatenated days chronologically
		values = [self.get_b(b).days for b in self.get_bands()]
		all_days = np.concatenate(values, axis=0)
		sorted_days_indexs = np.argsort(all_days)
		return sorted_days_indexs
	def get_onehot_serial(self):
		# (len(self), n_bands) boolean one-hot of band membership, time-sorted
		# NOTE(review): np.bool is deprecated/removed in modern NumPy
		# (np.bool_ would be the replacement) — left unchanged here.
		onehot = np.zeros((len(self), len(self.get_bands())), dtype=np.bool)
		index = 0
		for kb,b in enumerate(self.get_bands()):
			l = len(getattr(self, b))
			onehot[index:index+l,kb] = True
			index += l
		sorted_days_indexs = self.get_sorted_days_indexs_serial()
		onehot = onehot[sorted_days_indexs]
		return onehot
	def get_x_serial(self,
		attrs=['days', 'obs', 'obse'],
		):
		# NOTE(review): mutable default argument; harmless while callers never
		# mutate it, but a tuple default would be safer.
		values = [self.get_b(b).get_custom_x(attrs) for b in self.get_bands()]
		x = np.concatenate(values, axis=0)
		sorted_days_indexs = self.get_sorted_days_indexs_serial()
		x = x[sorted_days_indexs]
		return x
	def get_serial_days(self):
		# time-sorted days of all bands as a column array
		serial_days = self.get_x_serial(['days'])
		return serial_days
	def get_serial_diff_days(self):
		# consecutive time differences over the serial (all-band) curve
		serial_days = self.get_serial_days()[:,0]
		serial_diff_days = diff_vector(serial_days,
			uses_prepend=True,
			prepended_value=None,
			)
		return serial_diff_days
	def get_parallel_days(self):
		# band name -> days array, per band
		parallel_days = {}
		for b in self.get_bands():
			days = self.get_b(b).days
			parallel_days[b] = days
		return parallel_days
	def get_parallel_diff_days(self,
		generates_mb=True,
		):
		# per-band time differences, each prepended with the global first day;
		# optionally also includes the merged (serial) pseudo-band
		global_first_day = self.compute_global_first_day()
		bands = copy(self.get_bands())
		if generates_mb:
			self.generate_mb()
		if hasattr(self, 'merged_band'):
			bands += [SERIAL_CHAR]
		parallel_diff_days = {}
		for b in bands:
			days = self.get_b(b).days
			diff_days = diff_vector(days,
				uses_prepend=True,
				prepended_value=global_first_day,
				)
			parallel_diff_days[b] = diff_days
		return parallel_diff_days
	def get_serial_days_duration(self):
		'''
		Duration in days of complete light curve
		'''
		serial_days = self.get_serial_days()
		duration = np.max(serial_days)-np.min(serial_days)
		return duration
	def get_b(self, b:str):
		# SERIAL_CHAR is a pseudo-band resolving to the merged band
		if b==SERIAL_CHAR:
			return self.get_mb()
		else:
			return getattr(self, b)
	def generate_mb(self):
		# relies on SubLCO.__add__/__radd__ so sum() merges the bands
		self.merged_band = sum([self.get_b(b) for b in self.get_bands()]) # generate
	def get_mb(self):
		# always regenerates the merged band before returning it
		self.generate_mb()
		return self.merged_band
	def clip_attrs_given_max_day(self, max_day):
		# drop observations later than max_day in every band
		for b in self.get_bands():
			self.get_b(b).clip_attrs_given_max_day(max_day)
	def get_bands(self):
		return self.bands
	def get_length_b(self, b:str):
		return len(self.get_b(b))
	def get_length_bdict(self):
		# band name -> number of observations
		return {b:self.get_length_b(b) for b in self.get_bands()}
	def any_synthetic(self):
		return any([self.get_b(b).is_synthetic() for b in self.get_bands()])
	def all_synthetic(self):
		return all([self.get_b(b).is_synthetic() for b in self.get_bands()])
	def any_real(self):
		return any([not self.get_b(b).is_synthetic() for b in self.get_bands()])
	def all_real(self):
		return all([not self.get_b(b).is_synthetic() for b in self.get_bands()])
	def any_band_eqover_length(self,
		th_length=MIN_POINTS_LIGHTCURVE_DEFINITION,
		):
		# True when at least one band has >= th_length observations
		return any([len(self.get_b(b))>=th_length for b in self.get_bands()])
	def clean_small_cadence(self,
		dt=CADENCE_THRESHOLD,
		mode='expectation',
		):
		# merge observations closer than dt days, per band
		for b in self.get_bands():
			self.get_b(b).clean_small_cadence(dt, mode)
	def get_snr(self):
		# band name -> signal-to-noise ratio
		snr_d = {b:self.get_b(b).get_snr() for b in self.get_bands()}
		return snr_d
	def get_tmax(self):
		# band name -> time of maximum brightness
		tmax_d = {b:self.get_b(b).get_tmax() for b in self.bands}
		return tmax_d
| [
"numpy.clip",
"numpy.mean",
"numpy.all",
"numpy.random.choice",
"numpy.where",
"numpy.log",
"numpy.argmax",
"numpy.max",
"numpy.argsort",
"numpy.array",
"random.random",
"numpy.sum",
"numpy.concatenate",
"numpy.min",
"numpy.argmin",
"copy.copy"
] | [((1613, 1647), 'numpy.clip', 'np.clip', (['new_obs', 'min_lim', 'max_lim'], {}), '(new_obs, min_lim, max_lim)\n', (1620, 1647), True, 'import numpy as np\n'), ((1659, 1694), 'numpy.clip', 'np.clip', (['new_obs', 'obs_min_lim', 'None'], {}), '(new_obs, obs_min_lim, None)\n', (1666, 1694), True, 'import numpy as np\n'), ((1014, 1051), 'numpy.concatenate', 'np.concatenate', (['[x0[None], x]'], {'axis': '(0)'}), '([x0[None], x], axis=0)\n', (1028, 1051), True, 'import numpy as np\n'), ((4406, 4426), 'numpy.argsort', 'np.argsort', (['new_days'], {}), '(new_days)\n', (4416, 4426), True, 'import numpy as np\n'), ((9304, 9335), 'numpy.concatenate', 'np.concatenate', (['values'], {'axis': '(-1)'}), '(values, axis=-1)\n', (9318, 9335), True, 'import numpy as np\n'), ((9662, 9672), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (9666, 9672), False, 'from copy import copy, deepcopy\n'), ((14943, 14953), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (14947, 14953), False, 'from copy import copy, deepcopy\n'), ((16230, 16260), 'numpy.concatenate', 'np.concatenate', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (16244, 16260), True, 'import numpy as np\n'), ((16284, 16304), 'numpy.argsort', 'np.argsort', (['all_days'], {}), '(all_days)\n', (16294, 16304), True, 'import numpy as np\n'), ((16824, 16854), 'numpy.concatenate', 'np.concatenate', (['values'], {'axis': '(0)'}), '(values, axis=0)\n', (16838, 16854), True, 'import numpy as np\n'), ((2340, 2354), 'copy.copy', 'copy', (['self.obs'], {}), '(self.obs)\n', (2344, 2354), False, 'from copy import copy, deepcopy\n'), ((2371, 2386), 'copy.copy', 'copy', (['self.obse'], {}), '(self.obse)\n', (2375, 2386), False, 'from copy import copy, deepcopy\n'), ((2649, 2663), 'copy.copy', 'copy', (['self.obs'], {}), '(self.obs)\n', (2653, 2663), False, 'from copy import copy, deepcopy\n'), ((2679, 2694), 'copy.copy', 'copy', (['self.obse'], {}), '(self.obse)\n', (2683, 2694), False, 'from copy import copy, deepcopy\n'), 
((3313, 3345), 'numpy.array', 'np.array', (['days'], {'dtype': 'self.dtype'}), '(days, dtype=self.dtype)\n', (3321, 3345), True, 'import numpy as np\n'), ((3420, 3451), 'numpy.array', 'np.array', (['obs'], {'dtype': 'self.dtype'}), '(obs, dtype=self.dtype)\n', (3428, 3451), True, 'import numpy as np\n'), ((3529, 3561), 'numpy.array', 'np.array', (['obse'], {'dtype': 'self.dtype'}), '(obse, dtype=self.dtype)\n', (3537, 3561), True, 'import numpy as np\n'), ((3889, 3905), 'numpy.all', 'np.all', (['(obs >= 0)'], {}), '(obs >= 0)\n', (3895, 3905), True, 'import numpy as np\n'), ((4036, 4053), 'numpy.all', 'np.all', (['(obse >= 0)'], {}), '(obse >= 0)\n', (4042, 4053), True, 'import numpy as np\n'), ((5211, 5226), 'random.random', 'random.random', ([], {}), '()\n', (5224, 5226), False, 'import random\n'), ((6049, 6064), 'random.random', 'random.random', ([], {}), '()\n', (6062, 6064), False, 'import random\n'), ((6194, 6246), 'numpy.random.choice', 'np.random.choice', (['keys'], {'p': '[ds_mode[k] for k in keys]'}), '(keys, p=[ds_mode[k] for k in keys])\n', (6210, 6246), True, 'import numpy as np\n'), ((6808, 6823), 'random.random', 'random.random', ([], {}), '()\n', (6821, 6823), False, 'import random\n'), ((7670, 7685), 'random.random', 'random.random', ([], {}), '()\n', (7683, 7685), False, 'import random\n'), ((9721, 9736), 'copy.copy', 'copy', (['self.days'], {}), '(self.days)\n', (9725, 9736), False, 'from copy import copy, deepcopy\n'), ((9741, 9755), 'copy.copy', 'copy', (['self.obs'], {}), '(self.obs)\n', (9745, 9755), False, 'from copy import copy, deepcopy\n'), ((9760, 9775), 'copy.copy', 'copy', (['self.obse'], {}), '(self.obse)\n', (9764, 9775), False, 'from copy import copy, deepcopy\n'), ((11727, 11739), 'numpy.mean', 'np.mean', (['snr'], {}), '(snr)\n', (11734, 11739), True, 'import numpy as np\n'), ((12467, 12484), 'numpy.mean', 'np.mean', (['self.obs'], {}), '(self.obs)\n', (12474, 12484), True, 'import numpy as np\n'), ((12727, 12738), 'copy.copy', 
'copy', (['other'], {}), '(other)\n', (12731, 12738), False, 'from copy import copy, deepcopy\n'), ((12782, 12792), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (12786, 12792), False, 'from copy import copy, deepcopy\n'), ((12859, 12906), 'numpy.concatenate', 'np.concatenate', (['[self.days, other.days]'], {'axis': '(0)'}), '([self.days, other.days], axis=0)\n', (12873, 12906), True, 'import numpy as np\n'), ((12920, 12965), 'numpy.concatenate', 'np.concatenate', (['[self.obs, other.obs]'], {'axis': '(0)'}), '([self.obs, other.obs], axis=0)\n', (12934, 12965), True, 'import numpy as np\n'), ((12980, 13027), 'numpy.concatenate', 'np.concatenate', (['[self.obse, other.obse]'], {'axis': '(0)'}), '([self.obse, other.obse], axis=0)\n', (12994, 13027), True, 'import numpy as np\n'), ((13046, 13066), 'numpy.argsort', 'np.argsort', (['new_days'], {}), '(new_days)\n', (13056, 13066), True, 'import numpy as np\n'), ((18042, 18061), 'numpy.max', 'np.max', (['serial_days'], {}), '(serial_days)\n', (18048, 18061), True, 'import numpy as np\n'), ((18062, 18081), 'numpy.min', 'np.min', (['serial_days'], {}), '(serial_days)\n', (18068, 18081), True, 'import numpy as np\n'), ((10515, 10568), 'numpy.where', 'np.where', (['((self.days >= day) & (self.days < day + dt))'], {}), '((self.days >= day) & (self.days < day + dt))\n', (10523, 10568), True, 'import numpy as np\n'), ((11886, 11905), 'numpy.argmin', 'np.argmin', (['self.obs'], {}), '(self.obs)\n', (11895, 11905), True, 'import numpy as np\n'), ((11925, 11944), 'numpy.argmax', 'np.argmax', (['self.obs'], {}), '(self.obs)\n', (11934, 11944), True, 'import numpy as np\n'), ((12206, 12225), 'numpy.argmax', 'np.argmax', (['self.obs'], {}), '(self.obs)\n', (12215, 12225), True, 'import numpy as np\n'), ((12245, 12264), 'numpy.argmin', 'np.argmin', (['self.obs'], {}), '(self.obs)\n', (12254, 12264), True, 'import numpy as np\n'), ((3246, 3256), 'copy.copy', 'copy', (['days'], {}), '(days)\n', (3250, 3256), False, 'from copy import 
copy, deepcopy\n'), ((3355, 3364), 'copy.copy', 'copy', (['obs'], {}), '(obs)\n', (3359, 3364), False, 'from copy import copy, deepcopy\n'), ((3462, 3472), 'copy.copy', 'copy', (['obse'], {}), '(obse)\n', (3466, 3472), False, 'from copy import copy, deepcopy\n'), ((10042, 10049), 'copy.copy', 'copy', (['v'], {}), '(v)\n', (10046, 10049), False, 'from copy import copy, deepcopy\n'), ((10734, 10762), 'numpy.mean', 'np.mean', (['self.days[ddict[k]]'], {}), '(self.days[ddict[k]])\n', (10741, 10762), True, 'import numpy as np\n'), ((10783, 10810), 'numpy.mean', 'np.mean', (['self.obs[ddict[k]]'], {}), '(self.obs[ddict[k]])\n', (10790, 10810), True, 'import numpy as np\n'), ((10832, 10860), 'numpy.mean', 'np.mean', (['self.obse[ddict[k]]'], {}), '(self.obse[ddict[k]])\n', (10839, 10860), True, 'import numpy as np\n'), ((10896, 10926), 'numpy.argmin', 'np.argmin', (['self.obse[ddict[k]]'], {}), '(self.obse[ddict[k]])\n', (10905, 10926), True, 'import numpy as np\n'), ((11246, 11280), 'numpy.sum', 'np.sum', (['(self.days[ddict[k]] * dist)'], {}), '(self.days[ddict[k]] * dist)\n', (11252, 11280), True, 'import numpy as np\n'), ((11299, 11332), 'numpy.sum', 'np.sum', (['(self.obs[ddict[k]] * dist)'], {}), '(self.obs[ddict[k]] * dist)\n', (11305, 11332), True, 'import numpy as np\n'), ((11352, 11386), 'numpy.sum', 'np.sum', (['(self.obse[ddict[k]] * dist)'], {}), '(self.obse[ddict[k]] * dist)\n', (11358, 11386), True, 'import numpy as np\n'), ((11109, 11142), 'numpy.log', 'np.log', (['(self.obse[ddict[k]] + EPS)'], {}), '(self.obse[ddict[k]] + EPS)\n', (11115, 11142), True, 'import numpy as np\n'), ((11157, 11185), 'numpy.where', 'np.where', (['(obse_exp == np.inf)'], {}), '(obse_exp == np.inf)\n', (11165, 11185), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def projective_transform(img, points_img, points_another, width, height):
    """Warp *img* onto another image plane via a homography.

    Parameters
    ----------
    img : numpy.ndarray
        Input image (grayscale, BGR, or already BGRA).
    points_img : list of lists
        Corresponding points in ``img``.
    points_another : list of lists
        Corresponding points in the other image.
    width, height : int
        Size of the generated image.

    Returns
    -------
    numpy.ndarray
        The warped image as a BGRA color image.
    """
    # Normalize the input to a 4-channel BGRA image.
    if img.ndim == 2:
        bgra = cv2.cvtColor(img, cv2.COLOR_GRAY2BGRA)
    elif img.ndim == 3:
        bgra = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
    else:
        bgra = img
    src_pts = np.float32(points_img)
    dst_pts = np.float32(points_another)
    # Estimate the homography mapping src -> dst (method 0: regular least squares).
    homography, _mask = cv2.findHomography(src_pts, dst_pts, 0)
    # Transparent border so out-of-frame regions stay fully transparent.
    return cv2.warpPerspective(
        bgra,
        homography,
        (width, height),
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(0, 0, 0, 0)
    )
| [
"cv2.warpPerspective",
"numpy.float32",
"cv2.cvtColor",
"cv2.findHomography"
] | [((661, 683), 'numpy.float32', 'np.float32', (['points_img'], {}), '(points_img)\n', (671, 683), True, 'import numpy as np\n'), ((705, 731), 'numpy.float32', 'np.float32', (['points_another'], {}), '(points_another)\n', (715, 731), True, 'import numpy as np\n'), ((763, 812), 'cv2.findHomography', 'cv2.findHomography', (['points_img', 'points_another', '(0)'], {}), '(points_img, points_another, 0)\n', (781, 812), False, 'import cv2\n'), ((832, 938), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(width, height)'], {'borderMode': 'cv2.BORDER_CONSTANT', 'borderValue': '(0, 0, 0, 0)'}), '(img, M, (width, height), borderMode=cv2.BORDER_CONSTANT,\n borderValue=(0, 0, 0, 0))\n', (851, 938), False, 'import cv2\n'), ((497, 535), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGRA'], {}), '(img, cv2.COLOR_GRAY2BGRA)\n', (509, 535), False, 'import cv2\n'), ((574, 611), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2BGRA'], {}), '(img, cv2.COLOR_BGR2BGRA)\n', (586, 611), False, 'import cv2\n')] |
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
def spec_proj(W, k, alg=3):
    """Spectral projection onto the k lowest eigenvectors of a graph Laplacian.

    Parameters
    ----------
    W : sparse matrix
        Symmetric (weighted) adjacency matrix.
    k : int
        Number of eigenpairs to compute.
    alg : int
        1 -> unnormalized Laplacian L = D - W;
        2 -> generalized problem L v = lambda D v;
        anything else -> symmetric normalized Laplacian with row-normalized
        output vectors.

    Returns
    -------
    V : ndarray
        Projection vectors (row-normalized for the default algorithm).
    D : ndarray
        (k, k) dense diagonal matrix of eigenvalues.
    V1 : ndarray
        Raw eigenvectors before row normalization.
    """
    n = W.shape[0]
    degree = sp.spdiags(np.sum(W, 1).T, 0, n, n)
    laplacian = degree - W
    if alg == 1:
        # Shift-invert around sigma=-1 targets the smallest eigenvalues.
        eigvals, V1 = eigsh(laplacian, k, sigma=-1, tol=1e-6, return_eigenvectors=True)
        V = V1
    elif alg == 2:
        # Generalized eigenproblem L v = lambda * D v.
        eigvals, V1 = eigsh(laplacian, k, degree, sigma=-1, tol=1e-6, return_eigenvectors=True)
        V = V1
    else:
        # Symmetric normalized Laplacian D^{-1/2} L D^{-1/2}.
        d_inv_sqrt = sp.spdiags(np.power(np.sum(W, 1), (-1 / 2)).T, 0, n, n)
        sym_laplacian = d_inv_sqrt @ laplacian @ d_inv_sqrt
        eigvals, V1 = eigsh(sym_laplacian, k, sigma=-1, tol=1e-6, return_eigenvectors=True)
        # Row-normalize the eigenvectors to unit length.
        row_scale = sp.spdiags(np.power(np.sum(np.multiply(V1, V1), 1).T, (-1 / 2)), 0, n, n)
        V = row_scale @ V1
    D = sp.spdiags(eigvals, 0, k, k).toarray()
    return V, D, V1
| [
"numpy.sum",
"numpy.multiply",
"scipy.sparse.spdiags",
"scipy.sparse.linalg.eigsh"
] | [((223, 281), 'scipy.sparse.linalg.eigsh', 'eigsh', (['L', 'k'], {'sigma': '(-1)', 'tol': '(1e-06)', 'return_eigenvectors': '(True)'}), '(L, k, sigma=-1, tol=1e-06, return_eigenvectors=True)\n', (228, 281), False, 'from scipy.sparse.linalg import eigsh\n'), ((150, 162), 'numpy.sum', 'np.sum', (['W', '(1)'], {}), '(W, 1)\n', (156, 162), True, 'import numpy as np\n'), ((325, 388), 'scipy.sparse.linalg.eigsh', 'eigsh', (['L', 'k', 'deg'], {'sigma': '(-1)', 'tol': '(1e-06)', 'return_eigenvectors': '(True)'}), '(L, k, deg, sigma=-1, tol=1e-06, return_eigenvectors=True)\n', (330, 388), False, 'from scipy.sparse.linalg import eigsh\n'), ((527, 588), 'scipy.sparse.linalg.eigsh', 'eigsh', (['Lsym', 'k'], {'sigma': '(-1)', 'tol': '(1e-06)', 'return_eigenvectors': '(True)'}), '(Lsym, k, sigma=-1, tol=1e-06, return_eigenvectors=True)\n', (532, 588), False, 'from scipy.sparse.linalg import eigsh\n'), ((691, 713), 'scipy.sparse.spdiags', 'sp.spdiags', (['E', '(0)', 'k', 'k'], {}), '(E, 0, k, k)\n', (701, 713), True, 'import scipy.sparse as sp\n'), ((444, 456), 'numpy.sum', 'np.sum', (['W', '(1)'], {}), '(W, 1)\n', (450, 456), True, 'import numpy as np\n'), ((623, 642), 'numpy.multiply', 'np.multiply', (['V1', 'V1'], {}), '(V1, V1)\n', (634, 642), True, 'import numpy as np\n')] |
# 4 of case study
import numpy as np
np.random.seed(123)  # fixed seed so the walk is reproducible
# Starting position on the staircase.
step = 50
# Roll a six-sided die.
dice = np.random.randint(1, 7)
# 1-2: step down once, 3-5: step up once, 6: roll again and jump ahead.
if dice <= 2:
    step -= 1
elif dice <= 5:
    step += 1
else:
    step += np.random.randint(1, 7)
# Print out dice and step
print(dice)
print(step)
| [
"numpy.random.randint",
"numpy.random.seed"
] | [((37, 56), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (51, 56), True, 'import numpy as np\n'), ((107, 130), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (124, 130), True, 'import numpy as np\n'), ((254, 277), 'numpy.random.randint', 'np.random.randint', (['(1)', '(7)'], {}), '(1, 7)\n', (271, 277), True, 'import numpy as np\n')] |
import tarfile
import numpy as np
import os
import sys
import h5py
from scipy import ndimage
import random
import pickle
from matplotlib import pyplot as plt
def maybe_extract(filename, force=False):
    """Extract *filename* (a .tar.gz archive) into the current directory.

    Extraction is skipped when the target root directory already exists,
    unless *force* is True.  Returns the sorted list of sub-directories
    found under the root.
    """
    root = os.path.splitext(os.path.splitext(filename)[0])[0]  # remove .tar.gz
    if os.path.isdir(root) and not force:
        # You may override by setting force=True.
        print('%s already present - Skipping extraction of %s.' % (root, filename))
    else:
        print('Extracting data for %s. This may take a while. Please wait.' % root)
        sys.stdout.flush()
        # BUG FIX: use a context manager so the archive handle is closed even
        # when extractall() raises (the original leaked it on error).
        with tarfile.open(filename) as tar:
            tar.extractall()
    data_folders = [
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))]
    print(data_folders)
    return data_folders
# maybe_extract('train.tar.gz')
# maybe_extract('test.tar.gz')
image_size = 64  # Pixel width and height of the final training examples.
pixel_depth = 255.0  # Number of levels per pixel (used to center/scale values).
image_channels = 3  # RGB color images.
RECOGNITION_LENGTH = 5  # Maximum number of digits recognized per image.
ext_ratio = 0.3  # Fractional margin added around a digit bounding box before cropping.
single_digit_size = 64  # Side length a cropped digit is first resized to.
crop_digit_size = 54  # Side length kept after the random-shift crop.
last_percent_reported = None  # State for download_progress_hook: last percentage printed.
sampling_ratio = 0.5  # Fraction of the available images actually processed.
def download_progress_hook(count, blockSize, totalSize):
    """A hook to report the progress of a download. This is mostly intended
    for users with slow internet connections; it reports every 1% change in
    download progress.
    """
    global last_percent_reported
    percent = int(count * blockSize * 100 / totalSize)
    if last_percent_reported == percent:
        return
    # Print the number at every 5%, a dot for the steps in between.
    if percent % 5 == 0:
        sys.stdout.write("%s%%" % percent)
    else:
        sys.stdout.write(".")
    sys.stdout.flush()
    last_percent_reported = percent
def crop_image(label, top, left, height, width, img_data):
    """Crop a single digit from *img_data* and produce a training example.

    The bounding box (top, left, height, width) is enlarged by ext_ratio,
    resized to single_digit_size, randomly shift-cropped to crop_digit_size
    and finally resized to image_size.  *label* is padded with -1 for the
    absent digit slots.

    Returns (image, label), or (None, None) when the crop cannot be resized
    to the expected shape.
    """
    # Enlarge the box by ext_ratio in total (half of it on each side).
    height_shift = height * ext_ratio / 2
    width_shift = width * ext_ratio / 2
    y_start = int(top - height_shift) if top - height_shift >= 0 else 0
    y_end = int(top + height + height_shift)
    # BUG FIX: the guard previously tested `left - width >= 0` although the
    # shift applied is width_shift; now mirrors the y-axis logic above.
    x_start = int(left - width_shift) if left - width_shift >= 0 else 0
    x_end = int(left + width + width_shift)
    single_digit = img_data[y_start: y_end, x_start: x_end, :]
    try:
        zoomed = ndimage.zoom(single_digit,
                (single_digit_size / single_digit.shape[0], single_digit_size / single_digit.shape[1], 1))
    except ZeroDivisionError as e:
        # Degenerate (empty) crop: cannot compute a zoom factor.
        print(e)
        return (None, None)
    if zoomed.shape != (single_digit_size, single_digit_size, image_channels):
        return (None, None)
    single_digit = zoomed
    # Random shift for data augmentation before the final crop.
    shift = random.randint(0, single_digit_size - crop_digit_size)
    single_digit = single_digit[shift:, shift:, :]
    try:
        zoomed = ndimage.zoom(single_digit, (image_size / single_digit.shape[0], image_size / single_digit.shape[1], 1))
    except ZeroDivisionError as e:
        print(e)
        return (None, None)
    if zoomed.shape != (image_size, image_size, image_channels):
        return (None, None)
    # Single digit: fill the remaining label slots with -1.
    label.extend([-1 for _ in range(RECOGNITION_LENGTH - 1)])
    return zoomed, label
def prepare_datasets(mat_file, isExtended=True):
    """Build (datasets, labels) arrays from an SVHN digitStruct.mat file.

    Each label row is <L, s1..s5> where L is the digit count and absent
    digit slots are filled with -1.  When *isExtended* is True, extra
    single-digit examples are generated by cropping each digit.
    """
    root = os.path.dirname(mat_file)
    img_meta = h5py.File(mat_file, 'r')
    name_dataset = img_meta['digitStruct']['name']
    bbox_dataset = img_meta['digitStruct']['bbox']
    img_num = int(name_dataset.shape[0] * sampling_ratio)
    dataset_len = img_num
    if isExtended:
        # compute the size of the generated (extended) dataset
        for idx in range(img_num):
            dataset_len += img_meta[bbox_dataset[idx, 0]]['label'].shape[0]
    print('datasets size: ' + str((dataset_len, image_size, image_size, image_channels)))
    datasets = np.ndarray(shape=(dataset_len, image_size, image_size, image_channels), dtype=np.float32)
    labels = np.ndarray(shape=(dataset_len, RECOGNITION_LENGTH + 1), dtype=np.int32)  # <L, s1, s2, s3, s4, s5>, if si is absent, then set to -1.
    actual_num_imgs = 0
    print('Processing data for %s. This may take a while. Please wait.' % mat_file)
    for idx in range(img_num):
        download_progress_hook(idx, 1, img_num)
        num_digits = img_meta[bbox_dataset[idx, 0]]['label'].shape[0]
        if num_digits > RECOGNITION_LENGTH:
            continue
        img_name = ''.join([chr(cha) for cha in img_meta[name_dataset[idx, 0]][:, 0]])
        img_path = os.path.join(root, img_name)
        # Center and scale pixel values to roughly [-0.5, 0.5].
        # NOTE(review): ndimage.imread was removed in modern SciPy — left unchanged.
        img_data = (ndimage.imread(img_path).astype(float) - pixel_depth / 2) / pixel_depth
        zoomed = ndimage.zoom(img_data, (image_size/img_data.shape[0], image_size/img_data.shape[1], 1))
        if zoomed.shape != (image_size, image_size, image_channels):
            continue
        datasets[actual_num_imgs] = zoomed
        example_label = [num_digits]
        if num_digits == 1:
            # number '0' is represented as 10 in SVHN datasets.
            digit_label = int(img_meta[bbox_dataset[idx, 0]]['label'][0, 0]) % 10
            example_label.append(digit_label)
        else:
            for label_idx in range(num_digits):
                example_label.append(int(img_meta[img_meta[bbox_dataset[idx,0]]['label'][label_idx, 0]][0, 0]) % 10)
        # BUG FIX: pad with -1 for BOTH branches; previously single-digit
        # labels stayed length 2 and broke the labels[...] row assignment.
        example_label.extend([-1 for _ in range(RECOGNITION_LENGTH - num_digits)])
        labels[actual_num_imgs] = example_label
        actual_num_imgs += 1
        if isExtended:
            # crop single digit(s) to sample more examples.
            if num_digits == 1:
                label = [1, int(img_meta[bbox_dataset[idx, 0]]['label'][0, 0]) % 10]
                top = img_meta[bbox_dataset[idx, 0]]['top'][0, 0]
                left = img_meta[bbox_dataset[idx, 0]]['left'][0, 0]
                height = img_meta[bbox_dataset[idx, 0]]['height'][0, 0]
                width = img_meta[bbox_dataset[idx, 0]]['width'][0, 0]
                digit_img, label = crop_image(label, top, left, height, width, img_data)
                # BUG FIX: `digit_img != None` on an ndarray is elementwise and
                # raises "truth value is ambiguous"; identity test is correct.
                if digit_img is not None:
                    datasets[actual_num_imgs] = digit_img
                    labels[actual_num_imgs] = label
                    actual_num_imgs += 1
            else:
                for label_idx in range(num_digits):
                    label = [1, int(img_meta[img_meta[bbox_dataset[idx,0]]['label'][label_idx, 0]][0, 0]) % 10]
                    top = img_meta[img_meta[bbox_dataset[idx,0]]['top'][label_idx, 0]][0, 0]
                    left = img_meta[img_meta[bbox_dataset[idx,0]]['left'][label_idx, 0]][0, 0]
                    height = img_meta[img_meta[bbox_dataset[idx,0]]['height'][label_idx, 0]][0, 0]
                    width = img_meta[img_meta[bbox_dataset[idx,0]]['width'][label_idx, 0]][0, 0]
                    digit_img, label = crop_image(label, top, left, height, width, img_data)
                    # BUG FIX: identity comparison (see above).
                    if digit_img is not None:
                        datasets[actual_num_imgs] = digit_img
                        labels[actual_num_imgs] = label
                        actual_num_imgs += 1
    # Trim the preallocated arrays down to the examples actually produced.
    datasets = datasets[:actual_num_imgs, :, :, :]
    labels = labels[:actual_num_imgs, :]
    print('actual dataset size: ' + str(datasets.shape))
    print('actual lables size: ' + str(labels.shape))
    return datasets, labels
def pickle_dataset(pickle_file, save):
    """Serialize *save* to *pickle_file* with the highest pickle protocol.

    On failure the error is reported and re-raised.
    """
    try:
        # BUG FIX: 'with' guarantees the file handle is closed even when
        # pickle.dump raises (the original leaked it on error).
        with open(pickle_file, 'wb') as f:
            pickle.dump(save, f, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
# train_datasets, train_labels = prepare_datasets('train/digitStruct.mat')
# pickle_dataset('train.pickle', {'datasets': train_datasets, 'labels': train_labels})
# del train_datasets
# del train_labels
# test_datasets, test_labels = prepare_datasets('test/digitStruct.mat', isExtended=False)
# pickle_dataset('test.pickle', {'datasets': test_datasets, 'labels': test_labels})
# del test_datasets
# del test_labels
def check_dataset(pickle_file):
    """Load a pickled dataset and visually inspect three samples (indices 75..77)."""
    with open(pickle_file, 'rb') as f:
        payload = pickle.load(f)
        images, targets = payload['datasets'], payload['labels']
        print('dataset size: ', images.shape)
        plt.figure()
        for offset in range(3):
            plt.imshow(images[75 + offset])
            print('label: ', targets[75 + offset])
        plt.show()
# NOTE(review): executes at import/run time and requires 'mini_train.pickle'
# to exist in the current working directory.
check_dataset('mini_train.pickle')
# create mini test dataset.
def create_mini_dataset(ori_file, mini_file):
    """Copy the first 100 examples of a pickled dataset into a smaller pickle."""
    with open(ori_file, 'rb') as src:
        full = pickle.load(src)
    subset = {
        'datasets': full['datasets'][:100],
        'labels': full['labels'][:100]
    }
    del full  # release the big payload before writing
    with open(mini_file, 'wb') as dst:
        try:
            pickle.dump(subset, dst, pickle.HIGHEST_PROTOCOL)
        except Exception as e:
            print('Unable to save data to ', mini_file, ':', e)
            raise
# create_mini_dataset('train.pickle', 'mini_train.pickle')
# create_mini_dataset('test.pickle', 'mini_test.pickle')
| [
"matplotlib.pyplot.imshow",
"tarfile.open",
"pickle.dump",
"os.listdir",
"scipy.ndimage.zoom",
"matplotlib.pyplot.show",
"os.path.join",
"pickle.load",
"os.path.splitext",
"h5py.File",
"scipy.ndimage.imread",
"os.path.dirname",
"matplotlib.pyplot.figure",
"os.path.isdir",
"numpy.ndarray"... | [((2678, 2732), 'random.randint', 'random.randint', (['(0)', '(single_digit_size - crop_digit_size)'], {}), '(0, single_digit_size - crop_digit_size)\n', (2692, 2732), False, 'import random\n'), ((3337, 3362), 'os.path.dirname', 'os.path.dirname', (['mat_file'], {}), '(mat_file)\n', (3352, 3362), False, 'import os\n'), ((3378, 3402), 'h5py.File', 'h5py.File', (['mat_file', '"""r"""'], {}), "(mat_file, 'r')\n", (3387, 3402), False, 'import h5py\n'), ((3878, 3971), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(dataset_len, image_size, image_size, image_channels)', 'dtype': 'np.float32'}), '(shape=(dataset_len, image_size, image_size, image_channels),\n dtype=np.float32)\n', (3888, 3971), True, 'import numpy as np\n'), ((3981, 4052), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(dataset_len, RECOGNITION_LENGTH + 1)', 'dtype': 'np.int32'}), '(shape=(dataset_len, RECOGNITION_LENGTH + 1), dtype=np.int32)\n', (3991, 4052), True, 'import numpy as np\n'), ((289, 308), 'os.path.isdir', 'os.path.isdir', (['root'], {}), '(root)\n', (302, 308), False, 'import os\n'), ((566, 588), 'tarfile.open', 'tarfile.open', (['filename'], {}), '(filename)\n', (578, 588), False, 'import tarfile\n'), ((597, 615), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (613, 615), False, 'import sys\n'), ((690, 711), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (702, 711), False, 'import os\n'), ((2309, 2431), 'scipy.ndimage.zoom', 'ndimage.zoom', (['single_digit', '(single_digit_size / single_digit.shape[0], single_digit_size /\n single_digit.shape[1], 1)'], {}), '(single_digit, (single_digit_size / single_digit.shape[0], \n single_digit_size / single_digit.shape[1], 1))\n', (2321, 2431), False, 'from scipy import ndimage\n'), ((2810, 2917), 'scipy.ndimage.zoom', 'ndimage.zoom', (['single_digit', '(image_size / single_digit.shape[0], image_size / single_digit.shape[1], 1)'], {}), '(single_digit, (image_size / single_digit.shape[0], image_size 
/\n single_digit.shape[1], 1))\n', (2822, 2917), False, 'from scipy import ndimage\n'), ((4542, 4570), 'os.path.join', 'os.path.join', (['root', 'img_name'], {}), '(root, img_name)\n', (4554, 4570), False, 'import os\n'), ((4723, 4818), 'scipy.ndimage.zoom', 'ndimage.zoom', (['img_data', '(image_size / img_data.shape[0], image_size / img_data.shape[1], 1)'], {}), '(img_data, (image_size / img_data.shape[0], image_size /\n img_data.shape[1], 1))\n', (4735, 4818), False, 'from scipy import ndimage\n'), ((7566, 7611), 'pickle.dump', 'pickle.dump', (['save', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(save, f, pickle.HIGHEST_PROTOCOL)\n', (7577, 7611), False, 'import pickle\n'), ((8230, 8244), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8241, 8244), False, 'import pickle\n'), ((8369, 8381), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8379, 8381), True, 'from matplotlib import pyplot as plt\n'), ((8683, 8697), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8694, 8697), False, 'import pickle\n'), ((1562, 1596), 'sys.stdout.write', 'sys.stdout.write', (["('%s%%' % percent)"], {}), "('%s%%' % percent)\n", (1578, 1596), False, 'import sys\n'), ((1609, 1627), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1625, 1627), False, 'import sys\n'), ((1654, 1675), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (1670, 1675), False, 'import sys\n'), ((1688, 1706), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1704, 1706), False, 'import sys\n'), ((8423, 8453), 'matplotlib.pyplot.imshow', 'plt.imshow', (['datasets[75 + idx]'], {}), '(datasets[75 + idx])\n', (8433, 8453), True, 'from matplotlib import pyplot as plt\n'), ((8509, 8519), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8517, 8519), True, 'from matplotlib import pyplot as plt\n'), ((230, 256), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (246, 256), False, 'import os\n'), ((728, 744), 'os.listdir', 
'os.listdir', (['root'], {}), '(root)\n', (738, 744), False, 'import os\n'), ((771, 792), 'os.path.join', 'os.path.join', (['root', 'd'], {}), '(root, d)\n', (783, 792), False, 'import os\n'), ((9021, 9076), 'pickle.dump', 'pickle.dump', (['mini_save', 'mini_f', 'pickle.HIGHEST_PROTOCOL'], {}), '(mini_save, mini_f, pickle.HIGHEST_PROTOCOL)\n', (9032, 9076), False, 'import pickle\n'), ((4633, 4657), 'scipy.ndimage.imread', 'ndimage.imread', (['img_path'], {}), '(img_path)\n', (4647, 4657), False, 'from scipy import ndimage\n')] |
import numpy as np
import os
from .utils.utils import get_yolo_boxes, makedirs
def evaluate_full(model,
                  generator,
                  obj_thresh = 0.5,
                  nms_thresh = 0.5,
                  net_h = 416,
                  net_w = 416,
                  save_path = ""):
    """Evaluate *model* on *generator* and return the (mAP, AP) pair at index 0.

    Thin wrapper: runs detection with predict_boxes(), then scores the
    detections against the ground truth with evaluate_coco().
    """
    detections, annotations = predict_boxes(
        model, generator, obj_thresh, nms_thresh, net_h, net_w, save_path)
    mean_ap, ap = evaluate_coco(model, generator, detections, annotations)
    # evaluate_coco keys its results per class; entry 0 is returned here.
    return mean_ap[0], ap[0]
def predict_boxes(model,
              generator,
              obj_thresh = 0.5,
              nms_thresh = 0.5,
              net_h = 416,
              net_w = 416,
              save_path = ""):
    """Run the detector over every image in *generator*.

    Returns (all_detections, all_annotations), each indexed as
    [image_index][class_label] -> array of rows.  Detection rows are
    [xmin, ymin, xmax, ymax, score] sorted by descending score; annotation
    rows are the first four annotation columns (box coordinates; column 4
    of the raw annotation holds the class label).  When *save_path* is
    non-empty, detections are also written to that file.
    """
    # gather all detections and annotations
    all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
    # Open file for output
    save = len(save_path) > 0
    f = None
    if save:
        dir_path = os.path.split(save_path)[0] + "/"
        if not os.path.isdir(dir_path):
            makedirs(dir_path)
        # NOTE(review): f is opened here but never closed in this function.
        f = open(save_path, "w")
    for i in range(generator.size()):
        raw_image = [generator.load_image(i)]
        # Write image name to file
        if save:
            f.write("# " + generator.img_filename(i) + "\n")
        # make the boxes and the labels
        pred_boxes = get_yolo_boxes(model, raw_image, net_h, net_w, generator.get_anchors(), obj_thresh, nms_thresh)[0]
        score = np.array([box.get_score() for box in pred_boxes])
        pred_labels = np.array([box.label for box in pred_boxes])
        if len(pred_boxes) > 0:
            # One row per box: [xmin, ymin, xmax, ymax, score].
            pred_boxes = np.array([[box.xmin, box.ymin, box.xmax, box.ymax, box.get_score()] for box in pred_boxes])
        else:
            # Empty (1, 0) array; indexing it with the empty sort order below
            # collapses it to an empty (0, 0) array.
            pred_boxes = np.array([[]])
        # sort the boxes and the labels according to scores
        score_sort = np.argsort(-score)
        pred_labels = pred_labels[score_sort]
        pred_boxes = pred_boxes[score_sort]
        # copy detections to all_detections
        for label in range(generator.num_classes()):
            all_detections[i][label] = pred_boxes[pred_labels == label, :]
            # Write detection to file
            if save:
                for d in all_detections[i][label]:
                    # Written as: x y width height score
                    face_str = '{:.1f} {:.1f} {:.1f} {:.1f} {:f}\n'.format(d[0], d[1], d[2] - d[0], d[3] - d[1], d[4])
                    f.write(face_str)
        annotations = generator.load_annotation(i)
        # copy detections to all_annotations
        for label in range(generator.num_classes()):
            all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
    return all_detections, all_annotations
def evaluate_coco(model,
             generator,
             all_detections,
             all_annotations,
             iou_start = 0.5,
             iou_step = 0.05,
             num_iou = 10):
    """COCO-style evaluation: AP computed at each IoU threshold
    iou_start, iou_start + iou_step, ... (num_iou values) from
    precomputed detections and annotations, then averaged.

    # Arguments
        model           : unused here -- detections are already computed
        generator       : provides num_classes() and size()
        all_detections  : [image][class] -> (N, 5) rows [x0,y0,x1,y1,score]
        all_annotations : [image][class] -> (M, 4) ground-truth boxes
        iou_start       : first IoU threshold of the sweep
        iou_step        : spacing between successive thresholds
        num_iou         : number of thresholds
    # Returns
        (mean_average_precisions, average_precisions): dicts keyed by
        class label -- the AP averaged over all thresholds, and the AP
        at the first threshold only (captured when j == 0 below).
    """
    # Average AP over many IoU thresholds
    iou_thresh_lst = np.array([iou_start + i * iou_step for i in range(num_iou)])
    # compute mAP by comparing all detections and all annotations
    mean_average_precisions = {}
    average_precisions = {}
    for label in range(generator.num_classes()):
        # one FP/TP accumulator per IoU threshold, filled in parallel
        false_positives = [np.zeros((0,)) for j in range(num_iou)]
        true_positives = [np.zeros((0,)) for j in range(num_iou)]
        scores = np.zeros((0,))
        num_annotations = 0.0
        for i in range(generator.size()):
            detections = all_detections[i][label]
            annotations = all_annotations[i][label]
            num_annotations += annotations.shape[0]
            detected_annotations = []
            for d in detections:
                scores = np.append(scores, d[4])
                if annotations.shape[0] == 0:
                    # no ground truth in this image: detection is a FP everywhere
                    for j in range(num_iou):
                        false_positives[j] = np.append(false_positives[j], 1)
                        true_positives[j] = np.append(true_positives[j], 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
                # length-1 array holding the index of the best-overlapping annotation
                assigned_annotation = np.argmax(overlaps, axis=1)
                max_overlap = overlaps[0, assigned_annotation]
                if assigned_annotation in detected_annotations:
                    # annotation already claimed by an earlier detection: FP at every threshold
                    for j in range(num_iou):
                        false_positives[j] = np.append(false_positives[j], 1)
                        true_positives[j] = np.append(true_positives[j], 0)
                else:
                    for j, iou_thresh in enumerate(iou_thresh_lst):
                        if max_overlap >= iou_thresh:
                            false_positives[j] = np.append(false_positives[j], 0)
                            true_positives[j] = np.append(true_positives[j], 1)
                        else:
                            false_positives[j] = np.append(false_positives[j], 1)
                            true_positives[j] = np.append(true_positives[j], 0)
                    # claim the annotation if any threshold was passed
                    # (i.e. the lowest one, as thresholds ascend)
                    if (max_overlap >= iou_thresh_lst).any():
                        detected_annotations.append(assigned_annotation)
        # no annotations -> AP for this class is 0 (is this correct?)
        if num_annotations == 0:
            mean_average_precisions[label] = 0
            average_precisions[label] = 0
            continue
        # sort by score
        indices = np.argsort(-scores)
        recall = [np.zeros((0,)) for j in range(num_iou)]
        precision = [np.zeros((0,)) for j in range(num_iou)]
        average_precision = 0.0
        for j in range(num_iou):
            false_positives[j] = false_positives[j][indices]
            true_positives[j] = true_positives[j][indices]
            # compute false positives and true positives
            false_positives[j] = np.cumsum(false_positives[j])
            true_positives[j] = np.cumsum(true_positives[j])
            # compute recall and precision
            recall[j] = true_positives[j] / num_annotations
            precision[j] = true_positives[j] / np.maximum(true_positives[j] + false_positives[j], np.finfo(np.float64).eps)
            # compute average precision
            average_precision = average_precision + compute_ap(recall[j], precision[j])
            if j == 0:
                # AP at the first threshold is reported separately
                average_precisions[label] = average_precision
        mean_average_precisions[label] = average_precision / float(num_iou)
    return mean_average_precisions, average_precisions
def evaluate_pascal(model,
                    generator,
                    all_detections,
                    all_annotations,
                    iou_threshold = 0.5):
    """Per-class PASCAL-VOC average precision at a single IoU threshold.

    Evaluation logic follows https://github.com/fizyr/keras-retinanet.
    # Arguments
        model           : unused here -- detections are already computed
        generator       : provides num_classes() and size()
        all_detections  : [image][class] -> (N, 5) rows [x0,y0,x1,y1,score]
        all_annotations : [image][class] -> (M, 4) ground-truth boxes
        iou_threshold   : minimum IoU for a detection to count as a match
    # Returns
        A dict mapping each class label to its average precision.
    """
    average_precisions = {}
    for label in range(generator.num_classes()):
        fp = np.zeros((0,))
        tp = np.zeros((0,))
        scores = np.zeros((0,))
        num_annotations = 0.0
        for img_idx in range(generator.size()):
            dets = all_detections[img_idx][label]
            annots = all_annotations[img_idx][label]
            num_annotations += annots.shape[0]
            matched = []          # annotations already claimed in this image
            for det in dets:
                scores = np.append(scores, det[4])
                # image without ground truth: every detection is a false positive
                if annots.shape[0] == 0:
                    fp = np.append(fp, 1)
                    tp = np.append(tp, 0)
                    continue
                overlaps = compute_overlap(np.expand_dims(det, axis=0), annots)
                best = np.argmax(overlaps, axis=1)
                best_iou = overlaps[0, best]
                if best_iou >= iou_threshold and best not in matched:
                    fp = np.append(fp, 0)
                    tp = np.append(tp, 1)
                    matched.append(best)
                else:
                    # overlap too small, or annotation already claimed
                    fp = np.append(fp, 1)
                    tp = np.append(tp, 0)
        # class absent from the dataset -> AP defined as 0
        if num_annotations == 0:
            average_precisions[label] = 0
            continue
        # rank detections by descending score, then accumulate
        order = np.argsort(-scores)
        fp = np.cumsum(fp[order])
        tp = np.cumsum(tp[order])
        recall = tp / num_annotations
        precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
        average_precisions[label] = compute_ap(recall, precision)
    return average_precisions
def compute_overlap(a, b):
    """Pairwise IoU between two box sets.

    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    Parameters
    ----------
    a: (N, 4) ndarray of float -- boxes as [x0, y0, x1, y1]
    b: (K, 4) ndarray of float
    Returns
    -------
    (N, K) ndarray with the IoU of every a-box against every b-box.
    """
    a_areas = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    b_areas = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    # intersection extents, broadcasting a over b via the added axis
    inter_w = np.minimum(np.expand_dims(a[:, 2], axis=1), b[:, 2]) - np.maximum(np.expand_dims(a[:, 0], 1), b[:, 0])
    inter_h = np.minimum(np.expand_dims(a[:, 3], axis=1), b[:, 3]) - np.maximum(np.expand_dims(a[:, 1], 1), b[:, 1])
    # disjoint boxes yield negative extents; clamp them to zero
    inter_w = np.maximum(inter_w, 0)
    inter_h = np.maximum(inter_h, 0)
    intersection = inter_w * inter_h
    union = np.expand_dims(a_areas, axis=1) + b_areas - intersection
    # guard against division by zero for degenerate boxes
    union = np.maximum(union, np.finfo(float).eps)
    return intersection / union
def compute_ap(recall, precision):
    """Area under the precision/recall curve (VOC-style AP).

    Code originally from https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall: monotonically increasing recall curve (list).
        precision: matching precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # pad with sentinels so the envelope and the deltas are well defined
    mrec = np.concatenate(([0.], recall, [1.]))
    mpre = np.concatenate(([0.], precision, [0.]))
    # make precision monotonically non-increasing (right-to-left envelope)
    for idx in range(mpre.size - 1, 0, -1):
        mpre[idx - 1] = np.maximum(mpre[idx - 1], mpre[idx])
    # indices where recall changes value: each contributes a rectangle
    change = np.where(mrec[1:] != mrec[:-1])[0]
    # sum of (delta recall) * interpolated precision
    return np.sum((mrec[change + 1] - mrec[change]) * mpre[change + 1])
| [
"numpy.where",
"numpy.argmax",
"os.path.split",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.zeros",
"os.path.isdir",
"numpy.append",
"numpy.concatenate",
"numpy.expand_dims",
"numpy.finfo",
"numpy.cumsum",
"numpy.maximum"
] | [((10891, 10908), 'numpy.maximum', 'np.maximum', (['iw', '(0)'], {}), '(iw, 0)\n', (10901, 10908), True, 'import numpy as np\n'), ((10918, 10935), 'numpy.maximum', 'np.maximum', (['ih', '(0)'], {}), '(ih, 0)\n', (10928, 10935), True, 'import numpy as np\n'), ((11595, 11633), 'numpy.concatenate', 'np.concatenate', (['([0.0], recall, [1.0])'], {}), '(([0.0], recall, [1.0]))\n', (11609, 11633), True, 'import numpy as np\n'), ((11643, 11684), 'numpy.concatenate', 'np.concatenate', (['([0.0], precision, [0.0])'], {}), '(([0.0], precision, [0.0]))\n', (11657, 11684), True, 'import numpy as np\n'), ((12007, 12052), 'numpy.sum', 'np.sum', (['((mrec[i + 1] - mrec[i]) * mpre[i + 1])'], {}), '((mrec[i + 1] - mrec[i]) * mpre[i + 1])\n', (12013, 12052), True, 'import numpy as np\n'), ((1831, 1874), 'numpy.array', 'np.array', (['[box.label for box in pred_boxes]'], {}), '([box.label for box in pred_boxes])\n', (1839, 1874), True, 'import numpy as np\n'), ((2161, 2179), 'numpy.argsort', 'np.argsort', (['(-score)'], {}), '(-score)\n', (2171, 2179), True, 'import numpy as np\n'), ((3682, 3696), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (3690, 3696), True, 'import numpy as np\n'), ((5733, 5752), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (5743, 5752), True, 'import numpy as np\n'), ((8048, 8062), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8056, 8062), True, 'import numpy as np\n'), ((8089, 8103), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8097, 8103), True, 'import numpy as np\n'), ((8130, 8144), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8138, 8144), True, 'import numpy as np\n'), ((9640, 9659), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (9650, 9659), True, 'import numpy as np\n'), ((9841, 9867), 'numpy.cumsum', 'np.cumsum', (['false_positives'], {}), '(false_positives)\n', (9850, 9867), True, 'import numpy as np\n'), ((9894, 9919), 'numpy.cumsum', 'np.cumsum', 
(['true_positives'], {}), '(true_positives)\n', (9903, 9919), True, 'import numpy as np\n'), ((11785, 11817), 'numpy.maximum', 'np.maximum', (['mpre[i - 1]', 'mpre[i]'], {}), '(mpre[i - 1], mpre[i])\n', (11795, 11817), True, 'import numpy as np\n'), ((11925, 11956), 'numpy.where', 'np.where', (['(mrec[1:] != mrec[:-1])'], {}), '(mrec[1:] != mrec[:-1])\n', (11933, 11956), True, 'import numpy as np\n'), ((1293, 1316), 'os.path.isdir', 'os.path.isdir', (['dir_path'], {}), '(dir_path)\n', (1306, 1316), False, 'import os\n'), ((2064, 2078), 'numpy.array', 'np.array', (['[[]]'], {}), '([[]])\n', (2072, 2078), True, 'import numpy as np\n'), ((3549, 3563), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (3557, 3563), True, 'import numpy as np\n'), ((3616, 3630), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (3624, 3630), True, 'import numpy as np\n'), ((5771, 5785), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (5779, 5785), True, 'import numpy as np\n'), ((5832, 5846), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (5840, 5846), True, 'import numpy as np\n'), ((6149, 6178), 'numpy.cumsum', 'np.cumsum', (['false_positives[j]'], {}), '(false_positives[j])\n', (6158, 6178), True, 'import numpy as np\n'), ((6212, 6240), 'numpy.cumsum', 'np.cumsum', (['true_positives[j]'], {}), '(true_positives[j])\n', (6221, 6240), True, 'import numpy as np\n'), ((10677, 10708), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 2]'], {'axis': '(1)'}), '(a[:, 2], axis=1)\n', (10691, 10708), True, 'import numpy as np\n'), ((10732, 10758), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 0]', '(1)'], {}), '(a[:, 0], 1)\n', (10746, 10758), True, 'import numpy as np\n'), ((10789, 10820), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 3]'], {'axis': '(1)'}), '(a[:, 3], axis=1)\n', (10803, 10820), True, 'import numpy as np\n'), ((10844, 10870), 'numpy.expand_dims', 'np.expand_dims', (['a[:, 1]', '(1)'], {}), '(a[:, 1], 1)\n', (10858, 10870), True, 'import numpy as np\n'), 
((10946, 11011), 'numpy.expand_dims', 'np.expand_dims', (['((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]))'], {'axis': '(1)'}), '((a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1]), axis=1)\n', (10960, 11011), True, 'import numpy as np\n'), ((11054, 11069), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (11062, 11069), True, 'import numpy as np\n'), ((1244, 1268), 'os.path.split', 'os.path.split', (['save_path'], {}), '(save_path)\n', (1257, 1268), False, 'import os\n'), ((4044, 4067), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (4053, 4067), True, 'import numpy as np\n'), ((4477, 4504), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (4486, 4504), True, 'import numpy as np\n'), ((8492, 8515), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (8501, 8515), True, 'import numpy as np\n'), ((8860, 8887), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (8869, 8887), True, 'import numpy as np\n'), ((4399, 4424), 'numpy.expand_dims', 'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (4413, 4424), True, 'import numpy as np\n'), ((8601, 8630), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (8610, 8630), True, 'import numpy as np\n'), ((8669, 8697), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (8678, 8697), True, 'import numpy as np\n'), ((8782, 8807), 'numpy.expand_dims', 'np.expand_dims', (['d'], {'axis': '(0)'}), '(d, axis=0)\n', (8796, 8807), True, 'import numpy as np\n'), ((9099, 9128), 'numpy.append', 'np.append', (['false_positives', '(0)'], {}), '(false_positives, 0)\n', (9108, 9128), True, 'import numpy as np\n'), ((9167, 9195), 'numpy.append', 'np.append', (['true_positives', '(1)'], {}), '(true_positives, 1)\n', (9176, 9195), True, 'import numpy as np\n'), ((9325, 9354), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), 
'(false_positives, 1)\n', (9334, 9354), True, 'import numpy as np\n'), ((9393, 9421), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (9402, 9421), True, 'import numpy as np\n'), ((10095, 10115), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (10103, 10115), True, 'import numpy as np\n'), ((4205, 4237), 'numpy.append', 'np.append', (['false_positives[j]', '(1)'], {}), '(false_positives[j], 1)\n', (4214, 4237), True, 'import numpy as np\n'), ((4283, 4314), 'numpy.append', 'np.append', (['true_positives[j]', '(0)'], {}), '(true_positives[j], 0)\n', (4292, 4314), True, 'import numpy as np\n'), ((4731, 4763), 'numpy.append', 'np.append', (['false_positives[j]', '(1)'], {}), '(false_positives[j], 1)\n', (4740, 4763), True, 'import numpy as np\n'), ((4809, 4840), 'numpy.append', 'np.append', (['true_positives[j]', '(0)'], {}), '(true_positives[j], 0)\n', (4818, 4840), True, 'import numpy as np\n'), ((6446, 6466), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (6454, 6466), True, 'import numpy as np\n'), ((5034, 5066), 'numpy.append', 'np.append', (['false_positives[j]', '(0)'], {}), '(false_positives[j], 0)\n', (5043, 5066), True, 'import numpy as np\n'), ((5116, 5147), 'numpy.append', 'np.append', (['true_positives[j]', '(1)'], {}), '(true_positives[j], 1)\n', (5125, 5147), True, 'import numpy as np\n'), ((5227, 5259), 'numpy.append', 'np.append', (['false_positives[j]', '(1)'], {}), '(false_positives[j], 1)\n', (5236, 5259), True, 'import numpy as np\n'), ((5309, 5340), 'numpy.append', 'np.append', (['true_positives[j]', '(0)'], {}), '(true_positives[j], 0)\n', (5318, 5340), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 11 21:43:14 2018
@author: robot
"""
import plotly.plotly as py
import plotly.graph_objs as go
import plotly
import random
import numpy as np
import copy as cp
import copy
import readCfg.read_cfg as rd
from IPython.display import HTML,display
import colorlover as cl
import math
#import read_cfg
class Pnt:
    """A simple 2-D point with x/y coordinates (defaults to the origin)."""
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
    def pnt2dict(self):
        """Return the point as a plain ``{'x': ..., 'y': ...}`` dict.

        BUGFIX: the original built ``dict(x=x, y=y)`` from bare names
        ``x``/``y`` that do not exist in this scope (NameError); it must
        read the instance attributes.
        """
        dic = dict(x=self.x, y=self.y)
        return dic
    def display(self):
        # debugging helper: print both coordinates
        print('x = ', self.x, 'y = ', self.y)
class Circle:
    """Circle given by a centre point and a radius.

    Stores the centre (x, y), the radius, and the bounding-box corners
    (x0, y0)-(x1, y1) that plotly 'circle' shapes expect.
    """
    def __init__(self, pnt=None, rad=0):
        # BUGFIX: ``pnt=Pnt()`` was a mutable default argument evaluated
        # once and shared across calls; build a fresh point per call.
        if pnt is None:
            pnt = Pnt()
        self.x = pnt.x
        self.y = pnt.y
        self.rad = rad
        self.x0 = self.x - self.rad
        self.y0 = self.y - self.rad
        self.x1 = self.x + self.rad
        self.y1 = self.y + self.rad
    def circle2dict(self):
        """Return the plotly shape dict describing this circle."""
        dic = dict()
        dic['type'] = 'circle'
        dic['xref'] = 'x'
        dic['yref'] = 'y'
        dic['x0'] = self.x0
        dic['y0'] = self.y0
        dic['x1'] = self.x1
        dic['y1'] = self.y1
        dic['line'] = dict(color = 'rgba(50, 171, 96, 1)')
        return dic
class Line:
    """Line segment between two points, exportable as a plotly shape."""
    def __init__(self, pnt0=None, pnt1=None):
        # BUGFIX: the ``Pnt()`` defaults were mutable default arguments
        # shared across calls; build fresh endpoints per call instead.
        if pnt0 is None:
            pnt0 = Pnt()
        if pnt1 is None:
            pnt1 = Pnt()
        self.x0 = pnt0.x
        self.y0 = pnt0.y
        self.x1 = pnt1.x
        self.y1 = pnt1.y
    def line2dict(self):
        """Return the plotly shape dict describing this segment."""
        dic = dict()
        dic['type'] = 'line'
        dic['x0'] = self.x0
        dic['y0'] = self.y0
        dic['x1'] = self.x1
        dic['y1'] = self.y1
        dic['line'] = dict(color = 'rgb(128, 0, 128)')
        return dic
class Rect:
    """Axis-aligned rectangle anchored at its lower-left corner."""
    def __init__(self, pnt=None, width=0, height=0):
        # BUGFIX: ``pnt=Pnt()`` was a mutable default argument evaluated
        # once and shared across calls; build a fresh origin per call.
        if pnt is None:
            pnt = Pnt()
        self.x0 = pnt.x
        self.y0 = pnt.y
        self.x1 = self.x0 + width
        self.y1 = self.y0 + height
    def rect2dict(self):
        """Return the plotly shape dict describing this rectangle."""
        dic = dict()
        dic['type'] = 'rect'
        dic['x0'] = self.x0
        dic['y0'] = self.y0
        dic['x1'] = self.x1
        dic['y1'] = self.y1
        dic['line'] = dict(color = 'rgb(128, 0, 128)')
        return dic
def getLevelColor(level):
    """Build an 'rgba(r,g,b,a)' string for a level: the three colour
    channels all equal ``level * 50`` and the alpha is ``1 / level``
    (string formatting matches the original exactly)."""
    channel = str(level * 50)
    return 'rgba(' + ','.join([channel] * 3) + ',' + str(1 / level) + ')'
colorLst = ['white','black']  # cell-value -> fill colour lookup; within this chunk it is referenced only from commented-out lines in Env.addgrid
class Env:
    """Grid-world environment rendered with plotly.

    Attributes:
        mat         -- occupancy grid (value 1 marks an obstacle cell)
        shapeLst    -- accumulated plotly layout shapes (grid cells)
        drawData    -- accumulated plotly traces (robot markers, ...)
        annotations -- plotly layout annotations
        proLevNum   -- number of probability levels (set by callers)
    """
    def __init__(self, mat=None):
        # BUGFIX: ``mat=np.zeros((2, 2))`` was a mutable default argument,
        # evaluated once and shared by every Env(); allocate per instance.
        self.mat = np.zeros((2, 2)) if mat is None else mat
        self.shapeLst = []
        self.drawData = []
        self.annotations = []
        self.proLevNum = 0
    def addgrid(self):
        """Append one unit square per grid cell; obstacle cells
        (value 1) are filled black."""
        g_color = 'blue'
        row = len(self.mat)
        for i in range(row):
            for j in range(len(self.mat[i])):
                pnt = Pnt(i,j)
                rect = Rect(pnt,1,1)
                rectDic = rect.rect2dict()
                rectDic['line']['color'] = g_color
                rectDic['line']['width'] = 0.5
                if(int(self.mat[i][j])==1):
                    rectDic['fillcolor'] = 'black'
                # deepcopy so later edits to rectDic cannot alias stored shapes
                self.shapeLst.append(copy.deepcopy(rectDic))
        print(len(self.shapeLst))
    def addProGrid(self, proLevLst=None):
        """Append one square per cell coloured by its probability level:
        level 0 is drawn black (obstacle), other levels get a YlGnBu
        shade.  ``proLevLst`` is consumed row-major, one value per cell.
        """
        # BUGFIX: ``proLevLst=[]`` was a mutable default argument.
        if proLevLst is None:
            proLevLst = []
        line_color = 'red'
        ind = 0
        row = len(self.mat)
        # 500-step interpolation of the 9-colour YlGnBu scale
        bupu = cl.scales['9']['seq']['YlGnBu']
        bupuNum = cl.interp(bupu,500)
        bupuUnit = math.floor(500/4)
        for i in range(row):
            for j in range(len(self.mat[i])):
                pnt = Pnt(i,j)
                rect = Rect(pnt,1,1)
                rectDic = rect.rect2dict()
                rectDic['line']['color'] = line_color
                rectDic['line']['width'] = 0.5
                if int(proLevLst[ind]) == 0:
                    rectDic['fillcolor'] = 'black'
                else:
                    rectDic['fillcolor'] = bupuNum[int((proLevLst[ind] - 1) *bupuUnit)]
                    rectDic['opacity'] = 0.7
                ind += 1
                self.shapeLst.append(copy.deepcopy(rectDic))
    def addRobotStartPnt(self, lst=None):
        """Add one cross-dot marker trace per robot start cell.

        ``lst`` is ``[row_coords, col_coords]``; markers are centred in
        their cells via a +0.5 offset.
        BUGFIX: the original used a mutable default ([]) and shifted the
        caller's coordinate lists in place; the offset is now applied to
        local copies only.
        """
        if lst is None:
            lst = [[], []]
        for i in range(len(lst[0])):
            cx = lst[0][i] + 0.5
            cy = lst[1][i] + 0.5
            startTrace = go.Scatter(x=[cx], y=[cy], mode='markers',
                                    marker=dict(symbol='cross-dot', size=20),
                                    name='Robot_' + str(i + 1))
            self.drawData.append(startTrace)
    def drawPic(self, name='env', fileType=True):
        """Render the accumulated shapes and traces.

        fileType True writes an offline ``name``.html; False exports a
        ``name``.jpeg through the signed-in plotly account.
        """
        layout = dict()
        layout['shapes'] = self.shapeLst
        # NOTE: the original assigned throwaway {'range': ...} dicts to
        # xaxis/yaxis that were immediately overwritten below; those dead
        # stores were removed.
        layout['xaxis'] = dict(
            autorange=True,
            showgrid=False,
            zeroline=False,
            showline=False,
            autotick=True,
            ticks='',
            showticklabels = False)
        layout['yaxis'] = dict(
            scaleanchor = "x",
            autorange=True,
            showgrid=False,
            zeroline=False,
            showline=False,
            autotick=True,
            ticks='',
            showticklabels = False)
        layout['font'] = dict(
            family='sans-serif',
            size=25,
            color='#000'
            )
        layout['legend'] = dict(font=dict(
            family='sans-serif',
            size=25,
            color='#000'
            ))
        layout['autosize'] = False
        layout['height'] = 1000
        layout['width'] = 1000
        layout['annotations'] = self.annotations
        fig = dict(data = self.drawData ,layout = layout)
        if(fileType):
            plotly.offline.plot(fig,filename = name + '.html',validate=False)
        else:
            py.image.save_as(fig,filename = name+'.jpeg')
def drawIns(cfgFileName = '5_20_20_80_Outdoor_Cfg.txt',drawType = 1,
            fileName = 'nothing',
            fileType = False ):
    """Load a grid/robot configuration file and render the environment.

    # Arguments
        cfgFileName : config file path; it must contain 'data//' and
                      '.dat' for the output-name splits below to work
        drawType    : 1 draws the probability grid plus robot markers
        fileName    : unused within this function
        fileType    : forwarded to Env.drawPic (True -> html, False -> jpeg)
    """
    # SECURITY(review): plotly credentials are hard-coded here; move them
    # into an environment variable or config kept out of version control.
    py.sign_in('tesla_fox', 'HOTRQ3nIOdYUUszDIfgN')
#    conFileDir = './/data//'
#    degNameCfg = conFileDir + cfgFileName
    readCfg = rd.Read_Cfg(cfgFileName)
    # NOTE(review): Read_Cfg.get appears to append values into the list
    # passed as its second argument (hence the pop() calls) -- confirm.
    data = []
    readCfg.get('row',data)
    row = int(data.pop())
    readCfg.get('col',data)
    col = int(data.pop())
    # occupancy grid: 1 marks an obstacle cell
    mat = np.zeros((row,col))
    obRowLst = []
    obColLst = []
    readCfg.get('obRow',obRowLst)
    readCfg.get('obCol',obColLst)
    for i in range(len(obRowLst)):
        obRow = int(obRowLst[i])
        obCol = int(obColLst[i])
        mat[obRow][obCol] = 1
    # robot start coordinates and per-cell probability levels
    robRowLst = []
    robColLst = []
    readCfg.get('robRow',robRowLst)
    readCfg.get('robCol',robColLst)
    proLevLst = []
    readCfg.get('proLevGrid',proLevLst)
    env = Env(mat)
    env.proLevNum = int(readCfg.getSingleVal('proLevNum'))
#    proMat = np.zeros((row,col),dtype = int)
    #case 1 draw Environment
    if(drawType == 1):
#        env.addgrid()
        env.addProGrid(proLevLst = proLevLst)
        robLst = []
        robLst.append(robRowLst)
        robLst.append(robColLst)
        env.addRobotStartPnt(robLst)
        # derive the output name from the part between 'data//' and '.dat'
        cfgFileName = cfgFileName.split('data//')[1]
        cfgFileName = cfgFileName.split('.dat')[0]
        env.drawPic('./png/env_'+cfgFileName,fileType)
    #case 2 draw Environment with edges
if __name__ == '__main__':
    # Demo entry point: render the bundled 20x20 configuration to html.
    drawIns( cfgFileName = './/data//1_20_20_50_Cfg.dat',fileType = True)
    pass
| [
"readCfg.read_cfg.Read_Cfg",
"plotly.plotly.image.save_as",
"plotly.plotly.sign_in",
"math.floor",
"plotly.offline.plot",
"colorlover.interp",
"numpy.zeros",
"copy.deepcopy"
] | [((6172, 6219), 'plotly.plotly.sign_in', 'py.sign_in', (['"""tesla_fox"""', '"""HOTRQ3nIOdYUUszDIfgN"""'], {}), "('tesla_fox', 'HOTRQ3nIOdYUUszDIfgN')\n", (6182, 6219), True, 'import plotly.plotly as py\n'), ((6316, 6340), 'readCfg.read_cfg.Read_Cfg', 'rd.Read_Cfg', (['cfgFileName'], {}), '(cfgFileName)\n', (6327, 6340), True, 'import readCfg.read_cfg as rd\n'), ((6488, 6508), 'numpy.zeros', 'np.zeros', (['(row, col)'], {}), '((row, col))\n', (6496, 6508), True, 'import numpy as np\n'), ((2289, 2305), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (2297, 2305), True, 'import numpy as np\n'), ((3529, 3549), 'colorlover.interp', 'cl.interp', (['bupu', '(500)'], {}), '(bupu, 500)\n', (3538, 3549), True, 'import colorlover as cl\n'), ((3569, 3588), 'math.floor', 'math.floor', (['(500 / 4)'], {}), '(500 / 4)\n', (3579, 3588), False, 'import math\n'), ((5881, 5946), 'plotly.offline.plot', 'plotly.offline.plot', (['fig'], {'filename': "(name + '.html')", 'validate': '(False)'}), "(fig, filename=name + '.html', validate=False)\n", (5900, 5946), False, 'import plotly\n'), ((5973, 6019), 'plotly.plotly.image.save_as', 'py.image.save_as', (['fig'], {'filename': "(name + '.jpeg')"}), "(fig, filename=name + '.jpeg')\n", (5989, 6019), True, 'import plotly.plotly as py\n'), ((3277, 3299), 'copy.deepcopy', 'copy.deepcopy', (['rectDic'], {}), '(rectDic)\n', (3290, 3299), False, 'import copy\n'), ((4189, 4211), 'copy.deepcopy', 'copy.deepcopy', (['rectDic'], {}), '(rectDic)\n', (4202, 4211), False, 'import copy\n')] |
from tabula import read_pdf
import re
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
nlp = en_core_web_sm.load()
import PyPDF2
from dateutil import parser
from fpdf import FPDF
import locale
locale.setlocale(locale.LC_ALL,'')
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import timeit
### Data Frame ###
# Parse every page of the bank-statement PDF into a DataFrame.
# NOTE(review): newer tabula-py versions return a *list* of DataFrames
# for pages='all'; this code assumes a single DataFrame -- confirm the
# installed version behaves that way.
df = read_pdf("C:/Users/<NAME>/Desktop/BS.pdf", pages='all')
### Finding Column Names ###
# Snapshot the raw header names; the keyword searches below map each
# logical field (date, balance, ...) onto a positional column index.
columns = []
for col in df.columns:
    columns.append(col)
### Finding Statement Columns ###
def _find_column(cols, key_words):
    """Return the positional index of the first column whose lower-cased
    header contains any of *key_words*; return '' when nothing matches
    (preserving the original empty-string sentinel)."""
    for idx, name in enumerate(cols):
        if any(word in name.lower() for word in key_words):
            return idx
    return ''

# The five near-identical search loops (date / balance / description /
# debit / credit) are collapsed into the helper above; match order and
# the '' fallback are unchanged.  The *_key_words names are kept at
# module level in case later code references them.
date_key_words = ['date']
balance_key_words = ['balance']
description_key_words = ['description','narrat','particular','service','remark']
debit_key_words = ['debit','withdraw']
credit_key_words = ['credit','deposit']
date_column = _find_column(columns, date_key_words)
balance_column = _find_column(columns, balance_key_words)
description_column = _find_column(columns, description_key_words)
debit_column = _find_column(columns, debit_key_words)
credit_column = _find_column(columns, credit_key_words)
### Extracting Text from PDF ###
# Pull the raw text of the first page with PyPDF2.  page_content is not
# used within this section -- presumably consumed later; verify.
pdf_file = open("C:/Users/<NAME>/Desktop/BS.pdf", 'rb')
# NOTE(review): this rebinding shadows tabula's ``read_pdf`` imported at
# the top of the file; any later tabula call through that name breaks.
read_pdf = PyPDF2.PdfFileReader(pdf_file)
no_pages = read_pdf.getNumPages()
page = read_pdf.getPage(0)
page_content = page.extractText()
pdf_file.close()
### Appending Multiple Description Rows ###
# A transaction whose description wraps over several PDF lines comes out
# as extra rows with a NaN date cell.  Fold the text of every such
# continuation row back into the preceding row, then drop the leftovers.
dropped_rows = []
for i in range(0,len(df)):
    if(str(df.iloc[i][date_column]) == 'nan'):
        # k = first following row that has a real date
        k = i
        while(str(df.iloc[k][date_column]) == 'nan'):
            k += 1
        for j in range(i,k):
            df.iloc[i-1][description_column] += (' ' + df.iloc[j][description_column])
        for z in range(i,k):
            dropped_rows.append(z)
# Deduplicate while preserving order: the scan above restarts at every
# NaN row, so indices inside a run are recorded multiple times.
# NOTE(review): a list comprehension used purely for its side effect --
# unidiomatic but functional.
dropped_rows2 = []
[dropped_rows2.append(x) for x in dropped_rows if x not in dropped_rows2]
# Drop from the highest position downwards so the positional indices of
# the rows still to be dropped remain valid.
for i in range(1,len(dropped_rows2)+1):
    df = df.drop(df.index[dropped_rows2[-i]])
### Date ###
# Normalise every date cell to "DD Mon YYYY".  Rows whose cell cannot
# be parsed hit ``continue`` in the except clause and are therefore
# excluded from date_array as well.
date_array = []
for i in range(0,len(df)):
    try:
        if str(df.iloc[i][date_column]) != 'nan':
            df.iloc[i][date_column] = parser.parse(df.iloc[i][date_column])
            df.iloc[i][date_column] = df.iloc[i][date_column].strftime("%d %b %Y")
    except ValueError:
        continue
    except TypeError:
        continue
    date_array.append(df.iloc[i][date_column])
### Start Date ###
# First row's date: keep both a datetime (sdays, used for the period
# length below) and the display string; fall back to the raw cell value
# when it cannot be parsed.
sdate = df.iloc[0][date_column]
try:
    sdate = parser.parse(sdate)
    sdays = sdate
    sdate = sdate.strftime("%d %b %Y")
except ValueError:
    sdate = sdate
except TypeError:
    sdate = sdate
### End Date ###
# Same handling for the last row (edays / edate).
edate = df.iloc[len(df)-1][date_column]
try:
    edate = parser.parse(edate)
    edays = edate
    edate = edate.strftime("%d %b %Y")
except ValueError:
    edate = edate
except TypeError:
    edate = edate
### Time Delta ###
# Statement period in whole days.  NOTE(review): if either endpoint
# failed to parse, sdays/edays are unbound and the subtraction raises
# NameError, which is *not* caught here -- confirm this is intended.
try:
    time_delta = edays - sdays
    time_delta = time_delta.days
except TypeError:
    time_delta = 'Unknown'
except ValueError:
    time_delta = 'Unknown'
### Debit ###
# Clean each withdrawal cell (strip thousands separators, map NaN to 0)
# and total the column.
# NOTE(review): ``df.iloc[i][col] = ...`` is chained assignment; pandas
# may write to a temporary copy (SettingWithCopy) -- verify the
# mutations actually stick on the installed pandas version.
debit_array = []
for i in range(0,len(df)):
    if (',' in str(df.iloc[i][debit_column])) == True:
        df.iloc[i][debit_column] = re.sub(',','',df.iloc[i][debit_column])
    if str(df.iloc[i][debit_column]) == 'nan':
        df.iloc[i][debit_column] = 0
    try:
        debit_array.append(float(df.iloc[i][debit_column]))
    except ValueError:
        continue
    except TypeError:
        continue
debit = round(sum([x for x in debit_array if str(x) != 'nan']),2)
### Credit ###
# Same cleaning and summing for the deposit column.
credit_array = []
for i in range(0,len(df)):
    if (',' in str(df.iloc[i][credit_column])) == True:
        df.iloc[i][credit_column] = re.sub(',','',df.iloc[i][credit_column])
    if str(df.iloc[i][credit_column]) == 'nan':
        df.iloc[i][credit_column] = 0
    try:
        credit_array.append(float(df.iloc[i][credit_column]))
    except ValueError:
        continue
    except TypeError:
        continue
credit = round(sum([y for y in credit_array if str(y) != 'nan']),2)
### Balance ###
# Strip separators from the running-balance column; non-numeric cells
# (section titles etc.) are skipped rather than zeroed.
balance_array = []
for i in range(0,len(df)):
    if (',' in str(df.iloc[i][balance_column])) == True:
        df.iloc[i][balance_column] = re.sub(',','',df.iloc[i][balance_column])
    try:
        balance_array.append(float(df.iloc[i][balance_column]))
    except ValueError:
        continue
    except TypeError:
        continue
### Start Balance ###
# First row's balance (the re.sub is redundant after the loop above but
# harmless).
sbalance = round(float(re.sub(',','',df.iloc[0][balance_column])),2)
### End Balance ###
ebalance = round(float(re.sub(',','',df.iloc[len(df)-1][balance_column])),2)
### Removing Title Rows ###
# Rows whose balance cell is not numeric are page headers / section
# titles that tabula extracted as data; collect their positions.
title_rows = []
for i in range(0,len(df)):
    try:
        tx = float(df.iloc[i][balance_column])
    except (ValueError, TypeError):
        # narrowed from a bare ``except:`` so unrelated errors still surface
        title_rows.append(i)
# BUGFIX: the original dropped one row per loop iteration, which shifted
# the positional indices of the remaining rows, so later iterations
# could drop the wrong row (or raise).  Dropping all collected labels in
# a single call avoids the shift.
if title_rows:
    df = df.drop(df.index[title_rows])
### Date vs. Balance ###
### Clearing Credit Rows ###
#for i in range(0,len(df)):
# if(str(df.iloc[i][credit_column]) != 'nan'):
# df.iloc[i][description_column] = '----C-R-C----'
### Spending Categories ###
## Housing ##
housing = 0
housing_words = ['housing','property',' hoa ','maintenance','maintain','rent',
'mortgage','home loan','house loan','appartm','complex','condomin',
'service charge','tenant','tenancy','landlord',' land','lease',
'plumb','electrician','septic','cleaning',' maid '
]
# Per-row housing debit amounts (0 where the row is not categorised as housing).
housing_array = []
for i in range(0,len(df)):
    housing_array.append(0)
    for housing_word in housing_words:
        # Debit-only row (credit cell == 0) whose description mentions a housing keyword.
        if ((housing_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Overwrite the description so later keyword passes won't re-match this row.
            # NOTE(review): chained assignment — whether the write reaches `df`
            # depends on pandas version/dtypes; if it doesn't, a row matching
            # several keywords is counted once per keyword — confirm.
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                housing += float(df.iloc[i][debit_column])
                housing_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Transportation ##
transportation = 0
transportation_words = ['transport','limo','taxi',' car ',' dmv ','gas','petrol',
'parking',' toll','transit',' bus','uber','lyft','careem',
'vehicle','metro','subway','tram','underground','carriage',
'train','brt','tire','tyre','oil change','car wash',
'carwash','rail','mover'
]
# Per-row transportation debit amounts (0 for rows not in this category).
transportation_array = []
for i in range(0,len(df)):
    transportation_array.append(0)
    for transportation_word in transportation_words:
        # Debit row whose description contains a transportation keyword.
        if ((transportation_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — see NOTE in the housing pass).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                transportation += float(df.iloc[i][debit_column])
                transportation_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Food ##
food = 0
food_words = ['restau','burger','food','sandwich','steak','grocer','meal',
'mcdonald','lunch','dinner','breakfast','gourmet','wine',
'bar','drink','f&b','beverage','nutri','meat','eat','mexic',
'india','chine','china','thai','korea','vietnam','persia',
'kebab','doner','shawarma','skewer','asia','mediterran',
'ethiop','greek','french','ital','pizza','chicken','dairy',
'claim j','salad','diet','sweet','cake','pastr','cream','ice',
'tea','fish','vegan','vegeta','turke','turki','bukhar',
'noodle','spaghet','macaron','barbe',
'grill','boil','charbroil','broil','cook','chef','fry',
'toast','roast','bak','scorch','dip','choc','sauce','dine',
'cafe','chez','chip','starbuc','wendy','plate','beer',
'liqo','alcohol','kitchen','crust','spic','tandoor','salt',
'soup','egg','balls','taste','caviar','pan','cuisine','chop',
'jar','goat','bagel','bread','biscuit','grape','cherry',
'juice','shake','bbq','pig','crab','frie','lettuce','sheep',
'ocean','hot','green','leaf','kabob','spina','pot','water',
'tavern','grove','flavo','hungry','hunger','serve','caf',
'coffee','dining',' pub ','taco','beef','brisket','smoke','cora',
'shrimp','lobster','avocado','honey','bacon','banana','orange',
'tangerine','pepper','butter','cheese','bloat','pie','bowl',
'brew','bite','candy','cow','pudding','picnic','chop',
'corn','fed ','prawn','culin','cup ','cut ','drip','donut','dough',
'nut','crisp','jam','drop','waffl','espress','capac','feast',
'feed','fig','orchard','fat','horse','potato','dump','fork',
'spoon','knife','fruit','shack','gelat','vodka','tequil',
'onion','organic','farm','free range','chill','salami',
'sausage','healthy','herb','sour','chedder','rice','koffee',
'stalk','frog','liquid','sizzl','chub','lotus','curry','mint',
'koshe','iran','afghan','zilla','nectar','nibbl','garden',
'octopus','olive','shater','tart','pork','pasta','poultr',
'pretzel','latt','burrito','rabbit','fresh','bean','coco','plum',
'rib','yard','royal','palace','sea','snack','mouth','stomach',
'span','slice','splice','sponge','puff','squish','stuff','sugar',
'swallow','pickl','snail','barn','smoothi','milk','tender',
'bery','loaf','jasm','lemon','ingred','menu','mocha','cannib',
'dragon','nose','tease','pigeon','bird','spirit','slaw','thirst',
'velve','tongue','baba','tropic','tuna','biscot','veg','seed',
'ranch','brunch','wasabi','yogurt','froze','freez','appleb',
'arby','buffalo','beavertail','auntie anne','wing','chick',
'crepe','denny','dunkin','five guy','gloria jean','hardee',
'harvey','hooter','<NAME>','the keg','kfc','krisp','ceasar',
'hut','nando','panda','baguet','pollo camp','ponderos','popeye',
'quizno','red robin','ruby tues','recipe','swensen','t.g.i',
'tim horton','tony rom','white cast','yoshino','carl','tast',
'din tai','domino','fast ed','patiss','porche','roost','schnit',
'shingle inn','zambr','zarraf','a&w','florent','time','dunn',
'earls','mario','eleph','joey','keg','king','queen','mary brow',
'panago','tree','salisbur','famil','white spot','the work',
'mostaz','rostip','jensen','roll','bel cant','flunch','hippo',
'kochl','nords','wiene','vapian','goody','café','aydar','annapo',
'bikan','goli','haldira','moshe','murugan','namma','saravan',
'hoka','the count','rcoket','ramsden','leo burdock','mao',
'milano','wagam','sushi','zizzi','bewley','caff','esquire',
'abrake','supermac','spizz','anna mill','gyoza','ippud','kura',
'saizer','sukiy','lotteria','tous les','klg','marrybrown','pelita',
'roti','sate','scr','el poll','sirloin','egon','wimpy','steers',
'cervecer','rodilla','foster','chow','dencio','bacolod','chook',
'jollibee','est.33','chester','gaggan','sirocco','mado','arab',
'falafel','chiquito','frankie','harvester','hotcha','itsu',
'loch fyne','pret a','prezzo','spudulike','strada','table tab',
'veeno','walkabout','yate','manchu','pick up','au bon','cinnab',
'le mad','le pain','chili','heine','robeks','guthri','finger',
'zaxby','baskin','ben &','braum','carvel','friend','graeter',
'dazs','mango','tcby','yogen','big boy','checkers','culver',
'fuddruck','halal','jack in','krystal','mooyah','original tom',
'penguin point','sonic drive','spangle','swenson','drive-in',
'james coney','sneaky pete','la sals','qdoba','tijuana','cicis',
'fazoli','pizzer','capriotti','cousins sub','dibella','eegee',
'erbert &','wrap','jimmy john','mcalister','pita pit',
'primo hoag','schlot','togo','tubby','which wich','arthur trea',
'captain d','long john','bennigan','furr','ground rou','houlihan',
'huddle hou','seasons 52','twin peak','village inn',
'yard hou','benihan','p.f. chang','hopcat','ale hou','first wat',
'ihop','bahama','margarit','max &','chuy','cantin','buca di',
'valentino','bertucci','happy joe','mushroom','maid-rite',
'mccormick &','bar-b-q','barrel','copeland','famous dave',
'fogo de','montana mike','texas de','tony roma','dave &'
]
# Per-row food/dining debit amounts (0 for rows not in this category).
food_array = []
for i in range(0,len(df)):
    food_array.append(0)
    for food_word in food_words:
        # Debit row whose description contains a food keyword.
        if ((food_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised so later passes skip this row
            # (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                food += float(df.iloc[i][debit_column])
                food_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Utilities ##
utilities = 0
utilities_words = ['utilit','electri','kahra','duke en','engie','national gr',
'nextera','edf','enel','dominion res','iberdrol','southern com',
'exelon','kepco','tepco','grid','e.on',' gas',' coal',
'southern cali','power','light','consolidated edi','energ',
'tennessee vall','authority','arizona publ','salt riv',
'municip','public ser','irrig','river','hvac','ventil',
' air','conditioni','heating','sewag',' cable','internet',
'phone',' cell ','public work','pepco',' jea','palm beach',
' emc ',' remc ','avista','idacorp','pacificorp','ameren',
' comed ','nisource','vectren','cleco','entergy','swepco',
'emera','wmeco','nstar','unitil corp','freeborn-mow',
'entergy','ameren','aquila','wapda','orange and r','blue rid',
'district',' peco ',' ecoel','santee coop','city of','luminant',
'synergy','fuel','hydro','enmax','transalta','atco','epcor',
'altalink','churchill falls','lower churchill dev','renewabl',
'rio tinto','Comgás','gaz','poweo','gds','petro','cpc corp',
'ptt','cuadrilla','niccolo','bulb','agl','atmos','conoco',
'eqt','sec vic','alinta','actewagl','summit gro','eletro',
'celesc','cemig','cesp','copel','ceee','cez gr','fortum',
'alterna','nerg','wateur','enercoop','gdf','lampiris','te oui',
' rwe ','enbw','ppc','ntpc','kptcl','mseb','bses','nhpc',
'neyveli lig','damodar val','nuclear','transmission','dakshin g',
'dakshin h','board','gujarat urja','madhya g','paschim g',
'uttar har','rajasthan raj','uttar pradesh','tneb l','nadu gen',
'perusahaan lis','a2a',' acea ',' hera ','sorgenia','tenaga nasio',
'meralco','eskon','endesa','vattenfall','transco','al ain dis',
'bin moosa &','aadc',' pipe','tabreed al','utico','sesco',
'telstra','optus','vodafone','telecom','aapt','primus','network',
'transact',' oi ','ooredoo','vivo','irancell','rightel','taliya',
'hamra','telefô','astraqom','babytel','bell can','bce inc','bell ali',
'northerntel','ontera','mt&t','at&t','newtel','nbtel','islandtel',
'northwestel','télébec','communicat','citywest','cogeco','comwave',
'distributel','dmts','eastlink','fido','mobile','iristel','wireless',
'wifi','novus','sasktel','sene ip','signal','sogotel','tbaytel',
'teksavvy','telus','vidéotron','vonage','pccw','orange s.a','sfr',
'telesur','digicel','opt pol','tikiphone','o2','dsl','t-home',
'broadband','3 hk','csl1010','smartone','budgetalk','gts h',
'media ex','invitel','telekom','ups magy','telenor','airtel','bsnl',
' jio ','mtnl','indosat','sateli','telkom','smartfen',' axis ',
'xl axi','hiweb','mtce','media','bezeq','cellcom',
'voicenter','cellular','phone','fastweb','iliad','wind tre',
'tiscali','kddi','ntt','lg u','zain kuw','maxis','dotcom','celcom',
'red one',' tune','y-max','axtel','telmex','movistar','telcel',
'totalplay','spark new','chorus lim','2degree','etisalat','ufone',
'wateen','worldcall','wi-tri','warid','vimpelcom','megafon','mts',
'tele2',' motiv ','zain saudi','vodacom','meotel','cell c','glocalnet',
' telia ','nordisk mob','halebop','swedfone','spring mob','bredband',
'howsip','swisscom','upc swit',' vibo ','turkcell',' du ','bt group',
'kcom gr',' ee ',' hutchison ','talktalk','centurylink','comcast',
' sprint ','verizon','altice','cincinnati bell','crown cast','idt corp',
'vonage','zayo group','acn inc',' gci ','singtel','teleserv','arcor ag',
'freenet','aircel','mobilink',' zong ',' tot ','true corp','dtac',' ais',
'tel ',' steam ','stream ','américa','claro pue','acantho','aexis',
'albacom','alcotek','alltre','amtel','asco','atlanet','bergamocom',
' blu ','brennercom','cdc 1085','clicktel','easytel','ecsnet',
'elitel','eutelia','fastweb','h3g','infostrada','intred','leadercom',
'messagenet','momax','omnitel','c spire','birch','fairpoint','cytranet',
'comtech21','alaskatel',' gci ','crexendo','comtech21','<NAME>',
'sonic.net','blue casa','telepacific','tcast','voice','ztelco',
'closecall',' rcn ','cavalier','on drive tech','nettalk','fractel',
'xpedeus','c spire','deltacom','hargray','ellijay','servpac',
'rangatel','smithville fib','nitco','dsci corp','12net','metrocom',
'telnet','buckeyetel','wow!','trustid','comporium','epb','icbs',
'ubta-ubet','sabesp','sanepar','copasa',' water ','environ',' hera ',
' seabo ','waste','waterwork','unitywater','hydra'
]
# Per-row utilities debit amounts (0 for rows not in this category).
utilities_array = []
for i in range(0,len(df)):
    utilities_array.append(0)
    for utilities_word in utilities_words:
        # Debit row whose description contains a utilities keyword.
        if ((utilities_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                utilities += float(df.iloc[i][debit_column])
                utilities_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Insurance ##
insurance = 0
insurance_words = ['insur','warranti','protection',' aflac',' allstate',' aaa ',
' allianz',' aig ',' asi ','ameriprise fin',' amtrust','applied und',
'assur',' risk ','bankers life','black bear','blue adv','caresource',
'champ va','chubb corp','cigna health','civil service emp','cna fin',
'cno fin','country fin','delta den','esurance','evergreen usa',
'first coast serv','fm global','gainsco','geico','general re',
'genworth fin','gracy tit','grange mut','the hartford','horace mann',
'casualty','ironshore','jackson nat','kemper corp','kentucky farm b',
'knights of col','liberty mut','lincoln nat','markel corp','massmut',
' mbbs ','metlife','metromite','modern wood','mutual of om',
'national life','the norfolk &','northwestern mut','ohio national fin',
' omega','onebeacon','oxford health','pacific life','pacific prime',
'pemco','penn mutual','physicians mut','plymouth rock','primerica',
'principal fin',' progressive','protective life','prudential fin',
' qbe ','the regence g','reliance part','rli corp',' safeco',
'securian fin','squaretrade','sun life fin','symetra','the gen',
'the travelers comp','tiaa-cerf','transamerica corp','tricare',
'triwest','trupanion','universal prop',' unum ',' usaa ','us health g',
'vantage health','veteran affair','west coast life','southern fin',
'xl catlin','colonial penn','conseco','garden state life','ing grou',
'mature life','old mutual','unifi comp','united of om','amerisafe',
' memic ','employers mut','united heart',' aia ','assicur',' axa ',
'british marine lux',' bupa ','canada life',' cigna ',
'clerical medical','claridge-ware','euler herm','friends prov',
'generali int','gerling-kon','globalhealth asia','grouparma tran',
'hang seng life','hannover r','hong kong mor','hsbc life','kolnische r',
'underwrit','guarant','manulife','massmutual','munchener r',
'phoenix life','schweizerische r','scottish mut','standard life',
'sun life hong','taylor brun',' icici ','coface s',' ecics ',
'fwd sing','lion city run','s of lon','shc capital','sompo jap',
'standard steam','singapore life','transamerica life',
'zurich international l','aviva lim','asia sunrise','creare priv',
'reliance nat','r&v ver','scor global life','tokio marine n',
'muenchener r','xi re lim','uib asia priv','medicare',' aarp ',' aetna',
'amerigroup',' anthem ','aspen dent','cambia health','blue cross and',
'coventry health','emblemhealth',' fortis ','geisinger','group health',
'health net','healthmarket','healthpartner','healthspring','highmark',
'humana','independence blue','kaiser perman','kaleida health',
'liberty medic','lifewise health','med4home','oscar health','premera blue',
'state farm','thrivent fin','unitedhealth','unitrin','universal american c',
'wellcare','fidelis care'
]
# Per-row insurance debit amounts (0 for rows not in this category).
insurance_array = []
for i in range(0,len(df)):
    insurance_array.append(0)
    for insurance_word in insurance_words:
        # Debit row whose description contains an insurance keyword.
        if ((insurance_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                insurance += float(df.iloc[i][debit_column])
                insurance_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Healthcare ##
healthcare = 0
healthcare_words = ['medic','health','care ','well-being','hospit','prescrip','dental',
'dentis','pharma','drug','doctor','nurs','specialist','ologist',
'trauma','triage','emergency','burn cent','psych',' rehab','cancer',
' acute','infirm','pediat','treatment','illness','injur','diseas',
'surgic','surger','surgeo','obstet','postnat','ambula','clinic',
'long-term c','chronic','cardi','osteo','physio','therap','counselo',
'patient','geriat','oncolog','transplant',' organ ','physici','wound',
'recovery','recoveri','mental','lunat','asylum','communicab',
'santorium','clínic','hôpital','özel','hastanesi','holzspit','cliniq',
'kantonss','ospedal','parapleg','spital','klinik'
]
# Per-row healthcare debit amounts (0 for rows not in this category).
healthcare_array = []
for i in range(0,len(df)):
    healthcare_array.append(0)
    for healthcare_word in healthcare_words:
        # Debit row whose description contains a healthcare keyword.
        if ((healthcare_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                healthcare += float(df.iloc[i][debit_column])
                healthcare_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Investment ##
investment = 0
investment_words = ['saving','invest','debt','retire','superan','401(k)',' ira ',' loan',
'jpmorgan','goldman s','bofa sec','morgan stan','credit suisse',
'barclays invest','deutsche bank',' ubs ','rbc cap','wells fargo',
'jefferies group','bnp paribas','mizuho fin','lazard','nomura',
'evercore part','bmo capital','mitsubishi ufj','almeida cap',
'atlantic-pac','campbell part','helix assoc','morgan caz','park hill',
'probitas part','abn amro','barclays cap','lloyds banki','merrill lyn',
'cibc world','national westm','nomura group','william blair &',
'markets','etoro','trading','bank of amer','allahabad bank','allen & co',
'bb&t','berkery, noy','bg capital','blackstone','cantor flitz',
'capstone part','centerview part','china international cap','citic secu',
' clsa ','commerzbank','corporate fin','cowen','credit agricole',
'csg part','daewoo sec','duff & phel','europa part','financo',
'gleacher & comp','greenhill & co','guggenheim part','guosen part',
'houlihan lok','hsbc holding','imperial capital','icbc ','icici bank',
'indian bank','j.p. morg','keefe, bruy','keycorp','ladenburg',
'lancaster poll','lincoln int','macquarie gr','maple capital',
'marathon cap','mccoll part','mediobanca','miller buck','moelis & c',
'montgomery & co','morgan keegan','needham & co',' nbf ','nomura hold',
'oppenheimer & co','panmure gord','perella wein','<NAME>','pnc fin',
'punjab national bank','raymond james',' rbs ','robert w. b',
'roth capital part','rothschild','sagent advis','sandler o','sbi capital',
'scotiabank','société générale','sonenshine part','stephens, inc',
'stifel fin','sucsy, fischer','sumitomo mitsui fin','suntrust',
'syndicate bank','td secur','united bank of india','vermillion part',
'wr hambrecht','yes bank',' capital ','partners','finance',
'fidelity invest','e*trade','td ameri','robinhood',' stash ',
' acorns ',' coinbase ','fanduel','predictlt','charles schwab',
'betterment','broker','wealth','asset','merrill edge',' stock','fund',
' equit','finans','financial','transfer','telex'
]
# Per-row investment debit amounts (0 for rows not in this category).
investment_array = []
for i in range(0,len(df)):
    investment_array.append(0)
    for investment_word in investment_words:
        # Debit row whose description contains an investment keyword.
        if ((investment_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                investment += float(df.iloc[i][debit_column])
                investment_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Recreation & Enterntainment ##
recreation = 0
recreation_words = ['recreat','entertain',' fun','leisure','concert','sport','game','bowling',
'vacation','tour ','airline','airway','ticket','cinema','theatr',
'subscript','netflix','hulu','hobb','streami','itune','android','hotel',
'inn','emirates','etihad','lufthan','delta air','klm','air fr','ryanair',
' iag ','air china','skywest','easyjet','wizz air','trip','ferry','cruise',
'train','coach','movie','music','film','kid','activit','boat','sailing',
'flying','diving','dune','safari','expedit','theme park','disney','youtube',
'broadway','studios','amuse','cable tele','broadcast','20th century',
' fair','disco',' bar','club','comcast','crunchyroll','discover',
'fox corp','the jim henson','klasky csupo','premier park','rdf media',
'six flag','perform',' art','21st century','4licens','productions',
'aerodrome inc','a.d. vision','access ind','wrestl','aniplex','antigrav',
'aqualillies','alpha video','talent','pictures','brooklyn bould','burnloung',
'cbs corp','chippendales','cinedigm','circus','cloverway','cmd dist',
'stadium',' show','auditori','cosmote tv','crush manag','dave & bust',
'deaton-flan','dover motor','ecast, inc','eapybird','elvis presley enter',
'firework','foxnext','gaia, inc','genius brands','ghost hunt week',
'giftwrap','the goldklang','great eastern con','grindhouse','harmony gold',
'hunk-o',' ibahn ','imengine','international speed','jillian','juniornet',
'publications',' leg ','lionsgate','the madison square','martinka',
'motion pic',' moxi ','national geo','nbcunivers','nu image','pangea corp',
'paniq escape','pb&j tele','premier parks','radical axis','realnetwork',
'right stuf','ryman hosp','seagram','the shuman','society award',
'splash universe','springtime','station.com','sundance group','swarmcast',
'timetrax','tivo inc','toei animation','truly indie','gala','contest',
'festiv','fayre','celebra'
]
# Per-row recreation/entertainment debit amounts (0 for rows not in this category).
recreation_array = []
for i in range(0,len(df)):
    recreation_array.append(0)
    for recreation_word in recreation_words:
        # Debit row whose description contains a recreation keyword.
        if ((recreation_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                recreation += float(df.iloc[i][debit_column])
                recreation_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Personal ##
# Personal/shopping spend.  Keywords are matched case-insensitively against the
# transaction description in the loop that follows this list; a hit on a debit
# row adds that row's debit amount to `personal`.
# FIX: a missing comma after 'butera' previously fused two adjacent string
# literals into the single useless keyword 'buterasuper saver', so neither
# 'butera' nor 'super saver' was ever matched.
personal = 0
personal_words = ['shop','walmart','amazon','megamart','carrefour','lifestyle',
                  'gym','cloth','shoe','mart','decor','furni','gift','magazine',
                  'hygien','dry clean','laundry','store','ikea','monoprix','spinney',
                  'barneys','century21','j. c. pen','kohl','lord & tay','macy',
                  'bloomingdale','neiman marc','bergdof good','nordstrom','saks fifth',
                  'sears','bealls',' belk ','boscov','dillard','goody','gordmans',
                  'palais royal','peebles','<NAME>','boyds','charleston dep',
                  '<NAME>','dunham','flemington dep','fords fed','getz',
                  'gus mayer','halls','<NAME>','la epoca','lavogue','dar al sal',
                  'leader depart','loeb','mack & dave','mccaulou',"murphy's",'groom',
                  "neilson's",'nichols',"norby's","ossell's","reed's","ruben's",
                  'rubenstein',"schroeder's",'shirokiya','the sm',"stahlke's",
                  'stanley korshak','tomah cash',"wakefield's","weaver's",'fitness',
                  "wilson's","scott seale's","young's dep",'bargain','<NAME>',
                  'big lots','wholesale',' sale','burlington','costco','discount',
                  'dirt cheap','dollar general','dollar tree','family dollar',
                  'five below',"fred's",'fred meyer',"gabe's",'gordmans',
                  'harbor freight','homegoods','homesense','marshalls','meijer',
                  'ocean state job','renys','roses','t.j. maxx','treasure hunt',
                  'tuesday morning',"sam's club",'lulu','market','hyper',
                  'supertarget','kroger','kaufland',' coles','the warehouse',
                  'loblaw','jumbo',' asda ','sainsbury','harrod','tesco',
                  'marks and spenc',' BHS','géant','albertsons','carrs','jewel-osco',
                  'pavilions','randalls','tom thumb','safeway inc',' vons','food lion',
                  'hannaford','giant food','king kullen','<NAME>yer','<NAME>',
                  'jay c','king soopers',"mariano's","owen's",' qfc ','ralphs',
                  "roundy's","scott's",'spartannash','supervalu','hornbacher',
                  "shop 'n save","bashas'",'brookshire','buehler','big y','butera',
                  'super saver','buy for less','calandros','caraluzzi','cash saver',
                  'coborns','de cicco','dierberg','fareway','giant eagle','giant value',
                  'giantway','gristede','h-e-b','hen house','homeland',"hugo's",
                  'hy-vee','la canasta','lunardi','lunds &','macey','matherne',
                  'mccaffrey','meijer','morton william','mollie stone','piggly wig',
                  'preston-safe','price chop','pricerite','publix','pueblo',"raley's",
                  'roche bro','rosauer','schnuck','smart & fin','stater bro',
                  'stew leo','supermercad','supersol',"trig's","turco's","wade's",
                  'wegmans',"wesselman's",'wise way',"zup's",' aldi ','cash & carry',
                  'outlet','plaza','hannam','marukai','mitsuwa','bazar','bazzar',
                  'patel bro',' bravo ','mi tienda','la placita','presidente',
                  'rancho',"saver's cost",'el super',"terry's","motty's",
                  'new day','kosher','evergreen','breadberry','grand & ess'
                  ]
# Per-row personal/shopping debit amounts (0 for rows not in this category).
personal_array = []
for i in range(0,len(df)):
    personal_array.append(0)
    for personal_word in personal_words:
        # Debit row whose description contains a personal-spend keyword.
        if ((personal_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                personal += float(df.iloc[i][debit_column])
                personal_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Education ##
education = 0
education_words = ['educat','tuition','colleg','universit','school','kindergar',
'elementary','edx','mooc','udacity','course','udemy','of tech',
'tutor','edexcel','aqa','ocr',' sat ',' act ',' gre ','toefl',
'ietls','pearson','exam','assessm','quiz'
]
# Per-row education debit amounts (0 for rows not in this category).
education_array = []
for i in range(0,len(df)):
    education_array.append(0)
    for education_word in education_words:
        # Debit row whose description contains an education keyword.
        if ((education_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][credit_column] == 0):
            # Mark as categorised (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----D-R-C----'
            try:
                education += float(df.iloc[i][debit_column])
                education_array[i] = float(df.iloc[i][debit_column])
            except ValueError:
                continue
## Miscellaneous Debit ##
# Remainder of total debits not captured by any named category above.
# FIX: housing and transportation were previously omitted from the
# subtraction, so "Other Debit" double-counted both of those categories.
misc_debit = debit - housing - transportation - food - utilities - insurance - healthcare - investment - recreation - personal - education
## Salary ##
salary = 0
salary_words = ['salar','compens','renumer','wage','stipend','allowance','income',
'emolume','honorarium','hire','pay ','bonus'
]
# Per-row salary credit amounts (0 for rows not in this category).
salary_array = []
for i in range(0,len(df)):
    salary_array.append(0)
    for salary_word in salary_words:
        # Credit-only row (debit cell == 0) whose description contains a salary keyword.
        if ((salary_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][debit_column] == 0):
            # Credit-side marker (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----C-R-C----'
            try:
                salary += float(df.iloc[i][credit_column])
                salary_array[i] = float(df.iloc[i][credit_column])
            except ValueError:
                continue
## Earnings ##
earnings = 0
earnings_words = ['invest','earning','dividend','stock','share','bond','profit',
'earned','return','payback','premium','benefit','gain','surplus',
'capital','portion','advantag','commision','rent','lease',
'hire','charter','fund','market','forex','tenan','interest',
'charge'
]
# Per-row investment-earnings credit amounts (0 for rows not in this category).
earnings_array = []
for i in range(0,len(df)):
    earnings_array.append(0)
    for earnings_word in earnings_words:
        # Credit-only row whose description contains an earnings keyword.
        if ((earnings_word in ((str(df.iloc[i][description_column])).lower())) == True) and (df.iloc[i][debit_column] == 0):
            # Credit-side marker (chained assignment — pandas-version dependent; verify).
            df.iloc[i][description_column] = '----C-R-C----'
            try:
                earnings += float(df.iloc[i][credit_column])
                earnings_array[i] = float(df.iloc[i][credit_column])
            except ValueError:
                continue
## Miscellaneous Credit ##
# Credits not categorised as salary or investment earnings.
misc_credit = credit - salary - earnings
### Debit Categories Array ###
debit_cat_array = [['Housing',housing],['Food',food],['Insurance',insurance],
['Utilities',utilities],['Transportation',transportation],
['Healthcare',healthcare],['Recreation',recreation],
['Personal',personal],['Education',education],['Investment',investment],
['Other Debit',misc_debit]]
### Credit Categories Array ###
credit_cat_array = [['Salary',salary],['Earnings',earnings],['Other Credit',misc_credit]]
### Finding Currency ###
lpage_content = page_content.lower()
currency = ''
if ('australian' in lpage_content) == True:
currency = 'AUD'
if ('australian dollar' in lpage_content) == True:
currency = 'AUD'
if ('a$' in lpage_content) == True:
currency = 'AUD'
if ('AUD' in page_content) == True:
currency = 'AUD'
#if ('brazilian' in lpage_content) == True:
# currency = 'BRL'
#if ('brazilian real' in lpage_content) == True:
# currency = 'BRL'
#if ('r$' in lpage_content) == True:
# currency = 'BRL'
#if ('BRL' in page_content) == True:
# currency = 'BRL'
if ('british' in lpage_content) == True:
currency = 'GBP'
if ('british pound' in lpage_content) == True:
currency = 'GBP'
if ('£' in lpage_content) == True:
currency = 'GBP'
if ('GBP' in page_content) == True:
currency = 'GBP'
if ('canadian' in lpage_content) == True:
currency = 'CAD'
if ('canadian dollar' in lpage_content) == True:
currency = 'CAD'
if ('c$' in lpage_content) == True:
currency = 'CAD'
if ('CAD' in page_content) == True:
currency = 'CAD'
#if ('chilean' in lpage_content) == True:
# currency = 'CLP'
#if ('chilean peso' in lpage_content) == True:
# currency = 'CLP'
#if ('CLP' in page_content) == True:
# currency = 'CLP'
#if ('chinese' in lpage_content) == True:
# currency = 'CNY'
#if ('yuan' in lpage_content) == True:
# currency = 'CNY'
#if ('CNY' in page_content) == True:
# currency = 'CNY'
#if ('czech' in lpage_content) == True:
# currency = 'CZK'
#if ('koruna' in lpage_content) == True:
# currency = 'CZK'
#if ('kč' in lpage_content) == True:
# currency = 'CZK'
#if ('CZK' in page_content) == True:
# currency = 'CZK'
#if ('danish' in lpage_content) == True:
# currency = 'DKK'
#if ('danish krone' in lpage_content) == True:
# currency = 'DKK'
#if ('DKK' in page_content) == True:
# currency = 'DKK'
if ('euro' in lpage_content) == True:
currency = 'EUR'
if ('€' in lpage_content) == True:
currency = 'EUR'
if ('EUR' in page_content) == True:
currency = 'EUR'
#if ('hong kong' in lpage_content) == True:
# currency = 'HKD'
#if ('hong kong dollar' in lpage_content) == True:
# currency = 'HKD'
#if ('hk$' in lpage_content) == True:
# currency = 'HKD'
#if ('HKD' in page_content) == True:
# currency = 'HKD'
#if ('hungarian' in lpage_content) == True:
# currency = 'HUF'
#if ('forint' in lpage_content) == True:
# currency = 'HUF'
#if ('HUF' in page_content) == True:
# currency = 'HUF'
#if ('indian' in lpage_content) == True:
# currency = 'INR'
#if ('₹' in lpage_content) == True:
# currency = 'INR'
#if ('INR' in page_content) == True:
# currency = 'INR'
#if ('indonesian' in lpage_content) == True:
# currency = 'IDR'
#if ('rupiah' in lpage_content) == True:
# currency = 'IDR'
#if ('IDR' in page_content) == True:
# currency = 'IDR'
#if ('japanese' in lpage_content) == True:
# currency = 'JPY'
#if ('yen' in lpage_content) == True:
# currency = 'JPY'
#if ('JPY' in page_content) == True:
# currency = 'JPY'
#if ('korean' in lpage_content) == True:
# currency = 'KRW'
#if ('korean won' in lpage_content) == True:
# currency = 'KRW'
#if ('₩' in lpage_content) == True:
# currency = 'KRW'
#if ('KRW' in page_content) == True:
# currency = 'KRW'
#if ('malaysian' in lpage_content) == True:
# currency = 'MYR'
#if ('ringgit' in lpage_content) == True:
# currency = 'MYR'
#if ('MYR' in page_content) == True:
# currency = 'MYR'
#if ('mexican' in lpage_content) == True:
# currency = 'MXN'
#if ('mexican peso' in lpage_content) == True:
# currency = 'MXN'
#if ('mex$' in lpage_content) == True:
# currency = 'MXN'
#if ('MXN' in page_content) == True:
# currency = 'MXN'
if ('new zealand' in lpage_content) == True:
currency = 'NZD'
if ('new zealand dollar' in lpage_content) == True:
currency = 'NZD'
if ('nzd' in lpage_content) == True:
currency = 'NZD'
#if ('norwegian' in lpage_content) == True:
# currency = 'NOK'
#if ('norwegian krone' in lpage_content) == True:
# currency = 'NOK'
#if ('NOK' in page_content) == True:
# currency = 'NOK'
#if ('pakistani' in lpage_content) == True:
# currency = 'PKR'
#if ('pakistani rupee' in lpage_content) == True:
# currency = 'PKR'
#if ('PKR' in page_content) == True:
# currency = 'PKR'
#if ('philippine' in lpage_content) == True:
# currency = 'PHP'
#if ('philippine peso' in lpage_content) == True:
# currency = 'PHP'
#if ('₱' in lpage_content) == True:
# currency = 'PHP'
#if ('PHP' in page_content) == True:
# currency = 'PHP'
#if ('polish' in lpage_content) == True:
# currency = 'PLN'
#if ('zloty' in lpage_content) == True:
# currency = 'PLN'
#if ('zł' in lpage_content) == True:
# currency = 'PLN'
#if ('PLN' in page_content) == True:
# currency = 'PLN'
#if ('russian' in lpage_content) == True:
# currency = 'RUB'
#if ('ruble' in lpage_content) == True:
# currency = 'RUB'
#if ('RUB' in page_content) == True:
# currency = 'RUB'
#if ('singapore' in lpage_content) == True:
# currency = 'SGD'
#if ('singapore dollar' in lpage_content) == True:
# currency = 'SGD'
#if ('s$' in lpage_content) == True:
# currency = 'SGD'
#if ('SGD' in page_content) == True:
# currency = 'SGD'
#if ('south african' in lpage_content) == True:
# currency = 'ZAR'
#if ('rand' in lpage_content) == True:
# currency = 'ZAR'
#if ('ZAR' in page_content) == True:
# currency = 'ZAR'
#if ('swedish' in lpage_content) == True:
# currency = 'SEK'
#if ('krona' in lpage_content) == True:
# currency = 'SEK'
#if ('SEK' in page_content) == True:
# currency = 'SEK'
#if ('swiss' in lpage_content) == True:
# currency = 'CHF'
#if ('franc' in lpage_content) == True:
# currency = 'CHF'
#if ('CHF' in page_content) == True:
# currency = 'CHF'
#if ('taiwan' in lpage_content) == True:
# currency = 'TWD'
#if ('taiwan dollar' in lpage_content) == True:
# currency = 'TWD'
#if ('nt$' in lpage_content) == True:
# currency = 'TWD'
#if ('TWD' in page_content) == True:
# currency = 'TWD'
#if ('thai' in lpage_content) == True:
# currency = 'THB'
#if ('baht' in lpage_content) == True:
# currency = 'THB'
#if ('฿' in lpage_content) == True:
# currency = 'THB'
#if ('THB' in page_content) == True:
# currency = 'THB'
#if ('turkish' in lpage_content) == True:
# currency = 'TRY'
#if ('lira' in lpage_content) == True:
# currency = 'TRY'
#if ('₺' in lpage_content) == True:
# currency = 'TRY'
#if ('TRY' in page_content) == True:
# currency = 'TRY'
### Currency Detection ###
# Later tests deliberately override earlier matches, exactly as in the
# original one-test-per-line cascade; merging each currency's three
# checks with `or` preserves that precedence.  The redundant `== True`
# comparisons were dropped (`in` already yields a bool).
if 'us dollar' in lpage_content or 'united states dollar' in lpage_content or 'USD' in page_content:
    currency = 'USD'
# (IRR detection was already disabled in the original and stays disabled.)
if 'saudi' in lpage_content or 'saudi riyal' in lpage_content or 'SAR' in page_content:
    currency = 'SAR'
if 'kuwaiti' in lpage_content or 'dinar' in lpage_content or 'KWD' in page_content:
    currency = 'KWD'
if 'emirati' in lpage_content or 'dirham' in lpage_content or 'AED' in page_content:
    currency = 'AED'
if 'qatari' in lpage_content or 'qatari riyal' in lpage_content or 'QAR' in page_content:
    currency = 'QAR'
### Cost of Living Ratio CUR vs. USD ###
# Approximate cost-of-living scaling factors per currency, relative to
# the US dollar (USD = 1.00).
# NOTE(review): none of these constants are referenced in this section
# of the script -- presumably consumed elsewhere; verify before removing.
c_aud = 0.69
c_brl = 0.38
c_gbp = 0.92
c_cad = 0.62
c_clp = 0.36
c_cny = 0.48
c_czk = 0.46
c_dkk = 0.80
c_eur = 0.80
c_hkd = 0.93
c_huf = 0.37
c_inr = 0.22
c_idr = 0.38
c_jpy = 0.78
c_krw = 0.70
c_myr = 0.35
c_mxn = 0.35
c_nzd = 0.67
c_nok = 0.87
c_pkr = 0.18
c_php = 0.37
c_pln = 0.40
c_rub = 0.46
c_sgd = 0.87
c_zar = 0.43
c_sek = 0.69
c_chf = 1.14
c_twd = 0.51
c_thb = 0.51
c_try = 0.26
c_usd = 1.00
c_irr = 0.37
c_sar = 0.39
c_kwd = 0.51
c_aed = 0.66
c_qar = 0.68
### Extracting Name ###
# Run the NLP pipeline over the raw statement text and take the first
# PERSON entity as the account holder's name.
doc = nlp(page_content)
people = [ent for ent in doc.ents if ent.label_ == 'PERSON']
# Title-case and truncate to 20 characters so the name fits the report
# header.  Fix: fall back to an empty string instead of raising
# IndexError when no PERSON entity is detected.  (The dead
# `people = ['']` placeholder was removed.)
name = str(people[0]).lower().title()[0:20] if people else ''
### Creating Report ###
# A4 portrait page with a full-bleed background template image.
pdf = FPDF(orientation = 'P', unit = 'mm', format = 'A4')
pdf.add_page()
pdf.image('C:/Users/<NAME>/Desktop/page.png', x = 0, y = 0, w = 210, h = 297)
pdf.set_font('helvetica','',10)
pdf.set_text_color(255,255,255)
# Header fields: account holder name, currency code and statement period.
pdf.text(25.0,49.0,name)
pdf.text(25.0,57.2,currency)
pdf.text(25.0,65.5,sdate)
pdf.text(25.0,73.7,edate)
## Generating Pie Charts ##
# RGB palettes (0..1 floats): one colour per credit / debit category slice.
credit_color = [[0.0000,0.4392,0.7529],[0.8627,0.0784,0.8431],[0.6157,0.7647,0.9020]]
debit_color = [[0.4980,0.4980,0.4980],[0.7490,0.7490,0.7490],[0.5176,0.2353,0.04706],
               [1.0000,0.0000,0.0000],[1.0000,1.0000,0.0000],[0.3294,0.5098,0.2078],
               [1.0000,0.8509,0.4000],[0.7490,0.5647,0.0000],[0.0000,0.0000,0.0000],
               [0.6627,0.8196,0.5569],[0.9569,0.6941,0.5137]]
def donut(name, value, category, total_value, currency, color, color_b):
    """Draw a two-segment donut chart (value vs. remainder of
    `total_value`) and save it as '<name>.png' with a transparent
    background.  `category` and `currency` are accepted for caller
    compatibility but do not affect the drawing."""
    slices = [value, total_value - value]
    plt.pie(slices,
            wedgeprops={'linewidth': 0, 'edgecolor': 'white'},
            colors=[color, color_b], labeldistance=1.1)
    # Punch the central hole that turns the pie into a donut.
    hole = plt.Circle((0, 0), 0.8, color=[0.9490, 0.9490, 0.9490])
    plt.gcf().gca().add_artist(hole)
    plt.savefig('%s.png' % name, orientation='portrait', transparent=True,
                bbox_inches=None, pad_inches=0)
    plt.close()
# Render one donut per credit / debit category; the palette index follows
# the category's position in its array.  (range(len(...)) indexing was
# replaced with the idiomatic enumerate.)
for i, entry in enumerate(credit_cat_array):
    donut(entry[0], int(entry[1]), 'Other Credit', credit, currency,
          credit_color[i], [0.8549, 0.8902, 0.9529])
for i, entry in enumerate(debit_cat_array):
    donut(entry[0], int(entry[1]), 'Other Debit', debit, currency,
          debit_color[i], [0.9569, 0.8627, 0.8549])
## Health Score ##
# Per-category scoring tables.  Index 0 is the category's weight in the
# overall spending score; indexes 2-4 are percentage band edges of total
# debit (good / acceptable / warning ceiling).  Index 1 appears unused.
housing_score = [25, 0, 25, 35, 60]
food_score = [10, 0, 10, 15, 25]
insurance_score = [10, 0, 10, 25, 35]
utilities_score = [5, 0, 5, 10, 15]
transportation_score = [10, 0, 10, 15, 25]
healthcare_score = [5, 0, 5, 10, 15]
recreation_score = [5, 0, 5, 10, 15]
personal_score = [5, 0, 5, 10, 15]
education_score = [10, 0, 10, 20, 30]
investment_score = [10, 0, 10, 20, 30]
other_debit_score = [5, 0, 5, 10, 15]
def _category_score(amount, total, score):
    """Score one spending category against its threshold table.

    `score` is [weight, unused, good%, acceptable%, max%].  Returns
    0..100 when the category's share of `total` is at or below the
    'good' percentage (more headroom scores higher), 0 inside the
    acceptable band, a negative value inside the warning band, and
    -100 beyond the maximum percentage.
    """
    share = amount / total * 100
    if share <= score[2]:
        return (score[2] - share) / score[2] * 100
    if share <= score[3]:
        return 0
    if share <= score[4]:
        # Negative penalty proportional to how far past the band we are.
        return (score[3] - share) / score[2] * 100
    return -100

# The eleven copy-pasted if-cascades were collapsed into the helper
# above.  This also fixes a latent bug: the education branch referenced
# the undefined name 'heducation_score', raising a NameError whenever
# education spending fell in the warning band.
housing_score_val = _category_score(housing, debit, housing_score)
food_score_val = _category_score(food, debit, food_score)
insurance_score_val = _category_score(insurance, debit, insurance_score)
utilities_score_val = _category_score(utilities, debit, utilities_score)
transportation_score_val = _category_score(transportation, debit, transportation_score)
healthcare_score_val = _category_score(healthcare, debit, healthcare_score)
recreation_score_val = _category_score(recreation, debit, recreation_score)
personal_score_val = _category_score(personal, debit, personal_score)
education_score_val = _category_score(education, debit, education_score)
investment_score_val = _category_score(investment, debit, investment_score)
other_debit_score_val = _category_score(misc_debit, debit, other_debit_score)
# Weighted spending score: each category's score contributes in
# proportion to its configured weight (score[0]); the weights sum to 100.
_weighted_scores = (
    (housing_score_val, housing_score),
    (food_score_val, food_score),
    (insurance_score_val, insurance_score),
    (utilities_score_val, utilities_score),
    (transportation_score_val, transportation_score),
    (healthcare_score_val, healthcare_score),
    (recreation_score_val, recreation_score),
    (personal_score_val, personal_score),
    (education_score_val, education_score),
    (investment_score_val, investment_score),
    (other_debit_score_val, other_debit_score),
)
spending_score = sum(val / 100 * weights[0] for val, weights in _weighted_scores)
# Balance score: net cash flow over the period relative to the opening
# balance, clamped to [-100, 100].
_net_flow = int(credit - debit)
balance_score = _net_flow / sbalance * 100
if _net_flow > sbalance:
    balance_score = 100
if _net_flow < -sbalance:
    balance_score = -100
# Overall health: equal halves of the spending and balance scores.
health_score = int(spending_score / 100 * 50 + balance_score / 100 * 50)
def donut_2(name, value, color_b):
    """Draw a score donut for `value` (a percentage, possibly negative)
    and save it as '<name>.png'.  The foreground colour encodes the
    score band: green >= 80, orange 60-79, red below 60.  Negative
    values are drawn clockwise; a value of exactly 0 draws no wedge."""
    if value >= 80:
        fg = [0.4392, 0.6784, 0.2784]
    elif value >= 60:
        fg = [0.9569, 0.6941, 0.5137]
    else:
        fg = [1.0000, 0.4118, 0.4118]
    hole = plt.Circle((0, 0), 0.8, color=[0.4902, 0.8000, 1.0000])
    if value != 0:
        magnitude = abs(value)
        plt.pie([magnitude, 100 - magnitude],
                wedgeprops={'linewidth': 0, 'edgecolor': 'white'},
                colors=[fg, color_b], labeldistance=1.1,
                counterclock=value > 0)
    plt.gcf().gca().add_artist(hole)
    plt.savefig('%s.png' % name, orientation='portrait', transparent=True,
                bbox_inches=None, pad_inches=0)
    plt.close()
# Render the three score donuts used in the left-hand summary column.
donut_2('Debit_score',spending_score,[0.9569,0.8627,0.8549])
donut_2('Balance_score',balance_score,[0.8549,0.8902,0.9529])
donut_2('Health_score',health_score,[0.8549,0.8902,0.9529])
## Bar Charts ##
def bar_chart(date, balance, credit, debit, salary, earnings, housing, food,
              transportation, utilities, insurance, healthcare, investment,
              recreation, personal, education):
    """Render the two statement charts used by the report.

    Saves 'fig1.png' (daily balance line with credit/debit bars) and
    'fig2.png' (stacked per-category credit and debit bars).  All
    parameters are parallel, transaction-ordered sequences: `date`
    holds one date string per transaction and the remaining arguments
    hold the matching numeric amounts.
    """
    # Hoisted here once; the original imported it twice mid-function and
    # also imported pandas' andrews_curves without using it.
    from matplotlib.ticker import ScalarFormatter

    def _sum_by_date(values):
        """Total `values` per distinct date (first-appearance order) and
        return a list of (date, total) tuples.  Replaces the twelve
        copy-pasted pair-list/dict aggregation loops of the original."""
        totals = dict()
        for day, amount in zip(date, values):
            totals[day] = totals.get(day, 0) + amount
        return [(day, total) for (day, total) in totals.items()]

    # Closing balance per day: keep the last balance entry of each date run.
    balance_a = []
    for i in range(len(date)):
        if i == len(date) - 1 or date[i] != date[i + 1]:
            balance_a.append((date[i], balance[i]))
    credit_a = _sum_by_date(credit)
    debit_a = _sum_by_date(debit)
    # Truncated date labels for the x axis.
    x = [day[0:6] for (day, _) in balance_a]
    ### Credit Categories ###
    salary_a = _sum_by_date(salary)
    earnings_a = _sum_by_date(earnings)
    # Credit not attributed to salary or earnings.
    miscellanousc_a = [(x[i], credit_a[i][1] - salary_a[i][1] - earnings_a[i][1])
                       for i in range(len(x))]
    ### Debit Categories ###
    personal_a = _sum_by_date(personal)
    housing_a = _sum_by_date(housing)
    food_a = _sum_by_date(food)
    transportation_a = _sum_by_date(transportation)
    utilities_a = _sum_by_date(utilities)
    insurance_a = _sum_by_date(insurance)
    healthcare_a = _sum_by_date(healthcare)
    investment_a = _sum_by_date(investment)
    recreation_a = _sum_by_date(recreation)
    education_a = _sum_by_date(education)
    # Debit not attributed to any named category.
    miscellanous_a = [(x[i], debit_a[i][1] - housing_a[i][1] - food_a[i][1]
                       - transportation_a[i][1] - utilities_a[i][1]
                       - insurance_a[i][1] - healthcare_a[i][1]
                       - investment_a[i][1] - recreation_a[i][1]
                       - personal_a[i][1] - education_a[i][1])
                      for i in range(len(x))]
    ## Plotting Graph ##
    z1 = [y for (_, y) in housing_a]
    z2 = [y for (_, y) in food_a]
    z3 = [y for (_, y) in transportation_a]
    z4 = [y for (_, y) in utilities_a]
    z5 = [y for (_, y) in insurance_a]
    z6 = [y for (_, y) in healthcare_a]
    z7 = [y for (_, y) in investment_a]
    z8 = [y for (_, y) in recreation_a]
    z9 = [y for (_, y) in personal_a]
    z10 = [y for (_, y) in education_a]
    z11 = [y for (_, y) in miscellanous_a]
    u1 = [y for (_, y) in salary_a]
    u2 = [y for (_, y) in earnings_a]
    u3 = [y for (_, y) in miscellanousc_a]
    fig2, ax2 = plt.subplots(1, 1, figsize=(16, 7), dpi=96)
    n = len(x)
    index = np.arange(n)
    width = 0.125
    # The stack is drawn as overlapping bars from the full cumulative sum
    # down to the innermost segment; each bar's label/colour belongs to
    # its topmost visible segment.
    p1 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6),z5),z4),z3),z2),z1), width, label='Housing',color=[(0.4980,0.4980,0.4980)])
    p2 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6),z5),z4),z3),z2), width, label='Food',color=[(0.7490,0.7490,0.7490)])
    p3 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6),z5),z4),z3), width, label='Transportation',color=[(0.5176,0.2353,0.04706)])
    p4 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6),z5),z4), width, label='Utilities',color=[(1.0000,0.0000,0.0000)])
    p5 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6),z5), width, label='Insurance',color=[(1.0000,1.0000,0.0000)])
    p6 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7),z6), width, label='Healthcare',color=[(0.3294,0.5098,0.2078)])
    p7 = plt.bar(index+0.0625, np.add(np.add(np.add(np.add(z11,z10),z9),z8),z7), width, label='Investment',color=[(0.6627,0.8196,0.5569)])
    p8 = plt.bar(index+0.0625, np.add(np.add(np.add(z11,z10),z9),z8), width, label='Recreation',color=[(1.0000,0.8509,0.4000)])
    p9 = plt.bar(index+0.0625, np.add(np.add(z11,z10),z9), width, label='Personal',color=[(0.7490,0.5647,0.0000)])
    p10 = plt.bar(index+0.0625, np.add(z11,z10), width, label='Education',color=[(0.0000,0.0000,0.0000)])
    p11 = plt.bar(index+0.0625, z11, width, label='Other Debit',color=[(0.9569,0.6941,0.5137)])
    p12 = plt.bar(index-0.0625, np.add(np.add(u3,u2),u1), width, label='Salary',color=[(0.0000,0.4392,0.7529)])
    p13 = plt.bar(index-0.0625, np.add(u3,u2), width, label='Earnings',color=[(0.8627,0.0784,0.8431)])
    p14 = plt.bar(index-0.0625, u3, width, label='Other Credit',color=[(0.6157,0.7647,0.9020)])
    # Rotate the x labels progressively as the number of days grows,
    # capped at vertical.
    angle = 45
    if len(x) > 19:
        angle = round(len(x) / 0.44, 0)
    if angle > 90:
        angle = 90
    plt.xticks(index, x, fontsize=14, rotation=angle, horizontalalignment='center', color='darkgrey')
    plt.yticks(fontsize=14, color='darkgrey')
    plt.xlim(-1.0)
    ax2.yaxis.grid(alpha=0.5)
    # A log base barely above 1 keeps both small and large daily totals visible.
    plt.yscale('log', basey=1.00001)
    for axis in [ax2.yaxis]:
        axis.set_major_formatter(ScalarFormatter())
    plt.gca().spines["top"].set_alpha(0)
    plt.gca().spines["bottom"].set_alpha(0.5)
    plt.gca().spines["right"].set_alpha(0)
    plt.gca().spines["left"].set_alpha(0)
    plt.savefig('fig2.png', orientation='portrait', transparent=True, bbox_inches=None, pad_inches=0)
    ### Balance Graph ###
    y1 = [y for (_, y) in credit_a]
    y2 = [y for (_, y) in debit_a]
    y3 = [y for (_, y) in balance_a]
    fig, ax = plt.subplots(1, 1, figsize=(16, 7), dpi=96)
    plt.plot(x, y3, label='Balance', color='black', linewidth=2.0)
    b1 = plt.bar(index-0.0625, y1, label='Credit', color=[(0.06667,0.5647,0.7961)], width=0.125)
    b2 = plt.bar(index+0.0625, y2, label='Debit', color=[(1,0.4118,0.4118)], width=0.125)
    plt.xticks(fontsize=14, rotation=angle, horizontalalignment='center', color='darkgrey')
    plt.yticks(fontsize=14, color='darkgrey')
    plt.xlim(-1.0)
    ax.yaxis.grid(alpha=0.5)
    plt.yscale('log', basey=1.00001)
    # Annotate the opening and closing balances at the line's endpoints.
    plt.text(x[0], y3[0] + 0.1*y3[0], 'Start Balance\n' + str(locale.format_string('%d', int(y3[0]), 1)), color='darkgrey', horizontalalignment='center', fontsize=14)
    plt.text(x[-1], y3[-1] + 0.1*y3[-1], 'End Balance\n' + str(locale.format_string('%d', int(y3[-1]), 1)), color='darkgrey', horizontalalignment='center', fontsize=14)
    for axis in [ax.yaxis]:
        axis.set_major_formatter(ScalarFormatter())
    plt.gca().spines["top"].set_alpha(0)
    plt.gca().spines["bottom"].set_alpha(0.5)
    plt.gca().spines["right"].set_alpha(0)
    plt.gca().spines["left"].set_alpha(0)
    plt.savefig('fig1.png', orientation='portrait', transparent=True, bbox_inches=None, pad_inches=0)
# Render both charts, then place them at the top of the report page.
bar_chart(date_array,balance_array,credit_array,debit_array,salary_array,
          earnings_array,housing_array,food_array,transportation_array,
          utilities_array,insurance_array,healthcare_array,investment_array,
          recreation_array,personal_array,education_array)
pdf.image('fig1.png',65,10,150,65)
pdf.image('fig2.png',65,80,150,65)
## Pie Charts ##
# Category donut images rendered earlier, positioned row by row over the
# page template (x, y, w, h in mm).
pdf.image('Salary.png',78,157.5,44,33)
pdf.image('Earnings.png',118,157.5,44,33)
pdf.image('Other Credit.png',158,157.5,44,33)
pdf.image('Housing.png',68,189.5,44,33)
pdf.image('Food.png',101,189.5,44,33)
pdf.image('Insurance.png',135,189.5,44,33)
pdf.image('Utilities.png',168,189.5,44,33)
pdf.image('Transportation.png',68,221.5,44,33)
pdf.image('Healthcare.png',101,221.5,44,33)
pdf.image('Recreation.png',135,221.5,44,33)
pdf.image('Personal.png',168,221.5,44,33)
pdf.image('Education.png',78,253.5,44,33)
pdf.image('Investment.png',118,253.5,44,33)
pdf.image('Other Debit.png',158,253.5,44,33)
## Pie Chart Values ##
# Credit Pie Chart Function #
def credit_pie(credit_category, x1, x2, x3, x4, y1, y2, y3):
    """Annotate one credit-category donut with its percentage share of
    total credit and the locale-formatted amount.  (x1, x2) anchor the
    percentage when it is exactly 100; (x3, x4) anchor it otherwise;
    y3 anchors the amount line."""
    def _stamp(num_x, sym_x, text):
        # Large percentage number plus a small '%' symbol next to it.
        pdf.set_xy(num_x, y1)
        pdf.set_font('helvetica', '', 20)
        pdf.cell(10, 10, text, 0, 0, 'C')
        pdf.set_xy(sym_x, y2)
        pdf.set_font('helvetica', '', 8)
        pdf.cell(10, 10, '%', 0, 0, 'C')
    share = int(round(credit_category / credit * 100, 0))
    pdf.set_text_color(10, 100, 140)
    if share == 100:
        _stamp(x1, x2, str(share))
    elif 1 <= share < 100:
        _stamp(x3, x4, str(share))
    elif share < 1:
        _stamp(x3, x4, '<1')
    pdf.set_xy(x3, y3)
    pdf.set_font('helvetica', '', 9)
    amount = locale.format_string('%d', int(credit_category), 1)
    pdf.cell(10, 10, currency + str(amount), 0, 0, 'C')
# Percentage/amount annotations for the three credit donuts.
# Salary #
credit_pie(salary,94,101,95.5,100.7,167.5,168.7,172.5)
# Earnings #
credit_pie(earnings,134,141,135.5,140.7,167.5,168.7,172.5)
# Other Credit #
credit_pie(misc_credit,174,181,175.5,180.7,167.5,168.7,172.5)
# Debit Pie Chart Function #
def debit_pie(debit_category, x1, x2, x3, x4, y1, y2, y3, threshold):
    """Annotate one debit-category donut with its percentage share of
    total debit and the locale-formatted amount.  The percentage is
    green when at or under `threshold` and red otherwise.  (x1, x2)
    anchor the percentage when it is exactly 100; (x3, x4) anchor it
    otherwise; y3 anchors the amount line."""
    def _stamp(num_x, sym_x, text):
        # Large percentage number plus a small '%' symbol next to it.
        pdf.set_xy(num_x, y1)
        pdf.set_font('helvetica', '', 20)
        pdf.cell(10, 10, text, 0, 0, 'C')
        pdf.set_xy(sym_x, y2)
        pdf.set_font('helvetica', '', 8)
        pdf.cell(10, 10, '%', 0, 0, 'C')
    share = int(round(debit_category / debit * 100, 0))
    if share <= threshold:
        pdf.set_text_color(112, 173, 71)
    else:
        pdf.set_text_color(210, 0, 0)
    if share == 100:
        _stamp(x1, x2, str(share))
    elif 1 <= share < 100:
        _stamp(x3, x4, str(share))
    elif share < 1:
        _stamp(x3, x4, '<1')
    pdf.set_xy(x3, y3)
    pdf.set_font('helvetica', '', 9)
    amount = locale.format_string('%d', int(debit_category), 1)
    pdf.cell(10, 2, currency + str(amount), 0, 0, 'C')
def score_pie(value, x1, x2, x3, x4, y1, y2, s1, s2):
    """Annotate a score donut with its percentage.  Text colour encodes
    the band (green >= 80, orange 60-79, red below 60).  Wide numbers
    (exactly 100, or negatives below -9) use the (x1, x2) anchors; all
    others use (x3, x4).  s1/s2 are the number and '%' font sizes."""
    if value >= 80:
        pdf.set_text_color(112, 173, 71)
    elif value >= 60:
        pdf.set_text_color(244, 177, 131)
    else:
        pdf.set_text_color(255, 105, 105)
    wide = value == 100 or value < -9
    num_x, sym_x = (x1, x2) if wide else (x3, x4)
    pdf.set_xy(num_x, y1)
    pdf.set_font('helvetica', '', s1)
    pdf.cell(10, 10, str(int(value)), 0, 0, 'C')
    pdf.set_xy(sym_x, y2)
    pdf.set_font('helvetica', '', s2)
    pdf.cell(10, 10, '%', 0, 0, 'C')
# Per-category percentage/amount annotations; the final argument of each
# debit_pie call is that category's green threshold (% of total debit).
# Housing #
debit_pie(housing,84,91,85.5,90.7,199.5,200.7,208.5,35)
# Food #
debit_pie(food,117,124,118.5,123.7,199.5,200.7,208.5,15)
# Insurance #
debit_pie(insurance,151,158,152.5,157.7,199.5,200.7,208.5,25)
# Utilities #
debit_pie(utilities,184,191,185.5,190.7,199.5,200.7,208.5,10)
# Transportation #
debit_pie(transportation,84,91,85.5,90.7,231.5,232.7,240.5,15)
# Healthcare #
debit_pie(healthcare,117,124,118.5,123.7,231.5,232.7,240.5,10)
# Recreation #
debit_pie(recreation,151,158,152.5,157.7,231.5,232.7,240.5,10)
# Personal #
debit_pie(personal,184,191,185.5,190.7,231.5,232.7,240.5,10)
# Education #
debit_pie(education,94,101,95.5,100.7,263.5,264.7,272.5,20)
# Investment #
debit_pie(investment,134,141,135.5,140.7,263.5,264.7,272.5,20)
# Other Debit #
debit_pie(misc_debit,174,181,175.5,180.7,263.5,264.7,272.5,10)
# Score donut images plus their percentage annotations.
# Debit Score #
pdf.image('Debit_score.png',-3,185.5,44,33)
score_pie(spending_score,13,20.7,14.5,20.6,197.6,198.8,25,8)
# Balance Score #
pdf.image('Balance_score.png',28,185.5,44,33)
score_pie(balance_score,44,51.7,45.5,51.6,197.6,198.8,25,8)
# Health Score #
pdf.image('Health_score.png',-10,218,88,66)
score_pie(health_score,28.7,42.1,30.2,42.0,246.9,249.1,44,17)
## Credit/Debit Values ##
# Locale-formatted period totals for the summary column; prefixed with
# the detected currency code when one was identified.
debit_val = locale.format_string('%d', int(debit), 1)
if currency != '':
    debit_val = currency + debit_val
credit_val = locale.format_string('%d', int(credit), 1)
if currency != '':
    credit_val = currency + credit_val
credit_debit_balance_int = int(credit - debit)
credit_debit_balance_val = locale.format_string('%d', int(credit_debit_balance_int), 1)
if currency != '':
    credit_debit_balance_val = currency + credit_debit_balance_val
pdf.set_font('helvetica', '', 22)
pdf.set_text_color(10, 100, 140)
pdf.set_xy(25, 95)
pdf.cell(20, 10, credit_val, 0, 0, 'C')
pdf.set_text_color(210, 0, 0)
pdf.set_xy(25, 131)
pdf.cell(20, 10, debit_val, 0, 0, 'C')
# Fix: the original tested `< 0` and `> 0` separately, so a net balance
# of exactly 0 was never printed.  Non-negative balances are green.
if credit_debit_balance_int < 0:
    pdf.set_text_color(210, 0, 0)
else:
    pdf.set_text_color(112, 173, 71)
pdf.set_xy(25, 167)
pdf.cell(20, 10, credit_debit_balance_val, 0, 0, 'C')
## Final Output ##
pdf.output('BankScan Report.pdf')
| [
"en_core_web_sm.load",
"matplotlib.ticker.ScalarFormatter",
"fpdf.FPDF",
"tabula.read_pdf.getNumPages",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.yscale",
"PyPDF2.PdfFileReader",
"dateutil.parser.parse",
"matplotlib.pypl... | [((138, 159), 'en_core_web_sm.load', 'en_core_web_sm.load', ([], {}), '()\n', (157, 159), False, 'import en_core_web_sm\n'), ((238, 273), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '""""""'], {}), "(locale.LC_ALL, '')\n", (254, 273), False, 'import locale\n'), ((390, 445), 'tabula.read_pdf', 'read_pdf', (['"""C:/Users/<NAME>/Desktop/BS.pdf"""'], {'pages': '"""all"""'}), "('C:/Users/<NAME>/Desktop/BS.pdf', pages='all')\n", (398, 445), False, 'from tabula import read_pdf\n'), ((1752, 1782), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['pdf_file'], {}), '(pdf_file)\n', (1772, 1782), False, 'import PyPDF2\n'), ((1794, 1816), 'tabula.read_pdf.getNumPages', 'read_pdf.getNumPages', ([], {}), '()\n', (1814, 1816), False, 'from tabula import read_pdf\n'), ((1824, 1843), 'tabula.read_pdf.getPage', 'read_pdf.getPage', (['(0)'], {}), '(0)\n', (1840, 1843), False, 'from tabula import read_pdf\n'), ((47566, 47611), 'fpdf.FPDF', 'FPDF', ([], {'orientation': '"""P"""', 'unit': '"""mm"""', 'format': '"""A4"""'}), "(orientation='P', unit='mm', format='A4')\n", (47570, 47611), False, 'from fpdf import FPDF\n'), ((2966, 2985), 'dateutil.parser.parse', 'parser.parse', (['sdate'], {}), '(sdate)\n', (2978, 2985), False, 'from dateutil import parser\n'), ((3194, 3213), 'dateutil.parser.parse', 'parser.parse', (['edate'], {}), '(edate)\n', (3206, 3213), False, 'from dateutil import parser\n'), ((48452, 48504), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(0.8)'], {'color': '[0.949, 0.949, 0.949]'}), '((0, 0), 0.8, color=[0.949, 0.949, 0.949])\n', (48462, 48504), True, 'import matplotlib.pyplot as plt\n'), ((48511, 48626), 'matplotlib.pyplot.pie', 'plt.pie', (['values'], {'wedgeprops': "{'linewidth': 0, 'edgecolor': 'white'}", 'colors': '[color, color_b]', 'labeldistance': '(1.1)'}), "(values, wedgeprops={'linewidth': 0, 'edgecolor': 'white'}, colors=[\n color, color_b], labeldistance=1.1)\n", (48518, 48626), True, 'import 
matplotlib.pyplot as plt\n'), ((48645, 48654), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (48652, 48654), True, 'import matplotlib.pyplot as plt\n'), ((48693, 48799), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s.png' % name)"], {'orientation': '"""portrait"""', 'transparent': '(True)', 'bbox_inches': 'None', 'pad_inches': '(0)'}), "('%s.png' % name, orientation='portrait', transparent=True,\n bbox_inches=None, pad_inches=0)\n", (48704, 48799), True, 'import matplotlib.pyplot as plt\n'), ((48802, 48813), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (48811, 48813), True, 'import matplotlib.pyplot as plt\n'), ((56834, 56883), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(0.8)'], {'color': '[0.4902, 0.8, 1.0]'}), '((0, 0), 0.8, color=[0.4902, 0.8, 1.0])\n', (56844, 56883), True, 'import matplotlib.pyplot as plt\n'), ((57308, 57317), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (57315, 57317), True, 'import matplotlib.pyplot as plt\n'), ((57356, 57462), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('%s.png' % name)"], {'orientation': '"""portrait"""', 'transparent': '(True)', 'bbox_inches': 'None', 'pad_inches': '(0)'}), "('%s.png' % name, orientation='portrait', transparent=True,\n bbox_inches=None, pad_inches=0)\n", (57367, 57462), True, 'import matplotlib.pyplot as plt\n'), ((57465, 57476), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (57474, 57476), True, 'import matplotlib.pyplot as plt\n'), ((64291, 64334), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 7)', 'dpi': '(96)'}), '(1, 1, figsize=(16, 7), dpi=96)\n', (64303, 64334), True, 'import matplotlib.pyplot as plt\n'), ((64363, 64375), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (64372, 64375), True, 'import numpy as np\n'), ((65960, 66055), 'matplotlib.pyplot.bar', 'plt.bar', (['(index + 0.0625)', 'z11', 'width'], {'label': '"""Other Debit"""', 'color': '[(0.9569, 0.6941, 0.5137)]'}), "(index + 0.0625, z11, 
width, label='Other Debit', color=[(0.9569, \n 0.6941, 0.5137)])\n", (65967, 66055), True, 'import matplotlib.pyplot as plt\n'), ((66272, 66366), 'matplotlib.pyplot.bar', 'plt.bar', (['(index - 0.0625)', 'u3', 'width'], {'label': '"""Other Credit"""', 'color': '[(0.6157, 0.7647, 0.902)]'}), "(index - 0.0625, u3, width, label='Other Credit', color=[(0.6157, \n 0.7647, 0.902)])\n", (66279, 66366), True, 'import matplotlib.pyplot as plt\n'), ((66492, 66594), 'matplotlib.pyplot.xticks', 'plt.xticks', (['index', 'x'], {'fontsize': '(14)', 'rotation': 'angle', 'horizontalalignment': '"""center"""', 'color': '"""darkgrey"""'}), "(index, x, fontsize=14, rotation=angle, horizontalalignment=\n 'center', color='darkgrey')\n", (66502, 66594), True, 'import matplotlib.pyplot as plt\n'), ((66593, 66634), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)', 'color': '"""darkgrey"""'}), "(fontsize=14, color='darkgrey')\n", (66603, 66634), True, 'import matplotlib.pyplot as plt\n'), ((66638, 66652), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0)'], {}), '(-1.0)\n', (66646, 66652), True, 'import matplotlib.pyplot as plt\n'), ((66688, 66720), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'basey': '(1.00001)'}), "('log', basey=1.00001)\n", (66698, 66720), True, 'import matplotlib.pyplot as plt\n'), ((67031, 67132), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig2.png"""'], {'orientation': '"""portrait"""', 'transparent': '(True)', 'bbox_inches': 'None', 'pad_inches': '(0)'}), "('fig2.png', orientation='portrait', transparent=True,\n bbox_inches=None, pad_inches=0)\n", (67042, 67132), True, 'import matplotlib.pyplot as plt\n'), ((67325, 67368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(16, 7)', 'dpi': '(96)'}), '(1, 1, figsize=(16, 7), dpi=96)\n', (67337, 67368), True, 'import matplotlib.pyplot as plt\n'), ((67374, 67436), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y3'], {'label': '"""Balance"""', 'color': 
'"""black"""', 'linewidth': '(2.0)'}), "(x, y3, label='Balance', color='black', linewidth=2.0)\n", (67382, 67436), True, 'import matplotlib.pyplot as plt\n'), ((67440, 67536), 'matplotlib.pyplot.bar', 'plt.bar', (['(index - 0.0625)', 'y1'], {'label': '"""Credit"""', 'color': '[(0.06667, 0.5647, 0.7961)]', 'width': '(0.125)'}), "(index - 0.0625, y1, label='Credit', color=[(0.06667, 0.5647, 0.7961\n )], width=0.125)\n", (67447, 67536), True, 'import matplotlib.pyplot as plt\n'), ((67531, 67619), 'matplotlib.pyplot.bar', 'plt.bar', (['(index + 0.0625)', 'y2'], {'label': '"""Debit"""', 'color': '[(1, 0.4118, 0.4118)]', 'width': '(0.125)'}), "(index + 0.0625, y2, label='Debit', color=[(1, 0.4118, 0.4118)],\n width=0.125)\n", (67538, 67619), True, 'import matplotlib.pyplot as plt\n'), ((67614, 67706), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)', 'rotation': 'angle', 'horizontalalignment': '"""center"""', 'color': '"""darkgrey"""'}), "(fontsize=14, rotation=angle, horizontalalignment='center', color\n ='darkgrey')\n", (67624, 67706), True, 'import matplotlib.pyplot as plt\n'), ((67707, 67748), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)', 'color': '"""darkgrey"""'}), "(fontsize=14, color='darkgrey')\n", (67717, 67748), True, 'import matplotlib.pyplot as plt\n'), ((67752, 67766), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.0)'], {}), '(-1.0)\n', (67760, 67766), True, 'import matplotlib.pyplot as plt\n'), ((67800, 67832), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {'basey': '(1.00001)'}), "('log', basey=1.00001)\n", (67810, 67832), True, 'import matplotlib.pyplot as plt\n'), ((68465, 68566), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""fig1.png"""'], {'orientation': '"""portrait"""', 'transparent': '(True)', 'bbox_inches': 'None', 'pad_inches': '(0)'}), "('fig1.png', orientation='portrait', transparent=True,\n bbox_inches=None, pad_inches=0)\n", (68476, 68566), True, 'import matplotlib.pyplot as plt\n'), 
((3683, 3724), 're.sub', 're.sub', (['""","""', '""""""', 'df.iloc[i][debit_column]'], {}), "(',', '', df.iloc[i][debit_column])\n", (3689, 3724), False, 'import re\n'), ((4204, 4246), 're.sub', 're.sub', (['""","""', '""""""', 'df.iloc[i][credit_column]'], {}), "(',', '', df.iloc[i][credit_column])\n", (4210, 4246), False, 'import re\n'), ((4712, 4755), 're.sub', 're.sub', (['""","""', '""""""', 'df.iloc[i][balance_column]'], {}), "(',', '', df.iloc[i][balance_column])\n", (4718, 4755), False, 'import re\n'), ((4957, 5000), 're.sub', 're.sub', (['""","""', '""""""', 'df.iloc[0][balance_column]'], {}), "(',', '', df.iloc[0][balance_column])\n", (4963, 5000), False, 'import re\n'), ((56953, 57068), 'matplotlib.pyplot.pie', 'plt.pie', (['values'], {'wedgeprops': "{'linewidth': 0, 'edgecolor': 'white'}", 'colors': '[color, color_b]', 'labeldistance': '(1.1)'}), "(values, wedgeprops={'linewidth': 0, 'edgecolor': 'white'}, colors=[\n color, color_b], labeldistance=1.1)\n", (56960, 57068), True, 'import matplotlib.pyplot as plt\n'), ((57150, 57285), 'matplotlib.pyplot.pie', 'plt.pie', (['values'], {'wedgeprops': "{'linewidth': 0, 'edgecolor': 'white'}", 'colors': '[color, color_b]', 'labeldistance': '(1.1)', 'counterclock': '(False)'}), "(values, wedgeprops={'linewidth': 0, 'edgecolor': 'white'}, colors=[\n color, color_b], labeldistance=1.1, counterclock=False)\n", (57157, 57285), True, 'import matplotlib.pyplot as plt\n'), ((65876, 65892), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65882, 65892), True, 'import numpy as np\n'), ((66191, 66205), 'numpy.add', 'np.add', (['u3', 'u2'], {}), '(u3, u2)\n', (66197, 66205), True, 'import numpy as np\n'), ((2642, 2679), 'dateutil.parser.parse', 'parser.parse', (['df.iloc[i][date_column]'], {}), '(df.iloc[i][date_column])\n', (2654, 2679), False, 'from dateutil import parser\n'), ((65767, 65783), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65773, 65783), True, 'import numpy as np\n'), ((66086, 
66100), 'numpy.add', 'np.add', (['u3', 'u2'], {}), '(u3, u2)\n', (66092, 66100), True, 'import numpy as np\n'), ((66834, 66851), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {}), '()\n', (66849, 66851), False, 'from matplotlib.ticker import ScalarFormatter\n'), ((68269, 68286), 'matplotlib.ticker.ScalarFormatter', 'ScalarFormatter', ([], {}), '()\n', (68284, 68286), False, 'from matplotlib.ticker import ScalarFormatter\n'), ((65646, 65662), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65652, 65662), True, 'import numpy as np\n'), ((65514, 65530), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65520, 65530), True, 'import numpy as np\n'), ((66858, 66867), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (66865, 66867), True, 'import matplotlib.pyplot as plt\n'), ((66899, 66908), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (66906, 66908), True, 'import matplotlib.pyplot as plt\n'), ((66945, 66954), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (66952, 66954), True, 'import matplotlib.pyplot as plt\n'), ((66988, 66997), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (66995, 66997), True, 'import matplotlib.pyplot as plt\n'), ((68293, 68302), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (68300, 68302), True, 'import matplotlib.pyplot as plt\n'), ((68334, 68343), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (68341, 68343), True, 'import matplotlib.pyplot as plt\n'), ((68380, 68389), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (68387, 68389), True, 'import matplotlib.pyplot as plt\n'), ((68423, 68432), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (68430, 68432), True, 'import matplotlib.pyplot as plt\n'), ((65371, 65387), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65377, 65387), True, 'import numpy as np\n'), ((65218, 65234), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65224, 65234), True, 'import numpy as np\n'), 
((65054, 65070), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (65060, 65070), True, 'import numpy as np\n'), ((64873, 64889), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (64879, 64889), True, 'import numpy as np\n'), ((64692, 64708), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (64698, 64708), True, 'import numpy as np\n'), ((64497, 64513), 'numpy.add', 'np.add', (['z11', 'z10'], {}), '(z11, z10)\n', (64503, 64513), True, 'import numpy as np\n')] |
from __future__ import division
from __future__ import print_function
import prettytensor as pt
import tensorflow as tf
import numpy as np
import scipy.misc
import os
import sys
from six.moves import range
from progressbar import ETA, Bar, Percentage, ProgressBar
from misc.config import cfg
from misc.utils import mkdir_p
TINY = 1e-8
from stageI.trainer import CondGANTrainer
class CondGANTrainer_mscoco(CondGANTrainer):
    """Stage-I conditional GAN trainer specialized for the MS-COCO dataset.

    Overrides placeholder construction, the sampling graph, the training
    loop (WGAN-style: several critic updates per generator update, with
    weight clipping) and the image-summary/visualization helpers of the
    base ``CondGANTrainer``.
    """
    def build_placeholder(self):
        """Create scalar feed placeholders for both learning rates."""
        self.generator_lr = tf.placeholder(
            tf.float32, [],
            name='generator_learning_rate'
        )
        self.discriminator_lr = tf.placeholder(
            tf.float32, [],
            name='discriminator_learning_rate'
        )
    def sampler(self):
        """Build the generator graph that produces ``self.fake_images``."""
        with tf.variable_scope('duplicate_embedding'):
            embed = self.duplicate_input(self.embeddings, cfg.TRAIN.NUM_COPY)
        c, _ = self.sample_encoded_context(embed)
        # if cfg.TRAIN.FLAG:
        #     z = tf.zeros([self.batch_size, cfg.Z_DIM])  # Expect similar BGs
        # else:
        z = tf.random_normal([self.batch_size, cfg.Z_DIM])
        # generator input is the conditioning vector concatenated with noise
        self.fake_images = self.model.get_generator(tf.concat([c, z], 1))
    def train(self):
        """Run the full training loop.

        Each step performs ``cfg.TRAIN.CRITIC_PER_GENERATION`` discriminator
        (critic) updates, clipping the weights after each one, followed by a
        single generator update.  Learning rates are halved every
        ``cfg.TRAIN.LR_DECAY_EPOCH`` epochs; checkpoints and image summaries
        are written periodically.

        Raises:
            ValueError: if the averaged epoch losses contain a NaN.
        """
        config = tf.ConfigProto(allow_soft_placement=True)
        with tf.Session(config=config) as sess:
            with tf.variable_scope('input'):
                self.images, self.wrong_images, self.embeddings =\
                    self.dataset.get_batch(self.batch_size)
            with tf.device("/gpu:%d" % cfg.GPU_ID):
                counter = self.build_model(sess)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            sess.run(self.weight_clip_op)
            saver = tf.train.Saver(tf.global_variables(),
                                   keep_checkpoint_every_n_hours=2)
            tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(self.log_dir,
                                                   sess.graph)
            img_sum = self.epoch_sum_images(sess, \
                    cfg.TRAIN.NUM_COPY, -1)
            summary_writer.add_summary(img_sum, -1)
            # only the named losses are averaged and reported per epoch
            keys = ["d_loss", "g_loss"]
            log_vars = []
            log_keys = []
            for k, v in self.log_vars:
                if k in keys:
                    log_vars.append(v)
                    log_keys.append(k)
                    # print(k, v)
            generator_lr = cfg.TRAIN.GENERATOR_LR
            discriminator_lr = cfg.TRAIN.DISCRIMINATOR_LR
            lr_decay_step = cfg.TRAIN.LR_DECAY_EPOCH
            number_example = self.dataset.num_examples
            updates_per_epoch = int(number_example / self.batch_size)
            # resume epoch numbering when restarting from a checkpoint
            epoch_start = int(counter / updates_per_epoch)
            for epoch in range(epoch_start, self.max_epoch):
                widgets = ["epoch #%d|" % epoch,
                           Percentage(), Bar(), ETA()]
                pbar = ProgressBar(maxval=updates_per_epoch,
                                   widgets=widgets)
                pbar.start()
                if epoch % lr_decay_step == 0 and epoch != 0:
                    generator_lr *= 0.5
                    discriminator_lr *= 0.5
                all_log_vals = []
                for i in range(updates_per_epoch):
                    pbar.update(i)
                    feed_out = [self.discriminator_trainer,
                                self.d_sum,
                                self.hist_sum,
                                log_vars]
                    for _ in range(cfg.TRAIN.CRITIC_PER_GENERATION):
                        feed_dict = {self.generator_lr: generator_lr,
                                     self.discriminator_lr: discriminator_lr
                                     }
                        # training d
                        _,d_sum, hist_sum, log_vals = \
                            sess.run(feed_out, feed_dict)
                        sess.run(self.weight_clip_op)
                        summary_writer.add_summary(d_sum, counter)
                        summary_writer.add_summary(hist_sum, counter)
                        all_log_vals.append(log_vals)
                    # train g
                    feed_out = [self.generator_trainer,
                                self.g_sum,
                                ]
                    _, g_sum = sess.run(feed_out,feed_dict)
                    summary_writer.add_summary(g_sum, counter)
                    # save checkpoint
                    counter += 1
                    if counter % self.snapshot_interval == 0:
                        snapshot_path = "%s/%s_%s.ckpt" %\
                            (self.checkpoint_dir,
                             self.exp_name,
                             str(counter))
                        fn = saver.save(sess, snapshot_path)
                        print("Model saved in file: %s" % fn)
                img_sum = self.epoch_sum_images(sess, cfg.TRAIN.NUM_COPY, epoch)
                summary_writer.add_summary(img_sum, counter)
                all_d_hist_sum = sess.run(self.all_d_hist_sum)
                summary_writer.add_summary(all_d_hist_sum, counter)
                avg_log_vals = np.mean(np.array(all_log_vals), axis=0)
                dic_logs = {}
                for k, v in zip(log_keys, avg_log_vals):
                    dic_logs[k] = v
                    # print(k, v)
                log_line = "; ".join("%s: %s" %
                                     (str(k), str(dic_logs[k]))
                                     for k in dic_logs)
                print("Epoch %d | " % (epoch) + log_line)
                sys.stdout.flush()
                if np.any(np.isnan(avg_log_vals)):
                    raise ValueError("NaN detected!")
            coord.request_stop()
            coord.join(threads)
    def visualize_one_superimage(self, fake_images, real_images,
                                 n, filename):
        """Tile an n*n grid where each row is one real image followed by its
        n generated samples; return (image summary op, superimage tensor)."""
        stacked_img = []
        for row in range(n):
            row_img = [real_images[row * n, :, :, :]]
            for col in range(n):
                row_img.append(fake_images[row * n + col, :, :, :])
            # each row is 1 real image followed by n fake images
            stacked_img.append(tf.concat(row_img, 1))
        superimages = tf.expand_dims(tf.concat(stacked_img, 0), 0)
        current_img_summary = tf.summary.image(filename, superimages)
        return current_img_summary, superimages
    def visualization(self, n):
        """Build the training-image summary ops over the first n*n samples."""
        with tf.variable_scope('duplicate_image'):
            images_train = self.duplicate_input(self.images, n)
        with tf.variable_scope('visualization'):
            fake_sum_train, superimage_train = \
                self.visualize_one_superimage(self.fake_images[:n * n],
                                              images_train[:n * n],
                                              n, "train")
        self.superimages = superimage_train
        self.image_summary = tf.summary.merge([fake_sum_train])
    def duplicate_input(self, x, n):
        """Return a batch-sized tensor whose first n*n rows repeat rows
        0, n, 2n, ... of ``x`` n times each, padded with the leading rows
        of ``x`` up to the batch size."""
        assert n*n < self.batch_size
        xlist = []
        for i in range(n):
            for j in range(n):
                # the gathered index depends only on i, so row i*n is
                # appended once per j (i.e. duplicated n times)
                xlist.append(tf.gather(x, tf.stack([i*n])))
        pad = tf.gather(x, tf.stack(list(range(self.batch_size-n*n))))
        out = tf.concat([tf.concat(xlist, 0), pad], 0)
        return out
    def epoch_sum_images(self, sess, n, epoch):
        """Evaluate the visualization ops, write this epoch's tiled image
        into the log directory and return the image summary proto."""
        gen_samples, img_summary =\
            sess.run([self.superimages, self.image_summary])
        # NOTE(review): scipy.misc.imsave was removed in scipy>=1.2, so this
        # requires an older scipy (or a migration to imageio.imwrite)
        scipy.misc.imsave(\
            '%s/train_%d.jpg' % (self.log_dir, epoch), gen_samples[0])
        return img_summary
| [
"numpy.array",
"progressbar.Percentage",
"progressbar.ProgressBar",
"tensorflow.summary.image",
"tensorflow.random_normal",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.concat",
"tensorflow.ConfigProto",
"sys.stdout.flush",
"tensorflow.stack",
"... | [((490, 552), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""generator_learning_rate"""'}), "(tf.float32, [], name='generator_learning_rate')\n", (504, 552), True, 'import tensorflow as tf\n'), ((619, 685), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[]'], {'name': '"""discriminator_learning_rate"""'}), "(tf.float32, [], name='discriminator_learning_rate')\n", (633, 685), True, 'import tensorflow as tf\n'), ((1064, 1110), 'tensorflow.random_normal', 'tf.random_normal', (['[self.batch_size, cfg.Z_DIM]'], {}), '([self.batch_size, cfg.Z_DIM])\n', (1080, 1110), True, 'import tensorflow as tf\n'), ((1225, 1266), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (1239, 1266), True, 'import tensorflow as tf\n'), ((6057, 6065), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (6062, 6065), False, 'from six.moves import range\n'), ((6425, 6464), 'tensorflow.summary.image', 'tf.summary.image', (['filename', 'superimages'], {}), '(filename, superimages)\n', (6441, 6464), True, 'import tensorflow as tf\n'), ((7185, 7193), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (7190, 7193), False, 'from six.moves import range\n'), ((758, 798), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""duplicate_embedding"""'], {}), "('duplicate_embedding')\n", (775, 798), True, 'import tensorflow as tf\n'), ((1163, 1183), 'tensorflow.concat', 'tf.concat', (['[c, z]', '(1)'], {}), '([c, z], 1)\n', (1172, 1183), True, 'import tensorflow as tf\n'), ((1280, 1305), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1290, 1305), True, 'import tensorflow as tf\n'), ((1608, 1630), 'tensorflow.train.Coordinator', 'tf.train.Coordinator', ([], {}), '()\n', (1628, 1630), True, 'import tensorflow as tf\n'), ((1653, 1705), 'tensorflow.train.start_queue_runners', 'tf.train.start_queue_runners', ([], {'sess': 'sess', 'coord': 'coord'}), 
'(sess=sess, coord=coord)\n', (1681, 1705), True, 'import tensorflow as tf\n'), ((1887, 1909), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (1907, 1909), True, 'import tensorflow as tf\n'), ((1939, 1986), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['self.log_dir', 'sess.graph'], {}), '(self.log_dir, sess.graph)\n', (1960, 1986), True, 'import tensorflow as tf\n'), ((2827, 2861), 'six.moves.range', 'range', (['epoch_start', 'self.max_epoch'], {}), '(epoch_start, self.max_epoch)\n', (2832, 2861), False, 'from six.moves import range\n'), ((6144, 6152), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (6149, 6152), False, 'from six.moves import range\n'), ((6365, 6390), 'tensorflow.concat', 'tf.concat', (['stacked_img', '(0)'], {}), '(stacked_img, 0)\n', (6374, 6390), True, 'import tensorflow as tf\n'), ((6560, 6596), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""duplicate_image"""'], {}), "('duplicate_image')\n", (6577, 6596), True, 'import tensorflow as tf\n'), ((6675, 6709), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""visualization"""'], {}), "('visualization')\n", (6692, 6709), True, 'import tensorflow as tf\n'), ((7039, 7073), 'tensorflow.summary.merge', 'tf.summary.merge', (['[fake_sum_train]'], {}), '([fake_sum_train])\n', (7055, 7073), True, 'import tensorflow as tf\n'), ((7216, 7224), 'six.moves.range', 'range', (['n'], {}), '(n)\n', (7221, 7224), False, 'from six.moves import range\n'), ((1332, 1358), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {}), "('input')\n", (1349, 1358), True, 'import tensorflow as tf\n'), ((1504, 1537), 'tensorflow.device', 'tf.device', (["('/gpu:%d' % cfg.GPU_ID)"], {}), "('/gpu:%d' % cfg.GPU_ID)\n", (1513, 1537), True, 'import tensorflow as tf\n'), ((1783, 1804), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (1802, 1804), True, 'import tensorflow as tf\n'), ((2990, 3044), 'progressbar.ProgressBar', 'ProgressBar', 
([], {'maxval': 'updates_per_epoch', 'widgets': 'widgets'}), '(maxval=updates_per_epoch, widgets=widgets)\n', (3001, 3044), False, 'from progressbar import ETA, Bar, Percentage, ProgressBar\n'), ((3316, 3340), 'six.moves.range', 'range', (['updates_per_epoch'], {}), '(updates_per_epoch)\n', (3321, 3340), False, 'from six.moves import range\n'), ((5735, 5753), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5751, 5753), False, 'import sys\n'), ((6305, 6326), 'tensorflow.concat', 'tf.concat', (['row_img', '(1)'], {}), '(row_img, 1)\n', (6314, 6326), True, 'import tensorflow as tf\n'), ((7382, 7401), 'tensorflow.concat', 'tf.concat', (['xlist', '(0)'], {}), '(xlist, 0)\n', (7391, 7401), True, 'import tensorflow as tf\n'), ((2939, 2951), 'progressbar.Percentage', 'Percentage', ([], {}), '()\n', (2949, 2951), False, 'from progressbar import ETA, Bar, Percentage, ProgressBar\n'), ((2953, 2958), 'progressbar.Bar', 'Bar', ([], {}), '()\n', (2956, 2958), False, 'from progressbar import ETA, Bar, Percentage, ProgressBar\n'), ((2960, 2965), 'progressbar.ETA', 'ETA', ([], {}), '()\n', (2963, 2965), False, 'from progressbar import ETA, Bar, Percentage, ProgressBar\n'), ((3599, 3637), 'six.moves.range', 'range', (['cfg.TRAIN.CRITIC_PER_GENERATION'], {}), '(cfg.TRAIN.CRITIC_PER_GENERATION)\n', (3604, 3637), False, 'from six.moves import range\n'), ((5303, 5325), 'numpy.array', 'np.array', (['all_log_vals'], {}), '(all_log_vals)\n', (5311, 5325), True, 'import numpy as np\n'), ((5780, 5802), 'numpy.isnan', 'np.isnan', (['avg_log_vals'], {}), '(avg_log_vals)\n', (5788, 5802), True, 'import numpy as np\n'), ((7327, 7357), 'six.moves.range', 'range', (['(self.batch_size - n * n)'], {}), '(self.batch_size - n * n)\n', (7332, 7357), False, 'from six.moves import range\n'), ((7268, 7285), 'tensorflow.stack', 'tf.stack', (['[i * n]'], {}), '([i * n])\n', (7276, 7285), True, 'import tensorflow as tf\n')] |
import numpy as np
from matplotlib import pyplot
from env import StochasticMAB
# Use a Gaussian reward distribution and Gaussian posterior (Thompson) sampling
def subsample_ts(total_time_slot, arm_num, seed=10):
    """Run Thompson sampling on a random sqrt(T)-sized subset of the arms.

    total_time_slot: int: total number of plays T
    arm_num: int: number of arms in the underlying stochastic bandit
    seed: int: seed for numpy's RNG so runs are reproducible
    returns: (ave_reward, roll_time, total_reward, regret) where
        ave_reward / roll_time describe the initial one-pull-per-arm phase,
        total_reward[t] is the cumulative reward after t plays and
        regret[t] is the cumulative regret after t plays.
    """
    # the seed parameter was previously accepted but never applied
    np.random.seed(seed)
    distribution = "Gaussian"
    bandit_model = StochasticMAB(n_arms=arm_num, random_type=distribution)
    total_reward = [0]  # cumulative reward, recorded at each time slot
    ave_reward = list()  # first observed reward for each sampled arm
    roll_time = list()  # roll time for each arm
    # subsample the arm set down to sqrt(total_time_slot) arms
    subsample_arm_num = int(np.sqrt(total_time_slot))
    subsample_arm_set = np.random.randint(0, arm_num, subsample_arm_num)
    sampled_values = [[] for _ in range(subsample_arm_num)]
    # initialization: pull every sampled arm once
    for i in range(subsample_arm_num):
        current_arm = subsample_arm_set[i]
        this_reward = bandit_model.roll(current_arm)
        sampled_values[i].append(this_reward)
        ave_reward.append(this_reward)
        roll_time.append(1)
        total_reward.append(total_reward[-1] + this_reward)
    for i in range(total_time_slot - subsample_arm_num):
        # sample one value per arm from its Gaussian posterior;
        # np.random.normal expects a standard deviation, not a variance
        thetas = [np.random.normal(np.average(sampled_values[j]),
                                   np.sqrt(np.var(sampled_values[j])))
                  for j in range(subsample_arm_num)]
        arm = np.argmax(thetas)
        # roll the arm with the highest sampled value
        current_arm = subsample_arm_set[arm]
        this_reward = bandit_model.roll(current_arm)
        # update the posterior with the new observation
        sampled_values[arm].append(this_reward)
        total_reward.append(total_reward[-1] + this_reward)
    max_reward = bandit_model.max_expectation()
    regret = [i * max_reward - total_reward[i] for i in range(total_time_slot + 1)]
    return ave_reward, roll_time, total_reward, regret
if __name__ == '__main__':
    # demo run: 500 plays over 100 arms, then plot the cumulative regret
    ave_reward, roll_time, sum_reward, cumulative_regret = subsample_ts(total_time_slot=500, arm_num=100)
    t = list(range(1, 500))
    reward_t = sum_reward[1:500]
    regret_t = cumulative_regret[1:500]
    # pyplot.plot(t, reward_t)
    pyplot.plot(t, regret_t)
    pyplot.show()
| [
"numpy.sqrt",
"env.StochasticMAB",
"numpy.average",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.random.randint",
"numpy.var",
"matplotlib.pyplot.show"
] | [((249, 304), 'env.StochasticMAB', 'StochasticMAB', ([], {'n_arms': 'arm_num', 'random_type': 'distribution'}), '(n_arms=arm_num, random_type=distribution)\n', (262, 304), False, 'from env import StochasticMAB\n'), ((645, 693), 'numpy.random.randint', 'np.random.randint', (['(0)', 'arm_num', 'subsample_arm_num'], {}), '(0, arm_num, subsample_arm_num)\n', (662, 693), True, 'import numpy as np\n'), ((2058, 2082), 'matplotlib.pyplot.plot', 'pyplot.plot', (['t', 'regret_t'], {}), '(t, regret_t)\n', (2069, 2082), False, 'from matplotlib import pyplot\n'), ((2087, 2100), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (2098, 2100), False, 'from matplotlib import pyplot\n'), ((595, 619), 'numpy.sqrt', 'np.sqrt', (['total_time_slot'], {}), '(total_time_slot)\n', (602, 619), True, 'import numpy as np\n'), ((1280, 1297), 'numpy.argmax', 'np.argmax', (['thetas'], {}), '(thetas)\n', (1289, 1297), True, 'import numpy as np\n'), ((1173, 1202), 'numpy.average', 'np.average', (['sampled_values[j]'], {}), '(sampled_values[j])\n', (1183, 1202), True, 'import numpy as np\n'), ((1204, 1229), 'numpy.var', 'np.var', (['sampled_values[j]'], {}), '(sampled_values[j])\n', (1210, 1229), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from argparse import ArgumentParser
import os
import re
import subprocess

import numpy as np
import torch
import tqdm
from transformers import RobertaTokenizer, RobertaModel

from chg.db.database import get_store
# fix odd fault...
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
def remove_color_ascii(msg):
    """Strip ANSI color escape sequences from ``msg`` and trim whitespace.

    The previous implementation spawned ``sed`` through a shell for every
    call, which is slow and non-portable; the same substitution is done
    in-process with a regular expression.
    """
    # matches SGR color sequences such as "\x1b[31m" or "\x1b[0m"
    return re.sub(r"\x1b\[[0-9;]*m", "", msg).strip()
def normalize_vectors(mat):
    """Scale each row of ``mat`` to unit Euclidean norm.

    All-zero rows are left unchanged: their norm is replaced by 1 before
    dividing, which avoids a division by zero (and NaNs).
    """
    lengths = np.linalg.norm(mat, axis=1, keepdims=True)
    lengths[lengths == 0] = 1.0
    return mat / lengths
class BasicEmbedder(object):
    """Embeds code and natural-language text with CodeBERT.

    Long inputs are tokenized once, split into chunks that fit the model's
    positional-embedding limit, embedded chunk by chunk, and averaged into
    a single unit-norm vector.
    """
    def __init__(self):
        self.tokenizer = RobertaTokenizer.from_pretrained(
            "microsoft/codebert-base"
        )
        self.model = RobertaModel.from_pretrained("microsoft/codebert-base")
        self.model = self.model.to("cpu")
        # self.model = self.model.eval()
        self.max_len = self.model.config.max_position_embeddings

    def embed_(self, txt):
        """Return a 1-D unit-norm embedding vector for raw text ``txt``."""
        # (removed a dead assignment that was immediately overwritten here)
        tokens = self.tokenizer.tokenize(txt)
        # split up chunks according to max_len, leaving room for cls/sep
        embeddings = []
        chunk_len = self.max_len - 4
        for i in tqdm.tqdm(list(range(0, len(tokens), chunk_len))):
            chunk = [self.tokenizer.cls_token]
            chunk.extend(tokens[i:(i + chunk_len)])
            chunk.append(self.tokenizer.sep_token)
            chunk_token_ids = self.tokenizer.convert_tokens_to_ids(chunk)
            with torch.no_grad():
                chunk_embedding = self.model(
                    torch.tensor(chunk_token_ids)[None, :]
                )[0]
                # average over tokens
                chunk_embedding = chunk_embedding.mean(dim=1)
            embeddings.append(chunk_embedding)
        embeddings = torch.stack(embeddings)
        # average over chunks
        txt_embedding = embeddings.mean(dim=0)
        txt_embedding = txt_embedding.numpy()
        # unit norm
        txt_embedding = normalize_vectors(txt_embedding)
        txt_embedding = txt_embedding.flatten()
        return txt_embedding

    def embed_code(self, code):
        """Embed source code after stripping ANSI color sequences."""
        return self.embed_(remove_color_ascii(code))

    def embed_nl(self, nl):
        """Embed a natural-language string."""
        return self.embed_(nl)

    def embed_dialogue(self, question_and_answers):
        """Embed a list of (question, answer) pairs as one merged document."""
        # empty history
        if len(question_and_answers) == 0:
            question_and_answers = [("", "")]
        merged_dialogue = "\n".join(
            "{}:{}".format(q, a) for q, a in question_and_answers
        )
        return self.embed_nl(merged_dialogue)
def get_args():
    """Parse the (currently empty) command line of the embedding script."""
    arg_parser = ArgumentParser(description="Embed chg database")
    return arg_parser.parse_args()
def main():
    """Compute and store code/NL embeddings for every chunk in the store."""
    get_args()
    embedder = BasicEmbedder()
    store = get_store()
    # need to embed every chunk
    rows = store.run_query(
        "SELECT id FROM Chunks WHERE chunk IS NOT NULL"
    )
    chunk_ids = [r[0] for r in rows]
    print("Embedding code and dialogue for {} chunks".format(len(chunk_ids)))
    for chunk_id in tqdm.tqdm(chunk_ids):
        matches = store.run_query(
            "SELECT chunk FROM Chunks WHERE id={}".format(chunk_id)
        )
        assert len(matches) == 1, "Chunks should be uniquely identified"
        code_embedding = embedder.embed_code(matches[0][0])
        # embed dialogue associated with this chunk
        dialogue = store.run_query(
            "SELECT question, answer FROM Dialogue WHERE chunk_id={} ORDER BY id"
            .format(chunk_id)
        )
        assert len(dialogue) >= 1, "Should have at least one commit message"
        nl_embedding = embedder.embed_dialogue(dialogue)
        store.record_embeddings((chunk_id, code_embedding, nl_embedding))
if __name__ == "__main__":
    try:
        main()
    except Exception as err:
        # open a post-mortem debugger at the failure point instead of exiting
        import pdb
        pdb.post_mortem()
| [
"transformers.RobertaTokenizer.from_pretrained",
"argparse.ArgumentParser",
"subprocess.Popen",
"tqdm.tqdm",
"torch.stack",
"pdb.post_mortem",
"numpy.sum",
"torch.tensor",
"transformers.RobertaModel.from_pretrained",
"torch.no_grad",
"chg.db.database.get_store"
] | [((334, 443), 'subprocess.Popen', 'subprocess.Popen', (['"""sed \'s/\x1b\\\\[[0-9;]*m//g\'"""'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '("sed \'s/\\x1b\\\\[[0-9;]*m//g\'", stdin=subprocess.PIPE,\n stdout=subprocess.PIPE, shell=True)\n', (350, 443), False, 'import subprocess\n'), ((2820, 2868), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Embed chg database"""'}), "(description='Embed chg database')\n", (2834, 2868), False, 'from argparse import ArgumentParser\n'), ((2976, 2987), 'chg.db.database.get_store', 'get_store', ([], {}), '()\n', (2985, 2987), False, 'from chg.db.database import get_store\n'), ((3260, 3280), 'tqdm.tqdm', 'tqdm.tqdm', (['chunk_ids'], {}), '(chunk_ids)\n', (3269, 3280), False, 'import tqdm\n'), ((638, 662), 'numpy.sum', 'np.sum', (['(mat ** 2)'], {'axis': '(1)'}), '(mat ** 2, axis=1)\n', (644, 662), True, 'import numpy as np\n'), ((859, 918), 'transformers.RobertaTokenizer.from_pretrained', 'RobertaTokenizer.from_pretrained', (['"""microsoft/codebert-base"""'], {}), "('microsoft/codebert-base')\n", (891, 918), False, 'from transformers import RobertaTokenizer, RobertaModel\n'), ((962, 1017), 'transformers.RobertaModel.from_pretrained', 'RobertaModel.from_pretrained', (['"""microsoft/codebert-base"""'], {}), "('microsoft/codebert-base')\n", (990, 1017), False, 'from transformers import RobertaTokenizer, RobertaModel\n'), ((2017, 2040), 'torch.stack', 'torch.stack', (['embeddings'], {}), '(embeddings)\n', (2028, 2040), False, 'import torch\n'), ((4071, 4088), 'pdb.post_mortem', 'pdb.post_mortem', ([], {}), '()\n', (4086, 4088), False, 'import pdb\n'), ((1702, 1717), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1715, 1717), False, 'import torch\n'), ((1785, 1814), 'torch.tensor', 'torch.tensor', (['chunk_token_ids'], {}), '(chunk_token_ids)\n', (1797, 1814), False, 'import torch\n')] |
# -*- coding: utf8 -*-
# engine
# helper class for cuatro
# <NAME> 2021
import numpy as np
import random
import copy
import time
version = 'engine.v.1.0.0'
class State:
"""instance attributes:
size: int: size of one side of the board (defines a cube that holds the game)
win: int: how many items in a row constitute a win of the game
    state: numpy array of shape (size, size, size): 3D matrix containing 0 if position is empty,
1 for player one items and 2 for player 2 items
winner: int: winner of the game (0 until one player has won, then 1 or 2)
winning_diag: list of 'win' tuples of three ints (coordinates of the positions that constitute the win of the game
nex_turn: int (1 or 2) player that will play next
previous_turn: int (1 or 2) player that has played last
play: numpy array of shape (size, size) containint ints. It contains how many plays have been performed on each
of the 2d coordinates in the board. It marks the next third coordinate for each 2d coordinate play
pl1: numpy array of shape (size, size, size) containing 1 if the position is occupied by an item of player 1 and
0 otherwise
pl2: numpy array of shape (size, size, size) containing 1 if the position is occupied by an item of player 2 and
0 otherwise
empty: numpy array of shape (size, size, size) containing 1 if the position is empty and 0 otherwise
last_play: tuple of two ints containing the last play performed
last_3dplay: tuple of three ints containing the last 3dplay performed
game_over: bool: True if all the positions of the 3d board are occupied or one of the players have won and False
otherwise
diags: dictionary of lists of lists of 'win' tuples containing 3 ints. The key of the dictionary is a tuple with
3 ints (coordinates of a 3dpos). the values are lists of diags (a diag is a list containing 'win'
coordinates, which are tuples of three ints). All diags in a list contain the coordinate of the key.
history: list of dicts. Each dict contains the history of a turn. The dictionary fields are 'turn', 'play',
'play3d', 'offensive_score', 'defensive_score' and 'offensive_diag'
valid_pos: list of tuples with 2 ints: list of all valid plays at this time
valid_3dpos: list of tuples with 3 ints: list of all valid 3dplays at this time
"""
    def __init__(self, size=5, win=4, next_turn=1):
        """Initialize an empty board and precompute all winning lines.

        size: int: side length of the cubic board
        win: int: number of aligned items needed to win
        next_turn: int: player (1 or 2) that plays first
        """
        random.seed(time.time())  # seed the stdlib RNG for this process
        self.size = size
        self.win = win
        self.state = np.zeros((self.size, self.size, self.size)).astype('int8')
        self.winner = 0
        self.winning_diag = None
        self.next_turn = next_turn
        self.previous_turn = 3 - next_turn
        # play[i][j] counts how many items were dropped on column (i, j)
        self.play = np.array([[0 for _ in range(self.size)] for _ in range(self.size)])
        self.pl1 = None
        self.pl2 = None
        self.get_pl()
        self.last_play = None  # last play done
        self.last_3dplay = None
        self.valid_pos = None
        self.valid_3dpos = None
        self.get_valid_pos()
        self.game_over = False  # whether game is over or not
        self.history = []
        # get the diagonals of size self.win that cross every cell in the 3d board
        self.diags = dict()
        diags = []
        for i in range(self.size):
            for j in range(self.size):
                for k in range(self.size):
                    self.diags[(i, j, k)] = []
                    diags += self.get_diags(i, j, k)
        # index every generated line under each cell it passes through
        for i in range(self.size):
            for j in range(self.size):
                for k in range(self.size):
                    for diag in diags:
                        if (i, j, k) in diag:
                            self.diags[(i, j, k)].append(diag)
        #for key in self.diags.keys():  # todo eliminate the reformatted version after changes
            #self.old_diags[key] = [[tuple(diag[i][j] for i in range(self.win)) for j in range(3)] for diag in self.diags[key]]
def get_valid_pos(self):
"""updates the valid_pos and valid_3dpos instance attributes
"""
self.valid_pos = []
self.valid_3dpos = []
for i in range(self.size):
for j in range(self.size):
if self.play[i][j] < self.size:
self.valid_pos.append((i, j))
self.valid_3dpos.append((i, j, self.play[i][j]))
def get_diags(self, i, j, k):
"""creates all the diagonals of self.win me"""
diags = []
diags.append([(i + a, j, k) if i + a < self.size else None for a in range(self.win)])
diags.append([(i, j + a, k) if j + a < self.size else None for a in range(self.win)])
diags.append([(i, j, k + a) if k + a < self.size else None for a in range(self.win)])
diags.append([(i + a, j + a, k) if i + a < self.size and j + a < self.size else None for a in range(self.win)])
diags.append([(i + a, j - a, k) if i + a < self.size and j - a > -1 else None for a in range(self.win)])
diags.append([(i + a, j, k + a) if i + a < self.size and k + a < self.size else None for a in range(self.win)])
diags.append([(i + a, j, k - a) if i + a < self.size and k - a > -1 else None for a in range(self.win)])
diags.append([(i, j + a, k + a) if j + a < self.size and k + a < self.size else None for a in range(self.win)])
diags.append([(i, j + a, k - a) if j + a < self.size and k - a > -1 else None for a in range(self.win)])
diags.append([(i + a, j + a, k + a) if i + a < self.size and j + a < self.size and k + a < self.size else None for a in range(self.win)])
diags.append([(i - a, j + a, k + a) if i - a > -1 and j + a < self.size and k + a < self.size else None for a in range(self.win)])
diags.append([(i + a, j - a, k + a) if i + a < self.size and j - a > -1 and k + a < self.size else None for a in range(self.win)])
diags.append([(i - a, j - a, k + a) if i - a > -1 and j - a > -1 and k + a < self.size else None for a in range(self.win)])
diags = [diag for diag in diags if None not in diag]
return diags
def get_score(self, pos3d):
"""computes the score of playing in position pos for both the next_turn (offensive score) and the
previous_turn (defensive score) and returns scores, best diag etc (#todo complete this)
pos3d: tuple of three ints: (coordinates of the play)
returns: score: tuple of:
offensive_score: float
num_offensive_score: int
defensive_score: float
num_defensive_score: int
best_diag: list of tuples containing 3 ints"""
own_score = []
other_score = []
best_diag = None
for diag in self.diags[pos3d]:
own_score.append(0.)
other_score.append(0.)
for item in diag:
if item in self.valid_3dpos: # the position of item is reachable and it is empty
own_score[-1] += 0.5
other_score[-1] += 0.5
else: # the position of item is not reachable but may or may not be empty
if self.next_turn == 1:
if self.pl1[item]: # the position of item is occupied by the current turn
own_score[-1] += 1 # the position of item is occupied by the current turn
other_score[-1] -= self.win
elif self.pl2[item]: # the position of item is occupied by the def
own_score[-1] -= self.win
other_score[-1] += 1
else: # the position of item is not occupied (and it is not reachable either)
own_score[-1] += 0.1
other_score[-1] += 0.1
if self.next_turn == 2:
if self.pl2[item]: # the position of item is occupied by the current turn
own_score[-1] += 1 # the position of item is occupied by the current turn
other_score[-1] -= self.win
elif self.pl1[item]: # the position of item is occupied by the def
own_score[-1] -= self.win
other_score[-1] += 1
else: # the position of item is not occupied (and it is not reachable either)
own_score[-1] += 0.1
other_score[-1] += 0.1
if own_score[-1] == max(own_score):
best_diag = [pos for pos in diag] # make a copy of diag just in case
offensive_score = max(own_score)
num_offensive_score = own_score.count(offensive_score)
defensive_score = max(other_score)
num_defensive_score = other_score.count(defensive_score)
return offensive_score, num_offensive_score, defensive_score, num_defensive_score, best_diag
def get_best_score_play(self):
"""gets the play for which the score is the best
returns: chosen_play: tuple of:
play: tuple of two ints
play3d: tuple of three ints
score: float
diag: list of tuples of tree ints
"""
# initiallization
o_scores = []
n_o_scores = []
d_scores = []
n_d_scores = []
diags = []
centroid = np.array([(self.size - 1) / 2. for _ in range(3)])
# getting list of scores
for play, play3d in zip(self.valid_pos, self.valid_3dpos):
o_score, n_o_score, d_score, n_d_score, diag = self.get_score(play3d)
o_scores.append(o_score)
n_o_scores.append(n_o_score)
d_scores.append(d_score)
n_d_scores.append(n_d_score)
diags.append([item for item in diag])
# eliminate everything that does not have the max score
max_score = max(max(o_scores), max(d_scores))
o_indexes = [i for i in range(len(o_scores)) if o_scores[i] == max_score]
d_indexes = [i for i in range(len(d_scores)) if d_scores[i] == max_score]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [self.valid_pos[i] for i in o_indexes]
o_plays3d = [self.valid_3dpos[i] for i in o_indexes]
d_scores = [d_scores[i] for i in d_indexes]
n_d_scores = [n_d_scores[i] for i in d_indexes]
d_plays = [self.valid_pos[i] for i in d_indexes]
d_plays3d = [self.valid_3dpos[i] for i in d_indexes]
# Select the play
if max_score == self.win - 0.5 and len(o_scores) > 0: # this play is winner
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
if max_score == self.win - 0.5 and len(d_scores) > 0: # this avoids a winner play
return d_plays[0], d_plays3d[0], d_scores[0], None
if len(o_scores) == 1 and len(d_scores) == 0: # will play the best offensive move
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
if len(o_scores) == 0 and len(d_scores) == 1: # will play the best defensive move
return d_plays[0], d_plays3d[0], d_scores[0], None
if len(o_scores) > 1 and len(d_scores) == 0: # will play an offensive move but there is more than one
# first select based on the number of diags giving the score
max_n = max(n_o_scores)
o_indexes = [i for i in range(len(n_o_scores)) if n_o_scores[i] == max_n]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
if len(o_scores) == 1: # this is the best
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there is more than one option that tied, chose the one more centered
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
if len(o_scores) == 0 and len(d_scores) > 1: # we will play an defensive move but there is more than one
# first select based on the number of diags giving the score
max_n = max(n_d_scores)
d_indexes = [i for i in range(len(n_d_scores)) if n_d_scores[i] == max_n]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
if len(d_scores) == 1: # this is the best
return d_plays[0], d_plays3d[0], d_scores[0], None
else: # there is more than one option that tied, chose the one more centered
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in d_plays3d]
mindist = min(dists)
d_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
index = random.randrange(len(d_indexes))
return d_plays[index], d_plays3d[index], d_scores[index], None
if len(o_scores) > 0 and len(d_scores) > 0: # there are offensive and defensive scores tied
# remove all options that do not have the maximum number of diags giving that score
max_n = max(max(n_o_scores), max(n_d_scores))
o_indexes = [i for i in range(len(n_o_scores)) if n_o_scores[i] == max_n]
d_indexes = [i for i in range(len(n_d_scores)) if n_d_scores[i] == max_n]
o_scores = [o_scores[i] for i in o_indexes]
n_o_scores = [n_o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
d_scores = [d_scores[i] for i in d_indexes]
n_d_scores = [n_d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
if len(o_scores) > 0 and len(d_scores) == 0: # will play an offensive move
if len(o_scores) == 1: # there is only one option
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there there is more than one option, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
if len(o_scores) == 0 and len(d_scores) > 0: # will play a defensive move
if len(d_scores) == 1: # we chose this one
return d_plays[0], d_plays3d[0], d_scores[0], None # diags[0] is useless
else: # there are ties, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in d_plays3d]
mindist = min(dists)
d_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
d_scores = [d_scores[i] for i in d_indexes]
d_plays = [d_plays[i] for i in d_indexes]
d_plays3d = [d_plays3d[i] for i in d_indexes]
index = random.randrange(len(d_indexes))
return d_plays[index], d_plays3d[index], d_scores[index], None # diags[0] is useless
if len(o_scores) > 0 and len(d_scores) > 0: # there are ties, play the offensive move
if len(o_scores) == 1: # there is only one option
return o_plays[0], o_plays3d[0], o_scores[0], diags[0]
else: # there there is more than one option, chose based on centrality
dists = [((np.array(play3d) - centroid) ** 2).sum() for play3d in o_plays3d]
mindist = min(dists)
o_indexes = [i for i in range(len(dists)) if dists[i] == mindist]
o_scores = [o_scores[i] for i in o_indexes]
diags = [diags[i] for i in o_indexes]
o_plays = [o_plays[i] for i in o_indexes]
o_plays3d = [o_plays3d[i] for i in o_indexes]
index = random.randrange(len(o_indexes))
return o_plays[index], o_plays3d[index], o_scores[index], diags[index]
else:
raise(ValueError, 'this should not have happened')
else:
raise (ValueError, 'this should not have happened')
def get_pl(self):
"""tis method gets the state for each player"""
self.pl1 = self.state == 1
self.pl2 = self.state == 2
self.empty = self.state == 0
self.game_over = self.empty.sum() == 0
def clone(self):
"""clone the current instance except the children (to make it faster)"""
newself = copy.deepcopy(self)
newself.children = []
return newself
def run_play(self, play=None):
"""update a state with a play. If play is none it will find the best play.
if it is a play it will update the state if the play is valid
play: tuple of two ints or None
returns: success: bool (whether the state was updated or not)"""
if self.game_over:
return False
if play is None:
play, play3d, score, diag = self.get_best_score_play()
if self.play[play] >= self.size: # the play is ilegal
return False
play3d = (play[0], play[1], self.play[play]) # 3d position played
offensive_score, num_offensive_score, defensive_score, num_defensive_score, best_diag = self.get_score(play3d)
self.last_play = play # last play in this state
self.last_3dplay = play3d
self.play[play] += 1 # updates play
self.state[play3d] = self.next_turn # updates state
if self.next_turn == 1: # update the position of the player in this turn
self.pl1[play3d] = 1
else:
self.pl2[play3d] = 1
self.empty[play3d] = 0 # update the empty states
self.next_turn, self.previous_turn = self.previous_turn, self.next_turn # swaps turns
self.get_valid_pos() # updates valid_pos and valid_3dpos
if offensive_score == self.win - 0.5: # updates winner
self.winner = self.previous_turn
self.winning_diag = best_diag
# self.get_winner() #todo eliminate after testing
self.game_over = self.empty.sum() == 0 or self.winner > 0 # updates game over
self.history.append({'turn': self.previous_turn, 'play':self.last_play, 'play3d': self.last_3dplay,
'offensive_score': offensive_score, 'defensive_score': defensive_score,
'best_diag': best_diag})
print(self.history[-1])
return True
if __name__ == '__main__':
print(version)
| [
"numpy.array",
"numpy.zeros",
"time.time",
"copy.deepcopy"
] | [((18413, 18432), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (18426, 18432), False, 'import copy\n'), ((2578, 2589), 'time.time', 'time.time', ([], {}), '()\n', (2587, 2589), False, 'import time\n'), ((2660, 2703), 'numpy.zeros', 'np.zeros', (['(self.size, self.size, self.size)'], {}), '((self.size, self.size, self.size))\n', (2668, 2703), True, 'import numpy as np\n'), ((12394, 12410), 'numpy.array', 'np.array', (['play3d'], {}), '(play3d)\n', (12402, 12410), True, 'import numpy as np\n'), ((13672, 13688), 'numpy.array', 'np.array', (['play3d'], {}), '(play3d)\n', (13680, 13688), True, 'import numpy as np\n'), ((15455, 15471), 'numpy.array', 'np.array', (['play3d'], {}), '(play3d)\n', (15463, 15471), True, 'import numpy as np\n'), ((16389, 16405), 'numpy.array', 'np.array', (['play3d'], {}), '(play3d)\n', (16397, 16405), True, 'import numpy as np\n'), ((17301, 17317), 'numpy.array', 'np.array', (['play3d'], {}), '(play3d)\n', (17309, 17317), True, 'import numpy as np\n')] |
"""
Created on 16/03/2012
@author: victor
"""
import unittest
import pyproct.clustering.test.data as test_data
from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list
import numpy
from pyRMSD.condensedMatrix import CondensedMatrix
import os
class Test(unittest.TestCase):
def test_get_size(self):
cluster = Cluster(prototype = 0, elements = [0,4,5,7,13])
self.assertEqual(cluster.get_size(),5)
def test_get_sizes(self):
myclusters = []
for c in test_data.clusters:
myclusters.append(cluster_from_tuple(c))
sizes = [5,4,4,4,3]
numpy.testing.assert_array_equal(sizes, get_cluster_sizes(myclusters)[1], "Cluster sizes are different")
def test_gen_clusters_from_grouping_list(self):
# numpy.random.random_integers(0,4,20)
numclusters = 5
group_list = [4, 1, 2, 2, 4, 4, 3, 4, 2, 0, 0, 3, 3, 4, 0, 3, 1, 1, 1, 2]
true_clusters = [Cluster(0,[0,4,5,7,13]),
Cluster(1,[1,16,17,18]),
Cluster(2,[2,3,8,19]),
Cluster(6,[6,11,12,15]),
Cluster(9,[9,10,14])]
clusters = gen_clusters_from_class_list(group_list)
sorted_clusters = sorted(clusters, key=lambda c: c.prototype)
self.assertEqual(numclusters,len(sorted_clusters))
for i in range(numclusters):
self.assertEqual(true_clusters[i], sorted_clusters[i])
def test_clusters_are_equal(self):
clusters = [Cluster(0,[0,4,5,7,13]),
Cluster(2,[2,4,5,7,13]),
Cluster(1,[1,16,17,18]),
Cluster(1,[1,16,17,18]),
Cluster(0,[0,4,5,7,13]),
Cluster(1,[1,16,15,18,19]),
Cluster(2,[2,3,8,19])]
self.assertEqual(True,clusters[0] == clusters[0])
self.assertEqual(False,clusters[0] == clusters[1])
self.assertEqual(False,clusters[0] == clusters[2])
self.assertEqual(False,clusters[0] == clusters[3])
self.assertEqual(True, clusters[0] == clusters[4])
self.assertEqual(False,clusters[0] == clusters[5])
self.assertEqual(False,clusters[0] == clusters[6])
self.assertEqual(True, clusters[1] == clusters[1])
self.assertEqual(False,clusters[1] == clusters[2])
self.assertEqual(False,clusters[1] == clusters[3])
self.assertEqual(False,clusters[1] == clusters[4])
self.assertEqual(False, clusters[1] == clusters[5])
self.assertEqual(False,clusters[1] == clusters[6])
self.assertEqual(True,clusters[2] == clusters[2])
self.assertEqual(True,clusters[2] == clusters[3])
self.assertEqual(False,clusters[2] == clusters[4])
self.assertEqual(False, clusters[2] == clusters[5])
self.assertEqual(False,clusters[2] == clusters[6])
self.assertEqual(True,clusters[3] == clusters[3])
self.assertEqual(False,clusters[3] == clusters[4])
self.assertEqual(False, clusters[3] == clusters[5])
self.assertEqual(False,clusters[3] == clusters[6])
self.assertEqual(True,clusters[4] == clusters[4])
self.assertEqual(False, clusters[4] == clusters[5])
self.assertEqual(False,clusters[4] == clusters[6])
def test_cluster_from_tuple(self):
cluster_tuple = (1,[16,17,18])
expected_cluster = Cluster(1,[1,16,17,18])
cluster = cluster_from_tuple(cluster_tuple)
self.assertEquals(expected_cluster,cluster)
def test_creation(self):
try:
Cluster(1,[16,17,18])
self.fail()
except Exception:
pass
cluster = Cluster(1,[16,1,17,18])
cluster_copy = Cluster(1,[16,1,17,18])
elements = [16,1,17,18]
obtained = cluster.all_elements
# Are they equal?
numpy.testing.assert_array_equal(elements, obtained)
# A modification in this list modifies the cluster
obtained[2] = -1
self.assertNotEquals(cluster,cluster_copy)
def test_calculate_biased_medoid(self):
condensed_matrix = CondensedMatrix([1.0, 4.5, 7.2, 6.7,
8.5, 4.5, 3.6,
7.8, 2.2,
2.0])
c = Cluster(None,[0,2,3,4])
interesting_elements = [3,4,0]
self.assertEquals(4, c.calculate_biased_medoid(condensed_matrix,interesting_elements))
interesting_elements = [4,2,3]
self.assertEquals(4,c.calculate_biased_medoid(condensed_matrix,interesting_elements))
def test_calculate_biased_medoid_scenario(self):
cluster = Cluster.from_dic({
"prototype": 28,
"elements": "0:46, 49, 51, 53, 57:58, 62:67",
"id": "cluster_0"
})
matrix = CondensedMatrix(list(numpy.asfarray(numpy.load(os.path.join(test_data.__path__[0],"matrix.npy")))))
self.assertEqual(cluster.prototype, cluster.calculate_medoid(matrix))
cluster = Cluster.from_dic({
"prototype": 54,
"elements": "0:117, 119:135, 138:139, 141, 143, 145:146, 148:150, 153, 155:156, 167:168, 170:172, 175, 177, 190, 193, 212, 215, 234",
"id": "cluster_0"
})
self.assertEqual(cluster.prototype, cluster.calculate_medoid(matrix))
cluster = Cluster.from_dic({
"prototype": 1604,
"elements": "224, 290, 312, 334, 378, 422, 444, 466, 468, 488, 504, 526, 645, 782, 799, 821, 843, 953, 1208, 1254, 1276, 1291, 1313, 1320, 1357, 1445, 1450, 1467, 1472, 1489, 1494, 1516, 1538, 1560, 1582, 1591, 1604, 1613, 1626, 1635, 1671, 1693, 1767, 1789, 1811, 1833, 1841, 1855, 1877, 1899, 1921, 1943, 1965, 2007, 2049, 2070, 2091, 2112, 2203",
"id": "cluster_18"
})
self.assertEqual(cluster.prototype, cluster.calculate_medoid(matrix))
def test_random_sample(self):
cluster = Cluster(None, range(0,100))
self.assertItemsEqual(cluster.get_random_sample(10, 123), [45, 66, 89, 62, 67, 51, 65, 56, 22, 77])
def test_to_dic(self):
true_clusters = [Cluster(0,[0,4,5,7,13]),
Cluster(1,[1,16,17,18]),
Cluster(2,[2,3,8,19]),
Cluster(6,[6,11,12,15]),
Cluster(9,[9,10,14])]
dic_clusters = [
{'prototype': 0, 'elements': '0, 4:5, 7, 13'},
{'prototype': 1, 'elements': '1, 16:18'},
{'prototype': 2, 'elements': '2:3, 8, 19'},
{'prototype': 6, 'elements': '6, 11:12, 15'},
{'prototype': 9, 'elements': '9:10, 14'}
]
for i in range(len(true_clusters)):
self.assertDictEqual(Cluster.to_dic(true_clusters[i]), dic_clusters[i])
def test_from_dic(self):
clusters = [
{
"prototype": 400,
"elements": "400:410, 0, 1 ,2,3"
},
{
"prototype": 500,
"elements": "4,500:510, 5, 6:10, 11"
}
]
expected_elements =[
[400, 401, 402, 403, 404, 405, 406, 407, 408, 409, 410, 0, 1, 2, 3],
[4, 500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 5, 6, 7, 8, 9, 10, 11]
]
for i in range(len(clusters)):
self.assertEqual(Cluster.from_dic(clusters[i]).all_elements, expected_elements[i])
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"pyproct.clustering.cluster.gen_clusters_from_class_list",
"pyproct.clustering.cluster.Cluster.to_dic",
"os.path.join",
"pyproct.clustering.cluster.Cluster.from_dic",
"pyproct.clustering.cluster.cluster_from_tuple",
"pyRMSD.condensedMatrix.CondensedMatrix",
"pyproct.clustering.cluster.Cluster",
"unitt... | [((8271, 8286), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8284, 8286), False, 'import unittest\n'), ((383, 430), 'pyproct.clustering.cluster.Cluster', 'Cluster', ([], {'prototype': '(0)', 'elements': '[0, 4, 5, 7, 13]'}), '(prototype=0, elements=[0, 4, 5, 7, 13])\n', (390, 430), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1246, 1286), 'pyproct.clustering.cluster.gen_clusters_from_class_list', 'gen_clusters_from_class_list', (['group_list'], {}), '(group_list)\n', (1274, 1286), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((3522, 3549), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 17, 18]'], {}), '(1, [1, 16, 17, 18])\n', (3529, 3549), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((3564, 3597), 'pyproct.clustering.cluster.cluster_from_tuple', 'cluster_from_tuple', (['cluster_tuple'], {}), '(cluster_tuple)\n', (3582, 3597), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((3831, 3858), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[16, 1, 17, 18]'], {}), '(1, [16, 1, 17, 18])\n', (3838, 3858), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((3878, 3905), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[16, 1, 17, 18]'], {}), '(1, [16, 1, 17, 18])\n', (3885, 3905), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((4018, 4070), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['elements', 'obtained'], {}), '(elements, obtained)\n', (4050, 4070), False, 'import numpy\n'), ((4293, 4360), 
'pyRMSD.condensedMatrix.CondensedMatrix', 'CondensedMatrix', (['[1.0, 4.5, 7.2, 6.7, 8.5, 4.5, 3.6, 7.8, 2.2, 2.0]'], {}), '([1.0, 4.5, 7.2, 6.7, 8.5, 4.5, 3.6, 7.8, 2.2, 2.0])\n', (4308, 4360), False, 'from pyRMSD.condensedMatrix import CondensedMatrix\n'), ((4539, 4566), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['None', '[0, 2, 3, 4]'], {}), '(None, [0, 2, 3, 4])\n', (4546, 4566), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((4907, 5011), 'pyproct.clustering.cluster.Cluster.from_dic', 'Cluster.from_dic', (["{'prototype': 28, 'elements': '0:46, 49, 51, 53, 57:58, 62:67', 'id':\n 'cluster_0'}"], {}), "({'prototype': 28, 'elements':\n '0:46, 49, 51, 53, 57:58, 62:67', 'id': 'cluster_0'})\n", (4923, 5011), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((5348, 5545), 'pyproct.clustering.cluster.Cluster.from_dic', 'Cluster.from_dic', (["{'prototype': 54, 'elements':\n '0:117, 119:135, 138:139, 141, 143, 145:146, 148:150, 153, 155:156, 167:168, 170:172, 175, 177, 190, 193, 212, 215, 234'\n , 'id': 'cluster_0'}"], {}), "({'prototype': 54, 'elements':\n '0:117, 119:135, 138:139, 141, 143, 145:146, 148:150, 153, 155:156, 167:168, 170:172, 175, 177, 190, 193, 212, 215, 234'\n , 'id': 'cluster_0'})\n", (5364, 5545), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((5768, 6184), 'pyproct.clustering.cluster.Cluster.from_dic', 'Cluster.from_dic', (["{'prototype': 1604, 'elements':\n '224, 290, 312, 334, 378, 422, 444, 466, 468, 488, 504, 526, 645, 782, 799, 821, 843, 953, 1208, 1254, 1276, 1291, 1313, 1320, 1357, 1445, 1450, 1467, 1472, 1489, 1494, 1516, 1538, 1560, 1582, 1591, 1604, 1613, 1626, 1635, 1671, 1693, 1767, 1789, 1811, 1833, 1841, 1855, 1877, 1899, 1921, 1943, 1965, 2007, 2049, 2070, 2091, 2112, 2203'\n , 'id': 
'cluster_18'}"], {}), "({'prototype': 1604, 'elements':\n '224, 290, 312, 334, 378, 422, 444, 466, 468, 488, 504, 526, 645, 782, 799, 821, 843, 953, 1208, 1254, 1276, 1291, 1313, 1320, 1357, 1445, 1450, 1467, 1472, 1489, 1494, 1516, 1538, 1560, 1582, 1591, 1604, 1613, 1626, 1635, 1671, 1693, 1767, 1789, 1811, 1833, 1841, 1855, 1877, 1899, 1921, 1943, 1965, 2007, 2049, 2070, 2091, 2112, 2203'\n , 'id': 'cluster_18'})\n", (5784, 6184), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1006, 1034), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(0)', '[0, 4, 5, 7, 13]'], {}), '(0, [0, 4, 5, 7, 13])\n', (1013, 1034), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1056, 1083), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 17, 18]'], {}), '(1, [1, 16, 17, 18])\n', (1063, 1083), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1106, 1131), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(2)', '[2, 3, 8, 19]'], {}), '(2, [2, 3, 8, 19])\n', (1113, 1131), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1154, 1181), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(6)', '[6, 11, 12, 15]'], {}), '(6, [6, 11, 12, 15])\n', (1161, 1181), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1204, 1227), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(9)', '[9, 10, 14]'], {}), '(9, [9, 10, 14])\n', (1211, 1227), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1585, 1613), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(0)', '[0, 4, 5, 7, 13]'], {}), '(0, [0, 
4, 5, 7, 13])\n', (1592, 1613), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1630, 1658), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(2)', '[2, 4, 5, 7, 13]'], {}), '(2, [2, 4, 5, 7, 13])\n', (1637, 1658), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1675, 1702), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 17, 18]'], {}), '(1, [1, 16, 17, 18])\n', (1682, 1702), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1720, 1747), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 17, 18]'], {}), '(1, [1, 16, 17, 18])\n', (1727, 1747), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1765, 1793), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(0)', '[0, 4, 5, 7, 13]'], {}), '(0, [0, 4, 5, 7, 13])\n', (1772, 1793), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1810, 1841), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 15, 18, 19]'], {}), '(1, [1, 16, 15, 18, 19])\n', (1817, 1841), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((1858, 1883), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(2)', '[2, 3, 8, 19]'], {}), '(2, [2, 3, 8, 19])\n', (1865, 1883), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((3714, 3738), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[16, 17, 18]'], {}), '(1, [16, 17, 18])\n', (3721, 3738), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, 
gen_clusters_from_class_list\n'), ((6617, 6645), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(0)', '[0, 4, 5, 7, 13]'], {}), '(0, [0, 4, 5, 7, 13])\n', (6624, 6645), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((6667, 6694), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(1)', '[1, 16, 17, 18]'], {}), '(1, [1, 16, 17, 18])\n', (6674, 6694), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((6717, 6742), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(2)', '[2, 3, 8, 19]'], {}), '(2, [2, 3, 8, 19])\n', (6724, 6742), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((6765, 6792), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(6)', '[6, 11, 12, 15]'], {}), '(6, [6, 11, 12, 15])\n', (6772, 6792), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((6815, 6838), 'pyproct.clustering.cluster.Cluster', 'Cluster', (['(9)', '[9, 10, 14]'], {}), '(9, [9, 10, 14])\n', (6822, 6838), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((605, 626), 'pyproct.clustering.cluster.cluster_from_tuple', 'cluster_from_tuple', (['c'], {}), '(c)\n', (623, 626), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((704, 733), 'pyproct.clustering.cluster.get_cluster_sizes', 'get_cluster_sizes', (['myclusters'], {}), '(myclusters)\n', (721, 733), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((7325, 7357), 'pyproct.clustering.cluster.Cluster.to_dic', 'Cluster.to_dic', (['true_clusters[i]'], {}), '(true_clusters[i])\n', 
(7339, 7357), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((8103, 8132), 'pyproct.clustering.cluster.Cluster.from_dic', 'Cluster.from_dic', (['clusters[i]'], {}), '(clusters[i])\n', (8119, 8132), False, 'from pyproct.clustering.cluster import Cluster, cluster_from_tuple, get_cluster_sizes, gen_clusters_from_class_list\n'), ((5190, 5239), 'os.path.join', 'os.path.join', (['test_data.__path__[0]', '"""matrix.npy"""'], {}), "(test_data.__path__[0], 'matrix.npy')\n", (5202, 5239), False, 'import os\n')] |
"""
Code to plot average nearest neighbor distance between fish in a school as a function of group size - one line per water temperature.
"""
# imports
import sys, os
import numpy as np
import matplotlib.pyplot as plt
import pickle
from matplotlib import cm
in_dir1 = '../../output/temp_collective/roi/annd.p'
annd_values = pickle.load(open(in_dir1, 'rb')) # 'rb is for read binary
in_dir2 = '../../output/temp_collective/roi/annd_std.p'
out_dir = '../../output/temp_collective/roi_figures/annd.png'
std_annd_values = pickle.load(open(in_dir2, 'rb')) # 'rb is for read binary
temperature = [29,25,21,17,13,9]
group = [1,2,4,8,16]
x = 5
#Plotting
lw=1.25
fs=14
colors = plt.cm.viridis_r(np.linspace(0,1,6))
plt.close('all') # always start by cleaning up
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(211)
for i in range(6):
ax.plot(group[0:x], annd_values[i,0:x], label = str(temperature[i])+ r'$^{\circ}$C', linewidth = lw, color = colors[i])
ax.fill_between(group[0:x], annd_values[i,0:x] - std_annd_values[i,0:x], annd_values[i,0:x] + std_annd_values[i,0:x], alpha = 0.3, color = colors[i])
plt.xlabel('Group Size', size = 0.9*fs)
plt.ylabel('ANND (Body Length)', size = 0.9*fs)
ax.tick_params(labelsize=.8*fs)
ax.set_title('a)', loc='left', fontsize = fs)
plt.legend(fontsize=fs, loc='upper right', title = 'Water Temperature')
x=6
colors = plt.cm.viridis(np.linspace(0,1,5))
ax = fig.add_subplot(212)
for i in range(1,5):
ax.plot(temperature[0:x], annd_values[0:x,i], label = str(group[i]), linewidth = lw, color = colors[i])
ax.fill_between(temperature[0:x], annd_values[0:x,i] - std_annd_values[0:x,i], annd_values[0:x,i] + std_annd_values[0:x,i], alpha = 0.3, color = colors[i])
plt.xlabel('Temperature '+r'($^{\circ}$C)', size = 0.9*fs)
plt.locator_params(axis='x', nbins=5)
plt.ylabel('ANND (Body Length)', size = 0.9*fs)
ax.tick_params(labelsize=.8*fs)
ax.set_title('b)', loc='left', fontsize = fs)
plt.legend(fontsize=fs, loc='upper right', title = 'Group Size')
fig.suptitle('Average Nearest Neighbor Distance (ANND)', size = 1.5*fs)
fig.savefig(out_dir)
plt.show() | [
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.locator_params",
"matplotlib.pyplot.close",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((746, 762), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (755, 762), True, 'import matplotlib.pyplot as plt\n'), ((800, 827), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (810, 827), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1197), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Group Size"""'], {'size': '(0.9 * fs)'}), "('Group Size', size=0.9 * fs)\n", (1168, 1197), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1246), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ANND (Body Length)"""'], {'size': '(0.9 * fs)'}), "('ANND (Body Length)', size=0.9 * fs)\n", (1209, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1328, 1397), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Water Temperature"""'}), "(fontsize=fs, loc='upper right', title='Water Temperature')\n", (1338, 1397), True, 'import matplotlib.pyplot as plt\n'), ((1775, 1835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Temperature ' + '($^{\\\\circ}$C)')"], {'size': '(0.9 * fs)'}), "('Temperature ' + '($^{\\\\circ}$C)', size=0.9 * fs)\n", (1785, 1835), True, 'import matplotlib.pyplot as plt\n'), ((1835, 1872), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""x"""', 'nbins': '(5)'}), "(axis='x', nbins=5)\n", (1853, 1872), True, 'import matplotlib.pyplot as plt\n'), ((1874, 1921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ANND (Body Length)"""'], {'size': '(0.9 * fs)'}), "('ANND (Body Length)', size=0.9 * fs)\n", (1884, 1921), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2065), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': 'fs', 'loc': '"""upper right"""', 'title': '"""Group Size"""'}), "(fontsize=fs, loc='upper right', title='Group Size')\n", (2013, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2174, 2184), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2182, 2184), True, 
'import matplotlib.pyplot as plt\n'), ((725, 745), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(6)'], {}), '(0, 1, 6)\n', (736, 745), True, 'import numpy as np\n'), ((1432, 1452), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5)'], {}), '(0, 1, 5)\n', (1443, 1452), True, 'import numpy as np\n')] |
import numpy as np
import json
from importlib import reload
import os
from models.core.tf_models.cae_model import CAE
import pickle
import tensorflow as tf
import dill
from collections import deque
from models.core.tf_models import utils
from scipy.interpolate import CubicSpline
import time
from models.core.tf_models import cae_model
reload(cae_model)
from models.core.tf_models.cae_model import CAE
class GenModel():
    """Kinematic world model: rolls all vehicle states forward under given
    action sequences with simple Euler integration (dt = 0.1 s).

    Column indices for the state vector are built once in set_stateIndex.
    Callers must assign max_pc / min_pc (lane-position wrap bounds) before
    calling step().
    """
    def __init__(self):
        self.set_stateIndex()

    def set_stateIndex(self):
        """Assign a column index to every state feature, vehicle by vehicle."""
        feature_groups = [
            ('indx_m', ['vel', 'pc', 'act_long_p', 'act_lat_p']),
            ('indx_y', ['vel', 'dx', 'act_long_p']),
            ('indx_f', ['vel', 'dx', 'act_long_p']),
            ('indx_fadj', ['vel', 'dx', 'act_long_p'])]
        col = 0
        for attr, features in feature_groups:
            mapping = {}
            for feature in features:
                mapping[feature] = col
                col += 1
            setattr(self, attr, mapping)

    def get_dx(self, vel_m, vel_o, veh_orientation):
        """Headway change over one 0.1 s tick.

        veh_orientation is the merging vehicle's orientation relative to the
        other vehicle ('front' means the merging car is in front).
        """
        if veh_orientation == 'front':
            relative_speed = vel_m - vel_o
        else:
            relative_speed = vel_o - vel_m
        return relative_speed * 0.1

    def step(self, st_arr_i, acts_arr_i):
        """Advance all trajectories by one 0.1 s tick.

        :param st_arr_i: unscaled states at time t, shape (traj_n, state_dim)
        :param acts_arr_i: unscaled actions at time t, shape (traj_n, 5)
        :return: states at time t+1
        """
        next_st = st_arr_i.copy()

        # Euler-integrate the longitudinal velocity of each vehicle.
        for indx, act_col in ((self.indx_m, 0), (self.indx_y, 2),
                              (self.indx_f, 3), (self.indx_fadj, 4)):
            next_st[:, indx['vel']] += acts_arr_i[:, act_col] * 0.1

        # Integrate lateral position; wrap when a lane change completes.
        pc_col = self.indx_m['pc']
        next_st[:, pc_col] += acts_arr_i[:, 1] * 0.1
        crossed_left = next_st[:, pc_col] > self.max_pc
        next_st[crossed_left, pc_col] = self.min_pc
        crossed_right = next_st[:, pc_col] < self.min_pc
        next_st[crossed_right, pc_col] = self.max_pc

        # Headway updates use the velocities *before* this tick's integration.
        vel_m = st_arr_i[:, self.indx_m['vel']]
        next_st[:, self.indx_y['dx']] += self.get_dx(
            vel_m, st_arr_i[:, self.indx_y['vel']], 'front')
        next_st[:, self.indx_f['dx']] += self.get_dx(
            vel_m, st_arr_i[:, self.indx_f['vel']], 'behind')
        next_st[:, self.indx_fadj['dx']] += self.get_dx(
            vel_m, st_arr_i[:, self.indx_fadj['vel']], 'behind')

        # Record the actions just taken as the "previous action" features.
        next_st[:, self.indx_m['act_long_p']] = acts_arr_i[:, 0]
        next_st[:, self.indx_m['act_lat_p']] = acts_arr_i[:, 1]
        next_st[:, self.indx_y['act_long_p']] = acts_arr_i[:, 2]
        next_st[:, self.indx_f['act_long_p']] = acts_arr_i[:, 3]
        next_st[:, self.indx_fadj['act_long_p']] = acts_arr_i[:, 4]
        return next_st

    def forwardSim(self, st_arr, acts_arr, steps_n):
        """Roll the model forward for steps_n ticks.

        :param st_arr: unscaled states at t0, shape (traj_n, state_dim)
        :param acts_arr: unscaled action sequences, shape (traj_n, steps, 5)
        :return: predicted states, shape (traj_n, steps_n, state_dim)
        """
        traj_n, state_dim = st_arr.shape
        predictions = np.zeros([traj_n, steps_n, state_dim])
        predictions[:, 0, :] = st_arr
        current = st_arr.copy()
        for t in range(1, steps_n):
            current = self.step(current, acts_arr[:, t, :])
            predictions[:, t, :] = current
        return predictions
class MergePolicy():
    """Inference wrapper around a trained CAE: encodes an observation history,
    samples candidate action sequences from the decoder, and converts them to
    smooth executable trajectories via cubic-spline fitting."""
    def __init__(self, test_data, config):
        self.loadModel(config)
        self.data_obj = test_data.data_obj
        # TODO:
        # objective function/ evaluate function/ set execution time, which will
        # execute best ranked traj for a period of time.
    def loadModel(self, config):
        """Restore the CAE encoder/decoder weights from the experiment checkpoint."""
        checkpoint_dir = './models/experiments/'+config['exp_id'] +'/model_dir'
        self.model = CAE(config, model_use='inference')
        Checkpoint = tf.train.Checkpoint(net=self.model)
        # Checkpoint.restore(tf.train.latest_checpoint(checkpoint_dir)).expect_partial()
        # NOTE(review): checkpoint 'ckpt-6' is hard-coded — confirm it matches
        # the checkpoint actually saved for this experiment.
        Checkpoint.restore(checkpoint_dir+'/ckpt-6')
        self.enc_model = self.model.enc_model
        self.dec_model = self.model.dec_model
    def get_cae_outputs(self, seq, traj_n, pred_h):
        """Output includes both sampled action sequences and their corresponding
        distributions.
        :Param: [state_seq, cond_seq], traj_n, pred_h(s)
        """
        st_seq, cond_seq = seq
        # reshape to fit model — NOTE: mutates the passed-in arrays in place
        st_seq.shape = (1, st_seq.shape[0], st_seq.shape[1])
        for n in range(5):
            cond_seq[n].shape = (1, 1, 1)
            cond_seq[n] = np.repeat(cond_seq[n], traj_n, axis=0)
        st_seq = np.repeat(st_seq, traj_n, axis=0)
        # get enc_h state
        enc_state = self.enc_model(st_seq)
        self.skip_n = 2 # done for a smoother trajectory
        self.step_len = round(self.skip_n*self.data_obj.step_size*0.1, 1) # [s]
        # decoder steps to cover pred_h: round the horizon up to whole
        # step_len multiples, then express it in data steps of 0.1 s
        steps_n = int(np.ceil(np.ceil(pred_h/self.step_len)*self.step_len/ \
                                                (self.data_obj.step_size*0.1)))
        print('steps_n: ', steps_n)
        self.dec_model.steps_n = steps_n
        self.dec_model.traj_n = traj_n
        t0 = time.time()
        sampled_actions, gmm_mlon, gmm_mlat, prob_mlon, prob_mlat = self.dec_model(\
                                                            [cond_seq, enc_state])
        print(time.time() - t0)
        prob_mlon = prob_mlon.numpy()
        prob_mlat = prob_mlat.numpy()
        return sampled_actions, gmm_mlon, gmm_mlat, prob_mlon, prob_mlat
    def construct_policy(self, unscaled_acts, bc_der, traj_n, pred_h):
        """Fit clamped cubic splines through the sub-sampled action sequences
        and resample them at 10 Hz; bc_der supplies the first-derivative
        boundary condition at the start of each action channel."""
        bc_der = np.repeat([bc_der], traj_n, axis=0)
        # spline fitting
        traj_len = self.dec_model.steps_n*self.data_obj.step_size*0.1 # [s]
        trajectories = np.zeros([traj_n, int(traj_len*10)+1, 5])
        x_whole = np.arange(0, traj_len+0.1, self.step_len)
        x_snippet = np.arange(0, traj_len, 0.1)
        # t0 = time.time()
        for act_n in range(5):
            # first-derivative clamp at t0, zero second derivative at the end
            f = CubicSpline(x_whole, unscaled_acts[:,:,act_n],
                                bc_type=(
                                        (1, bc_der[:, act_n]),
                                        (2, [0]*traj_n)), axis=1)
            # NOTE(review): coefs is computed but never used — presumably a
            # debugging leftover.
            coefs = np.stack(f.c, axis=2)
            trajectories[:, :, act_n] = f(x_snippet)
        # print(time.time() - t0)
        return trajectories[:, 0:pred_h*10+1,:]
    def get_actions(self, seq, bc_der, traj_n, pred_h):
        """
        :Return: unscaled action array for all cars
        """
        sampled_actions, _, _, prob_mlon, prob_mlat = self.get_cae_outputs(seq, traj_n, pred_h)
        act_mlon, act_mlat, act_y, act_f, act_fadj = sampled_actions
        st_seq, cond_seq = seq
        total_acts_count = traj_n*self.dec_model.steps_n
        veh_acts_count = 5 # 2 for merging, 1 for each of the other cars
        scaled_acts = np.zeros([total_acts_count, veh_acts_count])
        # flatten each sampled action channel into one column of scaled_acts
        i = 0
        actions = [act_.numpy() for act_ in [act_mlon, act_mlat, act_y, act_f, act_fadj]]
        for act_ in actions:
            act_.shape = (total_acts_count)
            scaled_acts[:, i] = act_
            i += 1
        unscaled_acts = self.data_obj.action_scaler.inverse_transform(scaled_acts)
        unscaled_acts.shape = (traj_n, self.dec_model.steps_n, veh_acts_count)
        # prepend the (unscaled) conditioning action as the t0 sample
        cond0 = [cond_seq[n][0, 0, :].tolist() for n in range(5)]
        cond0 = np.array([item for sublist in cond0 for item in sublist])
        cond0 = self.data_obj.action_scaler.inverse_transform(np.reshape(cond0, [1,-1]))
        cond0.shape = (1, 1, 5)
        cond0 = np.repeat(cond0, traj_n, axis=0)
        unscaled_acts = np.concatenate([cond0, unscaled_acts], axis=1)
        actions = self.construct_policy(unscaled_acts[:,0::self.skip_n,:], bc_der, traj_n, pred_h)
        return actions, prob_mlon, prob_mlat
class TestdataObj():
    """Loads the held-out test split (states/targets) plus the pickled
    data_obj whose preprocessing config equals this experiment's data_config."""
    dirName = './datasets/preprocessed/'
    def __init__(self, traffic_density, config):
        # traffic_density is a filename prefix (empty string = mixed density)
        self.traffic_density = traffic_density
        self.setup(config['data_config']) # load test_data and validation data
    def setup(self, data_config):
        """Find the preprocessed dataset whose saved config matches data_config
        and load data_obj plus the state/target test arrays."""
        self.test_episodes = np.loadtxt('./datasets/'+self.traffic_density+\
                                            'test_episodes.csv', delimiter=',')
        config_names = os.listdir(self.dirName+'config_files')
        for config_name in config_names:
            with open(self.dirName+'config_files/'+config_name, 'r') as f:
                config = json.load(f)
            if config == data_config:
                with open(self.dirName+config_name[:-5]+'/'+'data_obj', 'rb') as f:
                    self.data_obj = dill.load(f, ignore=True)
                if self.traffic_density == '':
                    # mixed-density arrays live in the config-named folder
                    with open(self.dirName+config_name[:-5]+'/'+self.traffic_density+\
                                                            'states_test', 'rb') as f:
                        self.states_set = pickle.load(f)
                    with open(self.dirName+config_name[:-5]+'/'+self.traffic_density+\
                                                            'targets_test', 'rb') as f:
                        self.targets_set = pickle.load(f)
                else:
                    # density-specific arrays live directly under dirName
                    with open(self.dirName+self.traffic_density+'states_test', 'rb') as f:
                        self.states_set = pickle.load(f)
                    with open(self.dirName+self.traffic_density+'targets_test', 'rb') as f:
                        self.targets_set = pickle.load(f)
class ModelEvaluation():
    """Evaluates a trained policy on held-out episodes: rolls sampled action
    plans through the kinematic GenModel and measures RWSE against the
    recorded ground truth."""
    def __init__(self, model, test_data, config):
        self.policy = model
        self.test_data = test_data
        self.gen_model = GenModel()
        self.episode_n = 50  # number of test episodes used for RWSE
        self.traj_n = 50  # sampled trajectories per scene
        self.pred_h = 2  # prediction horizon [s]
        self.dirName = './models/experiments/'+config['exp_id']

    def obsSequence(self, state_arr, target_arr, test_data):
        """Turn one episode into model-ready sequences.

        :return: (states, conds) where states has shape (n, 20, state_dim) —
                 a 20-step scaled history ending at each step — and conds is a
                 list of 5 arrays holding the conditioning action of each car.
        """
        state_arr = test_data.data_obj.applyStateScaler(state_arr)
        target_arr = test_data.data_obj.applyActionScaler(target_arr)
        actions = [target_arr[:, n:n+1] for n in range(5)]
        traj_len = len(state_arr)
        step_size = test_data.data_obj.step_size
        pred_step_n = int(np.ceil(self.pred_h/(step_size*0.1)))
        conds = [[], [], [], [], []]
        states = []
        if traj_len > 20:
            prev_states = deque(maxlen=20)
            for i in range(traj_len):
                prev_states.append(state_arr[i])
                if len(prev_states) == 20:
                    # future indices sub-sampled at step_size; drop sequences
                    # whose horizon would run past the end of the episode
                    indx = np.arange(i, i+(pred_step_n+1)*step_size, step_size)
                    indx = indx[indx < traj_len]
                    if indx.size != pred_step_n+1:
                        break
                    states.append(np.array(prev_states))
                    for n in range(5):
                        conds[n].append(actions[n][indx[0:1]])
        return np.array(states), [np.array(conds[n]) for n in range(5)]

    def episodeSetup(self, episode_id):
        """:Return: all info needed for evaluating the model on one episode."""
        test_data = self.test_data
        st_arr = test_data.states_set[test_data.states_set[:, 0] == episode_id][:, 1:]
        targ_arr = test_data.targets_set[test_data.targets_set[:, 0] == episode_id][:, 1:]
        st_seq, cond_seq = self.obsSequence(st_arr.copy(), targ_arr.copy(), test_data)
        return st_seq, cond_seq, st_arr, targ_arr

    def sceneSetup(self, st_seq, cond_seq, st_arr, targ_arr, current_step, pred_h):
        """Set up a scene for a given initial step.

        Steps are 0-indexed array positions; st_seq index k corresponds to
        time step k+19, hence start_step below.
        """
        obs_n = self.test_data.data_obj.obs_n
        start_step = current_step - 19
        end_step = int(current_step + pred_h/0.1)
        # action derivative at the split — boundary condition for the splines
        bc_der_i = (targ_arr[current_step, :]-targ_arr[current_step-1, :])*10
        st_seq_i = st_seq[start_step, -obs_n:, :]  # -obs_n allows variable observation length
        cond_seq_i = [cond_seq[n][start_step, :, :] for n in range(5)]
        history_i = targ_arr[start_step:current_step+1, :]
        targ_i = targ_arr[current_step:end_step+1, :]
        st_i = st_arr[current_step:end_step+1, :]
        return st_seq_i, cond_seq_i, bc_der_i, history_i, st_i, targ_i

    def root_weightet_sqr(self, true_traj, pred_trajs):
        """Root mean squared error per time step (column-wise), after dropping
        all-zero padding rows."""
        true_traj = true_traj[~np.all(true_traj == 0, axis=1)]
        pred_trajs = pred_trajs[~np.all(pred_trajs == 0, axis=1)]
        err = np.sqrt(np.mean(np.square(true_traj-pred_trajs), axis=0))
        print('Total number of generated trajs: ', pred_trajs.shape)
        return err

    def trajCompute(self, episode_id, current_step=29):
        """Sample self.traj_n trajectories at current_step of an episode.

        Bug fixes vs. the original: episodeSetup takes only the episode id;
        the scene (including the boundary derivative that get_actions
        requires) is built through sceneSetup; get_actions' return tuple is
        unpacked; and forwardSim receives a step count instead of the
        horizon in seconds.
        """
        st_seq, cond_seq, st_arr, targ_arr = self.episodeSetup(episode_id)
        st_seq_i, cond_seq_i, bc_der_i, _, _, _ = self.sceneSetup(
                st_seq, cond_seq, st_arr, targ_arr,
                current_step=current_step, pred_h=self.pred_h)
        actions, _, _ = self.policy.get_actions([st_seq_i, cond_seq_i], bc_der_i,
                                        traj_n=self.traj_n, pred_h=self.pred_h)
        # simulate state forward
        state_i = np.repeat([st_arr[current_step, :]], self.traj_n, axis=0)
        self.gen_model.max_pc = max(st_arr[:, self.gen_model.indx_m['pc']])
        self.gen_model.min_pc = min(st_arr[:, self.gen_model.indx_m['pc']])
        pred_step_n = self.pred_h*10+1  # horizon in 0.1 s ticks, incl. step 0
        state_predictions = self.gen_model.forwardSim(state_i.copy(), actions, pred_step_n)
        state_true = st_arr[0:current_step+pred_step_n]
        return state_true, state_predictions

    def compute_rwse(self, traffic_density):
        """Dumps a dict into the exp folder containing RWSE for all vehicle
        actions across time."""
        # ensure this experiment has not been done before
        np.random.seed(2020)
        file_names = os.listdir(self.dirName)
        if traffic_density+'rwse' in file_names:
            print("This experiment has been done already!")
            return None
        elif traffic_density == '':
            print("select a traffic dinsity level!")
            return None

        rwse_dict = {'vel_m':0,
                    'lat_vel':1,
                    'vel_y':2,
                    'vel_f':3,
                    'vel_fadj':4}
        pred_step_n = self.pred_h*10+1
        pred_arrs = [np.zeros([self.episode_n*self.traj_n*6,
                                pred_step_n]) for i in range(5)]
        truth_arrs = [np.zeros([self.episode_n*self.traj_n*6,
                                pred_step_n]) for i in range(5)]
        _row = 0
        for episode_id in self.test_data.test_episodes[:self.episode_n]:
            st_seq, cond_seq, st_arr, targ_arr = self.episodeSetup(episode_id)
            self.gen_model.max_pc = max(st_arr[:, self.gen_model.indx_m['pc']])
            self.gen_model.min_pc = min(st_arr[:, self.gen_model.indx_m['pc']])
            # up to 6 evaluation splits per episode; 19 stays the minimum so a
            # full 20-step history exists and scenarios remain consistent
            splits_n = min(6, len(st_seq))
            traj_splits = np.random.choice(range(19, 19+len(st_seq)), splits_n, replace=False)
            for split in traj_splits:
                st_seq_i, cond_seq_i, bc_der_i, _, st_i, targ_i = self.sceneSetup(st_seq,
                                                    cond_seq,
                                                    st_arr,
                                                    targ_arr,
                                                    current_step=split,
                                                    pred_h=self.pred_h)
                st_init = np.repeat(np.reshape(st_i[0, :], [1, 17]), self.traj_n, axis=0)
                actions, _, _ = self.policy.get_actions([st_seq_i, cond_seq_i], bc_der_i,
                                        traj_n=self.traj_n, pred_h=self.pred_h)
                st_pred = self.gen_model.forwardSim(st_init, actions, pred_step_n)
                # the five tracked signals, in rwse_dict order
                tracked_cols = [self.gen_model.indx_m['vel'],
                                self.gen_model.indx_m['act_lat_p'],
                                self.gen_model.indx_y['vel'],
                                self.gen_model.indx_f['vel'],
                                self.gen_model.indx_fadj['vel']]
                for s, col in enumerate(tracked_cols):
                    truth_arrs[s][_row:_row+self.traj_n, :] = st_i[:, col]
                    pred_arrs[s][_row:_row+self.traj_n, :] = st_pred[:, :, col]
                _row += self.traj_n
            print('Episode ', episode_id, ' has been completed!')

        for key in rwse_dict.keys():
            rwse_dict[key] = self.root_weightet_sqr(truth_arrs[rwse_dict[key]],
                                                    pred_arrs[rwse_dict[key]])

        with open(self.dirName+'/'+ traffic_density + 'rwse', "wb") as f:
            pickle.dump(rwse_dict, f)

        return rwse_dict
| [
"tensorflow.train.Checkpoint",
"numpy.array",
"numpy.arange",
"dill.load",
"os.listdir",
"numpy.repeat",
"numpy.reshape",
"collections.deque",
"scipy.interpolate.CubicSpline",
"numpy.stack",
"numpy.random.seed",
"numpy.concatenate",
"numpy.ceil",
"numpy.all",
"pickle.load",
"numpy.squa... | [((349, 366), 'importlib.reload', 'reload', (['cae_model'], {}), '(cae_model)\n', (355, 366), False, 'from importlib import reload\n'), ((3529, 3582), 'numpy.zeros', 'np.zeros', (['[st_arr.shape[0], steps_n, st_arr.shape[1]]'], {}), '([st_arr.shape[0], steps_n, st_arr.shape[1]])\n', (3537, 3582), True, 'import numpy as np\n'), ((4332, 4366), 'models.core.tf_models.cae_model.CAE', 'CAE', (['config'], {'model_use': '"""inference"""'}), "(config, model_use='inference')\n", (4335, 4366), False, 'from models.core.tf_models.cae_model import CAE\n'), ((4389, 4424), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'net': 'self.model'}), '(net=self.model)\n', (4408, 4424), True, 'import tensorflow as tf\n'), ((5188, 5221), 'numpy.repeat', 'np.repeat', (['st_seq', 'traj_n'], {'axis': '(0)'}), '(st_seq, traj_n, axis=0)\n', (5197, 5221), True, 'import numpy as np\n'), ((5730, 5741), 'time.time', 'time.time', ([], {}), '()\n', (5739, 5741), False, 'import time\n'), ((6195, 6230), 'numpy.repeat', 'np.repeat', (['[bc_der]', 'traj_n'], {'axis': '(0)'}), '([bc_der], traj_n, axis=0)\n', (6204, 6230), True, 'import numpy as np\n'), ((6421, 6464), 'numpy.arange', 'np.arange', (['(0)', '(traj_len + 0.1)', 'self.step_len'], {}), '(0, traj_len + 0.1, self.step_len)\n', (6430, 6464), True, 'import numpy as np\n'), ((6484, 6511), 'numpy.arange', 'np.arange', (['(0)', 'traj_len', '(0.1)'], {}), '(0, traj_len, 0.1)\n', (6493, 6511), True, 'import numpy as np\n'), ((7473, 7517), 'numpy.zeros', 'np.zeros', (['[total_acts_count, veh_acts_count]'], {}), '([total_acts_count, veh_acts_count])\n', (7481, 7517), True, 'import numpy as np\n'), ((8011, 8068), 'numpy.array', 'np.array', (['[item for sublist in cond0 for item in sublist]'], {}), '([item for sublist in cond0 for item in sublist])\n', (8019, 8068), True, 'import numpy as np\n'), ((8209, 8241), 'numpy.repeat', 'np.repeat', (['cond0', 'traj_n'], {'axis': '(0)'}), '(cond0, traj_n, axis=0)\n', (8218, 8241), True, 
'import numpy as np\n'), ((8267, 8313), 'numpy.concatenate', 'np.concatenate', (['[cond0, unscaled_acts]'], {'axis': '(1)'}), '([cond0, unscaled_acts], axis=1)\n', (8281, 8313), True, 'import numpy as np\n'), ((8771, 8860), 'numpy.loadtxt', 'np.loadtxt', (["('./datasets/' + self.traffic_density + 'test_episodes.csv')"], {'delimiter': '""","""'}), "('./datasets/' + self.traffic_density + 'test_episodes.csv',\n delimiter=',')\n", (8781, 8860), True, 'import numpy as np\n'), ((8918, 8959), 'os.listdir', 'os.listdir', (["(self.dirName + 'config_files')"], {}), "(self.dirName + 'config_files')\n", (8928, 8959), False, 'import os\n'), ((13668, 13715), 'numpy.repeat', 'np.repeat', (['[st_arr[29, :]]', 'self.traj_n'], {'axis': '(0)'}), '([st_arr[29, :]], self.traj_n, axis=0)\n', (13677, 13715), True, 'import numpy as np\n'), ((14282, 14302), 'numpy.random.seed', 'np.random.seed', (['(2020)'], {}), '(2020)\n', (14296, 14302), True, 'import numpy as np\n'), ((14325, 14349), 'os.listdir', 'os.listdir', (['self.dirName'], {}), '(self.dirName)\n', (14335, 14349), False, 'import os\n'), ((5129, 5167), 'numpy.repeat', 'np.repeat', (['cond_seq[n]', 'traj_n'], {'axis': '(0)'}), '(cond_seq[n], traj_n, axis=0)\n', (5138, 5167), True, 'import numpy as np\n'), ((6591, 6703), 'scipy.interpolate.CubicSpline', 'CubicSpline', (['x_whole', 'unscaled_acts[:, :, act_n]'], {'bc_type': '((1, bc_der[:, act_n]), (2, [0] * traj_n))', 'axis': '(1)'}), '(x_whole, unscaled_acts[:, :, act_n], bc_type=((1, bc_der[:,\n act_n]), (2, [0] * traj_n)), axis=1)\n', (6602, 6703), False, 'from scipy.interpolate import CubicSpline\n'), ((6817, 6838), 'numpy.stack', 'np.stack', (['f.c'], {'axis': '(2)'}), '(f.c, axis=2)\n', (6825, 6838), True, 'import numpy as np\n'), ((8132, 8158), 'numpy.reshape', 'np.reshape', (['cond0', '[1, -1]'], {}), '(cond0, [1, -1])\n', (8142, 8158), True, 'import numpy as np\n'), ((10864, 10904), 'numpy.ceil', 'np.ceil', (['(self.pred_h / (step_size * 0.1))'], {}), '(self.pred_h / 
(step_size * 0.1))\n', (10871, 10904), True, 'import numpy as np\n'), ((11013, 11029), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (11018, 11029), False, 'from collections import deque\n'), ((11559, 11575), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (11567, 11575), True, 'import numpy as np\n'), ((14901, 14958), 'numpy.zeros', 'np.zeros', (['[self.episode_n * self.traj_n * 6, pred_step_n]'], {}), '([self.episode_n * self.traj_n * 6, pred_step_n])\n', (14909, 14958), True, 'import numpy as np\n'), ((15046, 15103), 'numpy.zeros', 'np.zeros', (['[self.episode_n * self.traj_n * 6, pred_step_n]'], {}), '([self.episode_n * self.traj_n * 6, pred_step_n])\n', (15054, 15103), True, 'import numpy as np\n'), ((18639, 18664), 'pickle.dump', 'pickle.dump', (['rwse_dict', 'f'], {}), '(rwse_dict, f)\n', (18650, 18664), False, 'import pickle\n'), ((5933, 5944), 'time.time', 'time.time', ([], {}), '()\n', (5942, 5944), False, 'import time\n'), ((9102, 9114), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9111, 9114), False, 'import json\n'), ((11579, 11597), 'numpy.array', 'np.array', (['conds[n]'], {}), '(conds[n])\n', (11587, 11597), True, 'import numpy as np\n'), ((13051, 13081), 'numpy.all', 'np.all', (['(true_traj == 0)'], {'axis': '(1)'}), '(true_traj == 0, axis=1)\n', (13057, 13081), True, 'import numpy as np\n'), ((13117, 13148), 'numpy.all', 'np.all', (['(pred_trajs == 0)'], {'axis': '(1)'}), '(pred_trajs == 0, axis=1)\n', (13123, 13148), True, 'import numpy as np\n'), ((13181, 13214), 'numpy.square', 'np.square', (['(true_traj - pred_trajs)'], {}), '(true_traj - pred_trajs)\n', (13190, 13214), True, 'import numpy as np\n'), ((9278, 9303), 'dill.load', 'dill.load', (['f'], {'ignore': '(True)'}), '(f, ignore=True)\n', (9287, 9303), False, 'import dill\n'), ((9561, 9575), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9572, 9575), False, 'import pickle\n'), ((9791, 9805), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', 
(9802, 9805), False, 'import pickle\n'), ((9952, 9966), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (9963, 9966), False, 'import pickle\n'), ((10098, 10112), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (10109, 10112), False, 'import pickle\n'), ((11193, 11251), 'numpy.arange', 'np.arange', (['i', '(i + (pred_step_n + 1) * step_size)', 'step_size'], {}), '(i, i + (pred_step_n + 1) * step_size, step_size)\n', (11202, 11251), True, 'import numpy as np\n'), ((16494, 16525), 'numpy.reshape', 'np.reshape', (['st_i[0, :]', '[1, 17]'], {}), '(st_i[0, :], [1, 17])\n', (16504, 16525), True, 'import numpy as np\n'), ((5465, 5496), 'numpy.ceil', 'np.ceil', (['(pred_h / self.step_len)'], {}), '(pred_h / self.step_len)\n', (5472, 5496), True, 'import numpy as np\n'), ((11414, 11435), 'numpy.array', 'np.array', (['prev_states'], {}), '(prev_states)\n', (11422, 11435), True, 'import numpy as np\n')] |
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example shows how to use genetic programming to find a function that approximates data stored in a CSV
file. The CSV file contains data from the equation 4x^2+6x+2. The exact function is not found by the program
but a close approximation is usually found. This program is only a very simple introduction to genetic
programming. For more advanced genetic programming tasks in Python you might consider using a full-scale framework
such as DEAP (https://code.google.com/p/deap/).
This example was influenced from code found in the following examples:
http://cswww.essex.ac.uk/staff/rpoli/TinyGP/
http://zhanggw.wordpress.com/2009/11/08/a-simple-genetic-programming-in-python-4/
Sample output:
Generation #0, best score=12720.1411766
Generation #1, best score=12720.1411766
Generation #2, best score=10352.1103796
Generation #3, best score=10352.1103796
Generation #4, best score=10352.1103796
Generation #5, best score=10352.1103796
Generation #6, best score=10352.1103796
Generation #7, best score=10352.1103796
Generation #8, best score=10352.1103796
Generation #9, best score=10352.1103796
Generation #10, best score=10352.1103796
Generation #11, best score=10352.1103796
Generation #12, best score=4760.92294456
Generation #13, best score=1324.0
Generation #14, best score=1324.0
Generation #15, best score=1324.0
Generation #16, best score=1324.0
Generation #17, best score=1324.0
Generation #18, best score=1324.0
Generation #19, best score=1324.0
Generation #20, best score=1324.0
Generation #21, best score=1324.0
Generation #22, best score=1324.0
Generation #23, best score=1324.0
...
Generation #89, best score=52.7362001915
Generation #90, best score=52.7362001915
Generation #91, best score=52.7362001915
Generation #92, best score=52.7362001915
Generation #93, best score=52.7362001915
Generation #94, best score=52.7362001915
Generation #95, best score=52.7362001915
Generation #96, best score=52.7362001915
Generation #97, best score=12.2930539351
Generation #98, best score=12.2930539351
Generation #99, best score=12.2930539351
((x+x)*((x-(-2.42355201966*(x/(((-2.42355201966*x)*(((-5.58918290567/-6.00102217712)--2.42355201966)/1.9928565674))/-4.65011284939))))+x))
"""
# Find the AIFH core files
import os
import sys
import numpy as np
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from random import random, randint, choice
from copy import deepcopy
from normalize import Normalize
from error import ErrorCalculation
from random import *
class FunctionWrapper:
    """Pairs a callable with its arity and the symbol used when printing
    evolved trees."""
    def __init__(self, function, child_count, name):
        self.function, self.child_count, self.name = function, child_count, name
class Variable:
    """A named terminal whose value is bound at evaluation time."""
    def __init__(self, var, value=0):
        self.var = var
        self.value = value
        self.name = str(var)
        self.type = "variable"

    def evaluate(self):
        """Current bound value.

        Bug fix: the original returned self.varvalue, an attribute that is
        never defined, so evaluating any variable raised AttributeError.
        """
        return self.value

    def setvar(self, value):
        """Bind a concrete value to this variable."""
        self.value = value

    def display(self):
        """The variable's symbol, used when rendering trees."""
        return (self.var)
class Constant:
    """A terminal leaf holding a fixed value."""
    def __init__(self, value):
        self.value = value
        self.type = "constant"
        self.name = str(value)

    def evaluate(self):
        """The constant's value."""
        return self.value

    def display(self):
        """Value used when rendering the tree (the raw value, not its name)."""
        return self.value
class Node:
    """One node of a GP program tree.

    type is "function" (funwrap applied to children) or a terminal
    ("variable"/"constant"). depth is cached at construction time.
    """
    def __init__(self, type, children, function_wrapper, var=None, const=None):
        self.type = type
        self.children = children
        self.funwrap = function_wrapper
        self.variable = var
        self.const = const
        self.depth = self.refresh_depth()
        self.value = 0
        self.fitness = 0

    def eval(self):
        """Recursively evaluate the subtree rooted at this node."""
        if self.type == "variable":
            return self.variable.value
        elif self.type == "constant":
            return self.const.value
        else:
            # Bug fix: the original rebuilt the child-results comprehension
            # once per child inside a redundant loop (O(n^2) evaluations);
            # a single pass suffices.
            results = [c.eval() for c in self.children]
            return self.funwrap.function(results)

    def set_variable_value(self, var_names, values):
        """Recursively bind values (parallel to var_names) into variable leaves."""
        if self.type == "variable":
            # Bug fix: list.index raises ValueError instead of returning -1,
            # so the original's `idx != -1` guard made the missing-variable
            # message unreachable; test membership explicitly instead.
            if self.variable.var in var_names:
                self.variable.setvar(values[var_names.index(self.variable.var)])
            else:
                print("There is no value for variable:", self.variable.var)
            return
        if self.type == "constant":
            pass
        if self.children:  # function node
            for child in self.children:
                child.set_variable_value(var_names, values)

    def refresh_depth(self):
        """Depth of this subtree; terminal leaves have depth 0."""
        if self.type == "constant" or self.type == "variable":
            return 0
        return max(c.refresh_depth() for c in self.children) + 1

    def display(self):
        """Infix rendering; assumes function nodes have exactly two children."""
        if self.type == "function":
            return ("(" + self.children[0].display() + self.funwrap.name
                    + self.children[1].display() + ")")
        elif self.type == "variable":
            return self.variable.name
        elif self.type == "constant":
            return self.const.name
class Population:
    """A GP population of program trees plus the evolutionary operators:
    roulette selection, subtree crossover and subtree mutation.

    goal is "min" or "max"; new_birth_rate is the fraction of the population
    replaced by offspring each generation.
    """
    def __init__(self, wrapper_list, variable_list, constant_list, score_function,
                 goal="min", population=None, size=10, max_depth=10,
                 max_generations=100, cross_rate=0.9, mutation_rate=0.1, new_birth_rate=0.6):
        self.wrapper_list = wrapper_list        # available function wrappers
        self.variable_list = variable_list      # variable names for terminals
        self.constant_list = constant_list      # constant pool for terminals
        self.score_function = score_function    # fitness function
        self.goal = goal
        self.max_depth = max_depth              # depth cap for random trees
        self.population = population or self._makepopulation(size)
        self.size = size
        self.max_generations = max_generations
        self.cross_rate = cross_rate
        self.mutation_rate = mutation_rate
        self.new_birth_rate = new_birth_rate
        # Score the initial population and track the best individual so far.
        self.best_tree = self.population[0]
        for i in range(0, self.size):
            self.population[i].depth = self.population[i].refresh_depth()
            self.population[i].fitness = self.score_function(self.population[i])
            if self.goal == "min":
                if self.population[i].fitness < self.best_tree.fitness:
                    self.best_tree = self.population[i]
            elif self.goal == "max":
                if self.population[i].fitness > self.best_tree.fitness:
                    self.best_tree = self.population[i]

    def _makepopulation(self, popsize):
        """popsize random trees, each grown from depth 0."""
        return [self._maketree(0) for i in range(0, popsize)]

    def _maketree(self, startdepth):
        """Grow a random subtree; terminals are forced at max_depth."""
        if startdepth == 0:
            nodepattern = 0  # the root is always a function node
        elif startdepth == self.max_depth:
            nodepattern = 1  # depth cap reached: terminal only
        else:
            nodepattern = randint(0, 1)
        if nodepattern == 0:
            childlist = []
            selectedfun = randint(0, len(self.wrapper_list) - 1)
            for i in range(0, self.wrapper_list[selectedfun].child_count):
                childlist.append(self._maketree(startdepth + 1))
            return Node("function", childlist, self.wrapper_list[selectedfun])
        if randint(0, 1) == 0:  # variable terminal
            selectedvariable = randint(0, len(self.variable_list) - 1)
            return Node("variable", None, None,
                        Variable(self.variable_list[selectedvariable]), None)
        selectedconstant = randint(0, len(self.constant_list) - 1)
        return Node("constant", None, None, None,
                    Constant(self.constant_list[selectedconstant]))

    def mutate(self, tree, probchange=0.1, startdepth=0):
        """With probability probchange replace the subtree with a fresh random
        one; otherwise recurse into the children."""
        if random() < probchange:
            return self._maketree(startdepth)
        result = deepcopy(tree)
        if result.type == "function":
            result.children = [self.mutate(c, probchange, startdepth + 1)
                               for c in tree.children]
        return result

    def crossover(self, tree1, tree2, probswap=0.8, top=1):
        """Recursively splice random subtrees of tree2 into a copy of tree1."""
        if random() < probswap and not top:
            return deepcopy(tree2)
        result = deepcopy(tree1)
        if tree1.type == "function" and tree2.type == "function":
            result.children = [self.crossover(c, choice(tree2.children), probswap, 0)
                               for c in tree1.children]
        return result

    def envolve(self, maxgen=100, crossrate=0.9, mutationrate=0.1):
        """Run the evolutionary loop for maxgen generations."""
        for i in range(0, maxgen):
            child = []
            for j in range(0, int(self.size * self.new_birth_rate / 2)):
                parent1, p1 = self.roulette_wheel_select()
                parent2, p2 = self.roulette_wheel_select()
                child.append(self.crossover(parent1, parent2))   # offspring by crossover
                parent, p3 = self.roulette_wheel_select()
                child.append(self.mutate(parent, mutationrate))  # offspring by mutation
            # Replace poorly-performing trees with the new children.
            # Bug fix: iterate over len(child) rather than
            # int(size * new_birth_rate); the two disagree when
            # size * new_birth_rate is odd, which indexed past `child`.
            for j in range(len(child)):
                replacedtree, replacedindex = self.roulette_wheel_select(reverse=True)
                self.population[replacedindex] = child[j]
            # Re-score the whole population and refresh the best tree.
            for k in range(0, self.size):
                self.population[k].fitness = self.score_function(self.population[k])
                self.population[k].depth = self.population[k].refresh_depth()
                if self.goal == "min":
                    if self.population[k].fitness < self.best_tree.fitness:
                        self.best_tree = self.population[k]
                elif self.goal == "max":
                    if self.population[k].fitness > self.best_tree.fitness:
                        self.best_tree = self.population[k]
            print("Generation #" + str(i) + ", best score=" + str(self.best_tree.fitness))
        print(self.best_tree.display())

    def gettoptree(self, choosebest=0.9, reverse=False):
        """Pick a tree biased toward the best (or the worst when reverse=True).

        Bug fixes vs. the original: trees carry no natural ordering, so the
        population is sorted by fitness explicitly; randint() is given int
        bounds (it rejects floats); and the reverse/else branch also
        subtracts 1 so the index cannot run past the end of the population.
        """
        if self.goal == "min":
            self.population.sort(key=lambda tree: tree.fitness)
        elif self.goal == "max":
            self.population.sort(key=lambda tree: tree.fitness, reverse=True)
        elite_n = int(self.size * self.new_birth_rate)
        if reverse == False:
            if random() < choosebest:
                i = randint(0, elite_n)
            else:
                i = randint(elite_n, self.size - 1)
            return self.population[i], i
        else:
            if random() < choosebest:
                i = self.size - randint(0, elite_n) - 1
            else:
                i = self.size - randint(elite_n, self.size - 1) - 1
            return self.population[i], i

    def roulette_wheel_select(self, reverse=False):
        """Fitness-proportional pick. reverse=False favours low fitness (good
        trees under goal="min"); reverse=True favours high fitness (used to
        pick replacement victims)."""
        if reverse == False:
            all_fitness = 0
            for i in range(0, self.size):
                all_fitness += self.population[i].fitness
            # the weights (1 - f/all) sum to size-1, so a draw in
            # [0, size-1) always lands on some individual
            random_num = random()*(self.size - 1)
            check = 0
            for i in range(0, self.size):
                check += (1.0 - self.population[i].fitness / all_fitness)
                if check >= random_num:
                    return self.population[i], i
        if reverse == True:
            all_fitness = 0
            for i in range(0, self.size):
                all_fitness += self.population[i].fitness
            random_num = random()
            check = 0
            for i in range(0, self.size):
                check += self.population[i].fitness * 1.0 / all_fitness
                if check >= random_num:
                    return self.population[i], i
#############################################################
def add(args):
    """Sum of all values in args (variadic addition for the GP function set).

    Idiom fix: the builtin sum() replaces the manual accumulation loop and
    likewise returns 0 for an empty sequence.
    """
    return sum(args)
def sub(args):
    """Binary subtraction: args[0] - args[1]."""
    minuend, subtrahend = args[0], args[1]
    return minuend - subtrahend
def mul(args):
    """Binary multiplication: args[0] * args[1]."""
    left, right = args[0], args[1]
    return left * right
def div(args):
    """GP primitive: protected division — yields 1 on a zero divisor."""
    denominator = args[1]
    if denominator == 0:
        return 1
    return args[0] / denominator
# Wrap the four arithmetic primitives (all arity 2) with display symbols
# for use as GP function nodes.
add_wrapper = FunctionWrapper(add, 2, "+")
sub_wrapper = FunctionWrapper(sub, 2, "-")
mul_wrapper = FunctionWrapper(mul, 2, "*")
div_wrapper = FunctionWrapper(div, 2, "/")
# find the Iris data set
polyFile = os.path.dirname(os.path.realpath(__file__))
# NOTE(review): no os.sep between the directory and the relative suffix, so
# this concatenates to "<dir>../../datasets/..." — abspath still resolves
# it, but to a different location than "<dir>/../../datasets/...".  Verify
# against the on-disk layout (os.path.join would be unambiguous).
polyFile = os.path.abspath(polyFile + "../../datasets/simple-poly.csv")
# Read the Iris data set.
print('Reading CSV file: ' + polyFile)
norm = Normalize()
poly_work = norm.load_csv(polyFile)
# Coerce both CSV columns to numeric in place.
norm.make_col_numeric(poly_work,0)
norm.make_col_numeric(poly_work,1)
# Prepare training data. Separate into input and ideal.
training = np.array(poly_work)
training_input = training[:, 0:1]
training_ideal = training[:, 1:2]
# Calculate the error with MSE.
def score_function(genome):
    """Fitness of ``genome``: MSE of its outputs over the training set."""
    predictions = []
    for sample in training_input:
        # Bind the sole variable "x", then evaluate the expression tree.
        genome.set_variable_value(["x"], sample)
        predictions.append([genome.eval()])
    return ErrorCalculation.mse(np.array(predictions), training_ideal)
# Ten constants drawn uniformly from [-10, 10) form the GP constant pool.
const_pool = [uniform(-10,10) for i in range(10)]
# Evolve expression trees over the single variable "x", scored by MSE.
env = Population([add_wrapper, sub_wrapper, mul_wrapper, div_wrapper], ["x"],
                 const_pool, score_function)
env.envolve() | [
"random.choice",
"os.path.realpath",
"numpy.array",
"normalize.Normalize",
"copy.deepcopy",
"os.path.abspath",
"random.random",
"sys.path.append",
"random.randint"
] | [((3424, 3500), 'os.path.abspath', 'os.path.abspath', (["(aifh_dir + os.sep + '..' + os.sep + 'lib' + os.sep + 'aifh')"], {}), "(aifh_dir + os.sep + '..' + os.sep + 'lib' + os.sep + 'aifh')\n", (3439, 3500), False, 'import os\n'), ((3501, 3526), 'sys.path.append', 'sys.path.append', (['aifh_dir'], {}), '(aifh_dir)\n', (3516, 3526), False, 'import sys\n'), ((12421, 12481), 'os.path.abspath', 'os.path.abspath', (["(polyFile + '../../datasets/simple-poly.csv')"], {}), "(polyFile + '../../datasets/simple-poly.csv')\n", (12436, 12481), False, 'import os\n'), ((12555, 12566), 'normalize.Normalize', 'Normalize', ([], {}), '()\n', (12564, 12566), False, 'from normalize import Normalize\n'), ((12742, 12761), 'numpy.array', 'np.array', (['poly_work'], {}), '(poly_work)\n', (12750, 12761), True, 'import numpy as np\n'), ((3386, 3411), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (3401, 3411), False, 'import os\n'), ((12382, 12408), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (12398, 12408), False, 'import os\n'), ((13187, 13210), 'numpy.array', 'np.array', (['actual_output'], {}), '(actual_output)\n', (13195, 13210), True, 'import numpy as np\n'), ((8257, 8265), 'random.random', 'random', ([], {}), '()\n', (8263, 8265), False, 'from random import random, randint, choice\n'), ((8345, 8359), 'copy.deepcopy', 'deepcopy', (['tree'], {}), '(tree)\n', (8353, 8359), False, 'from copy import deepcopy\n'), ((8649, 8664), 'copy.deepcopy', 'deepcopy', (['tree2'], {}), '(tree2)\n', (8657, 8664), False, 'from copy import deepcopy\n'), ((8690, 8705), 'copy.deepcopy', 'deepcopy', (['tree1'], {}), '(tree1)\n', (8698, 8705), False, 'from copy import deepcopy\n'), ((11614, 11622), 'random.random', 'random', ([], {}), '()\n', (11620, 11622), False, 'from random import random, randint, choice\n'), ((7430, 7443), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (7437, 7443), False, 'from random import random, randint, 
choice\n'), ((7789, 7802), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (7796, 7802), False, 'from random import random, randint, choice\n'), ((8603, 8611), 'random.random', 'random', ([], {}), '()\n', (8609, 8611), False, 'from random import random, randint, choice\n'), ((10520, 10528), 'random.random', 'random', ([], {}), '()\n', (10526, 10528), False, 'from random import random, randint, choice\n'), ((10555, 10598), 'random.randint', 'randint', (['(0)', '(self.size * self.new_birth_rate)'], {}), '(0, self.size * self.new_birth_rate)\n', (10562, 10598), False, 'from random import random, randint, choice\n'), ((10660, 10715), 'random.randint', 'randint', (['(self.size * self.new_birth_rate)', '(self.size - 1)'], {}), '(self.size * self.new_birth_rate, self.size - 1)\n', (10667, 10715), False, 'from random import random, randint, choice\n'), ((10772, 10780), 'random.random', 'random', ([], {}), '()\n', (10778, 10780), False, 'from random import random, randint, choice\n'), ((11249, 11257), 'random.random', 'random', ([], {}), '()\n', (11255, 11257), False, 'from random import random, randint, choice\n'), ((10940, 10995), 'random.randint', 'randint', (['(self.size * self.new_birth_rate)', '(self.size - 1)'], {}), '(self.size * self.new_birth_rate, self.size - 1)\n', (10947, 10995), False, 'from random import random, randint, choice\n'), ((8815, 8837), 'random.choice', 'choice', (['tree2.children'], {}), '(tree2.children)\n', (8821, 8837), False, 'from random import random, randint, choice\n'), ((10819, 10862), 'random.randint', 'randint', (['(0)', '(self.size * self.new_birth_rate)'], {}), '(0, self.size * self.new_birth_rate)\n', (10826, 10862), False, 'from random import random, randint, choice\n')] |
# -*- coding: utf-8 -*-
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.stats import mode
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor
"""
Combine train and test data
Add one more column to the combined data to identify train and test data.
"""
def combine_datasets(train, test):
    """Tag and vertically concatenate the train and test frames.

    A 'source' column ('train'/'test') is added to each frame (in place)
    so the rows can be separated again after shared preprocessing.

    Bug fix: the original body ignored its ``train``/``test`` parameters
    and operated on the module-level ``dataset_train``/``dataset_test``
    globals instead.  Callers that passed those globals see identical
    behaviour; the function is now usable with any pair of frames.
    """
    train['source'] = 'train'
    test['source'] = 'test'
    combined_data = pd.concat([train, test], ignore_index=True)
    return combined_data
"""
Print the summary of the given dataframe.
This method prints following summary details of the dataframe:
1: shape
2: Null values per column
"""
def print_dataset_summary(dataset):
    """Print the shape and per-column null counts of ``dataset``.

    Bug fix: the original printed the null counts of the module-level
    ``combined_data`` global instead of the ``dataset`` argument, so the
    three summary calls all reported the same null counts.
    """
    print("\n\n<------ Summary for data --->")
    print("\tShape of train data",dataset.shape)
    print("\tPrinting null values per column :")
    print(dataset.apply(lambda x: sum(x.isnull())))
"""
Calculate mean square error.
"""
def calculate_mse(Y_pred, Y_actual):
    """Return the mean of the squared prediction errors."""
    squared_errors = (Y_pred - Y_actual) ** 2
    return np.mean(squared_errors)
def plot_residual_graph(Y_pred, Y_actual):
    """Scatter predictions against their residuals, with a zero baseline."""
    residuals = Y_pred - Y_actual
    plt.scatter(Y_pred, residuals, c='b')
    plt.hlines(y=0, xmin=-1000, xmax=5000)
    plt.title('Residual plot')
def unique_val_categorical_col(categorical_columns, df=None):
    """Print the value counts of each categorical column.

    Parameters
    ----------
    categorical_columns : list of str
        Column names to summarise.
    df : pandas.DataFrame, optional
        Frame to read from.  Defaults to the module-level
        ``combined_data`` (the original implementation was hard-wired to
        that global; existing call sites are unchanged).
    """
    data = combined_data if df is None else df
    for column in categorical_columns:
        print("<--------- Column name: ",column," ----------->")
        print(data[column].value_counts())
def plot_categorical_features(df, categorical_columns):
    """Draw one bar chart of value counts per categorical column."""
    print("Size of list: ",len(categorical_columns))
    for column in categorical_columns:
        counts = df[column].value_counts()
        counts.plot(kind="bar", title=column)
        plt.show()
# Importing the dataset
dataset_train = pd.read_csv('data/train_av.csv')
dataset_test = pd.read_csv('data/test_av.csv')
# Stack train and test so the preprocessing below is applied once to both.
combined_data = combine_datasets(dataset_train, dataset_test)
print_dataset_summary(dataset_train)
print_dataset_summary(dataset_test)
print_dataset_summary(combined_data)
#get categorical column
# Object-dtype columns are treated as categorical.
categorical_column = [x for x in combined_data.dtypes.index if combined_data.dtypes[x] == 'object']
print(categorical_column)
head = combined_data.head()  # kept for interactive inspection; otherwise unused
unique_val_categorical_col(categorical_column)
plot_categorical_features(combined_data, categorical_column)
# Missing value imputation.
combined_data["Gender"].fillna("Male",inplace=True)
combined_data["Married"].fillna("Yes",inplace=True)
combined_data["Credit_History"].fillna(1,inplace=True)
# NOTE(review): exact duplicate of the line above — harmless but redundant.
combined_data["Credit_History"].fillna(1,inplace=True)
# Normalise the '3+' dependents label to plain '3'; other labels map to themselves.
combined_data['Dependents'] = combined_data['Dependents'].map({'3+':'3', '1' : '1', '0' : '0', '2' : '2'})
# NOTE(review): this line is a bare attribute access — fillna is never
# actually called, so Dependents NaNs are NOT imputed here.  Probably
# meant something like .fillna('0', inplace=True); confirm the intended
# fill value before changing it.
combined_data['Dependents'].fillna
# NOTE(review): the next two expressions compute results that are
# immediately discarded (useful only in an interactive session).
combined_data["Credit_History"].value_counts()
combined_data["Dependents"].isnull().sum()
# step 10
# separate train and test data
# Remove Item_Outlet_Sales from test data
train = combined_data.loc[combined_data['source']=="train"]
test = combined_data.loc[combined_data['source']=="test"]
# NOTE(review): 'train' is a slice of combined_data; dropping in place may
# trigger pandas' SettingWithCopyWarning.
train.drop(['source'],axis=1,inplace=True)
# target variable name.
# NOTE(review): target is an empty string and IDcol/submissionCols are
# empty lists, so train[target] below selects a column literally named ''
# — these look like placeholders that must be filled in before the script
# can run end to end.
target = ''
IDcol = []
submissionCols = []
predictors = [x for x in train.columns if x not in [target]+IDcol]
X = train[predictors]
Y = train[target]
#split the data into train and test data. Cross validation.
X_train, X_test, Y_train , Y_test = train_test_split(X,Y,test_size = 0.2, random_state = 0)
'''
# Uncomment this code to use LinearRegression.
# Predict Values using LinearRegression Model
linear_regression = LinearRegression()
linear_regression.fit(X_train,Y_train)
Y_predict = linear_regression.predict(X_test)
print(calculate_mse(Y_predict, Y_test))
# calculate R-Squared Adjusted.
score = linear_regression.score(X_test,Y_test)
print(score)
# plot the graph.
plot_residual_graph(Y_predict, Y_test)
test[target] = linear_regression.predict(test[predictors])
# Linear model processing end here...
'''
'''
# Uncomment this code for using Polynomial Regression.
# Predict the values using Polynomial regression.
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 2)
X_train_poly = poly_reg.fit_transform(X_train)
poly_reg.fit(X_train_poly, Y_train)
polynomial_regression = LinearRegression()
polynomial_regression.fit(X_train_poly, Y_train)
X_test_poly = poly_reg.fit_transform(X_test)
Y_predict_poly = polynomial_regression.predict(X_test_poly)
# calculate MSE
print(calculate_mse(Y_predict_poly, Y_test))
# calculate R-Squared Adjusted.
score = polynomial_regression.score(X_test_poly,Y_test)
print(score)
plot_residual_graph(Y_predict_poly, Y_test)
# predict for test data.
test[target] = polynomial_regression.predict(poly_reg.fit_transform(test[predictors]))
# checking magnitude of coefficient.
coeff = polynomial_regression.coef_
print(max(coeff))
print(min(coeff))
print(sum(coeff)/len(coeff))
# Evaluating model performance using k-fold cross validation.
poly_regression_accuracies = cross_val_score(estimator = polynomial_regression, X = X_train_poly,
y = Y_train, cv = 10)
print(poly_regression_accuracies.mean())
print(poly_regression_accuracies.std())
# Polynomical regression processing ends here.
'''
'''
# Uncomment this code to use RidgeRegression.
# Training the model using Ridge Regression.
ridge_regression = Ridge(normalize = True)
ridge_regression.fit(X_train, Y_train)
Y_pred_ridge = ridge_regression.predict(X_test)
print(calculate_mse(Y_pred_ridge, Y_test))
ridge_regression.score(X_test, Y_test)
ridge_coeff = ridge_regression.coef_
print(max(ridge_coeff))
print(min(ridge_coeff))
print(sum(ridge_coeff)/len(ridge_coeff))
# Evaluting model performance using k-folde cross validation.
ridge_accuracies = cross_val_score(estimator = ridge_regression, X = X_train,
y = Y_train, cv = 10)
print(ridge_accuracies.mean())
print(ridge_accuracies.std())
# Applying Grid Search to find the best model and the best parameters
alphas = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
fit_interceptOptions = ([True, False])
solverOptions = (['svd', 'cholesky', 'sparse_cg', 'sag'])
parameters = dict(alpha=alphas, fit_intercept=fit_interceptOptions, solver=solverOptions)
grid_search = GridSearchCV(estimator = ridge_regression,
param_grid = parameters)
grid_search = grid_search.fit(X_train, Y_train)
best_accuracy = grid_search.best_score_
best_parameters = grid_search.best_params_
test[target] = grid_search.predict(test[predictors])
# Ridge regression ends here..
'''
# Method to evaludate model performance
def evaluate_model_performance(model, x_train, y_train, x_test, y_test):
    """Fit ``model`` on the training split, then report the test-set MSE
    and 10-fold cross-validation accuracy statistics."""
    model.fit(x_train, y_train)
    predictions = model.predict(x_test)
    print("Mean Square Error: ", calculate_mse(predictions, y_test))
    scores = cross_val_score(estimator=model, X=x_train, y=y_train, cv=10)
    print("Accurracy Mean: ", scores.mean())
    print("Accurracy Std : ", scores.std())
# Training model using XGBoost.
xgbRegressor = XGBRegressor()
evaluate_model_performance(xgbRegressor, X_train, Y_train, X_test, Y_test)
'''
Performance of above model :
Mean Square Error:  1186173.7950376957
Accurracy Mean:  0.594592170829
Accurracy Std :  0.019906704365
'''
# Build the submission frame from the held-out rows.
test[target] = xgbRegressor.predict(test[predictors])
submission = test[IDcol]
submission[target] = test[target]
# Bug fix: DIR is referenced by the to_csv call immediately after this
# block but was never defined anywhere in the script, so the write raised
# NameError.  Default the output directory to the current working
# directory so the submission file can actually be written.
DIR = '.'
submission.to_csv(DIR+"/ridge_regression.csv", index=False) | [
"numpy.mean",
"pandas.read_csv",
"matplotlib.pyplot.show",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.hlines",
"xgboost.XGBRegressor",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title",
"pandas.concat",
"sklearn.model_selection.cross_val_score"
] | [((2098, 2130), 'pandas.read_csv', 'pd.read_csv', (['"""data/train_av.csv"""'], {}), "('data/train_av.csv')\n", (2109, 2130), True, 'import pandas as pd\n'), ((2146, 2177), 'pandas.read_csv', 'pd.read_csv', (['"""data/test_av.csv"""'], {}), "('data/test_av.csv')\n", (2157, 2177), True, 'import pandas as pd\n'), ((3687, 3740), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, Y, test_size=0.2, random_state=0)\n', (3703, 3740), False, 'from sklearn.model_selection import train_test_split\n'), ((7425, 7439), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {}), '()\n', (7437, 7439), False, 'from xgboost import XGBRegressor\n'), ((716, 775), 'pandas.concat', 'pd.concat', (['[dataset_train, dataset_test]'], {'ignore_index': '(True)'}), '([dataset_train, dataset_test], ignore_index=True)\n', (725, 775), True, 'import pandas as pd\n'), ((1311, 1344), 'numpy.mean', 'np.mean', (['((Y_pred - Y_actual) ** 2)'], {}), '((Y_pred - Y_actual) ** 2)\n', (1318, 1344), True, 'import numpy as np\n'), ((1487, 1532), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Y_pred', '(Y_pred - Y_actual)'], {'c': '"""b"""'}), "(Y_pred, Y_pred - Y_actual, c='b')\n", (1498, 1532), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1577), 'matplotlib.pyplot.hlines', 'plt.hlines', ([], {'y': '(0)', 'xmin': '(-1000)', 'xmax': '(5000)'}), '(y=0, xmin=-1000, xmax=5000)\n', (1549, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1583, 1609), 'matplotlib.pyplot.title', 'plt.title', (['"""Residual plot"""'], {}), "('Residual plot')\n", (1592, 1609), True, 'import matplotlib.pyplot as plt\n'), ((7212, 7273), 'sklearn.model_selection.cross_val_score', 'cross_val_score', ([], {'estimator': 'model', 'X': 'x_train', 'y': 'y_train', 'cv': '(10)'}), '(estimator=model, X=x_train, y=y_train, cv=10)\n', (7227, 7273), False, 'from sklearn.model_selection import cross_val_score\n'), ((2046, 2056), 'matplotlib.pyplot.show', 'plt.show', 
([], {}), '()\n', (2054, 2056), True, 'import matplotlib.pyplot as plt\n')] |
import sys
import string
import numpy as np
import astropy.units as u
from astropy.table import Column, Table
from astropy.io import ascii
from astropy.coordinates import SkyCoord
from astroquery.simbad import Simbad
from astroquery.esasky import ESASky
from astroquery.gaia import Gaia
star = sys.argv[1]
#star = 'HD128620'
#tables = Gaia.load_tables(only_names=True)
##get data from simbad
#print('star: ' + star)
#check all votable simbad fields: Simbad.list_votable_fields()
Simbad.add_votable_fields('pm', 'plx','rv_value','rvz_error','flux(V)', 'flux_error(V)')
result_table = Simbad.query_object(star)
# RA is reported sexagesimal (h m s); convert to decimal degrees.
ra0 = [float(x) for x in result_table['RA'][0].split()]
ra1 = (ra0[0] + ra0[1]/60 + ra0[2]/3600) * 360/24 #deg
# DEC is reported sexagesimal (d m s).  Bug fix: for negative declinations
# the minutes/seconds must be subtracted from the magnitude, not added,
# and a "-00 mm ss" degree field loses its sign under float(); take the
# sign from the string instead.
dec_str = result_table['DEC'][0]
dec0 = [float(x) for x in dec_str.split()]
dec_sign = -1.0 if dec_str.strip().startswith('-') else 1.0
dec1 = dec_sign * (abs(dec0[0]) + dec0[1]/60 + dec0[2]/3600) #deg
pmra = result_table['PMRA'][0]
pmdec = result_table['PMDEC'][0]
##convert to Gaia epoch
#dec = dec1 + 15.5*pmdec/3600000 #deg
#ra = ra1 + (15.5*pmra/3600000)/np.cos((dec + dec1)*np.pi/360) #deg
dec = dec1
ra = ra1
rv = result_table['RV_VALUE'][0]
erv = result_table['RVZ_ERROR'][0]
plx = result_table['PLX_VALUE'][0]
##get data from Gaia
##get source id
# Resolve the target's catalogue identifiers via SIMBAD.
r = Simbad.query_objectids(star)
g = [x for x in r if 'Gaia DR2' in str(x)]
h = [x for x in r if 'HIP' in str(x)]
if len(h) == 0:
    sys.exit("No Hipparcos source found to match the target!")
# Last whitespace-separated token of the HIP identifier is the number.
h1 = h[0][0].split()[-1]
hid = int(h1)
if len(g) == 0:
    # No Gaia DR2 id known: fall back to a cone search around the SIMBAD
    # position, keeping only sources whose parallax is within 10% of the
    # SIMBAD value.
    query = "SELECT * FROM gaiadr2.gaia_source " + \
    "WHERE CONTAINS(POINT('ICRS',gaiadr2.gaia_source.ra,gaiadr2.gaia_source.dec),CIRCLE('ICRS',{},{},0.1))=1 ".format(ra, dec) + \
    "AND gaiadr2.gaia_source.parallax/{}<1.1 ".format(plx) + \
    "AND gaiadr2.gaia_source.parallax/{}>0.9;".format(plx)
else:
    # Direct lookup by Gaia DR2 source_id parsed from the id string.
    g1 = str(g).split()[-1]
    gid = int(g1.split(']')[0])
    query = "select * from gaiadr2.gaia_source where source_id={}".format(gid)
job = Gaia.launch_job(query=query)
out = job.get_results()
N = np.shape(out)[0]
# With an empty result we still build length-1 columns for the fallback path.
if N == 0:
    N = 1
hh = Column(name='HIP', data=[hid]*N)
#ra = Column(name='ra', data=[ra1]*N)
#dec = Column(name='dec', data=[dec1]*N)
#pmra = Column(name='pmra', data=[pmra]*N)
#pmdec = Column(name='pmdec', data=[pmdec]*N)
#plx = Column(name='parallax', data=[plx]*N)
#rv = Column(name='rv', data=[rv]*N)
#erv = Column(name='erv', data=[erv]*N)
fout = star + '_gaia_hip.csv'
v = Column(name='RV', data=[rv]*N)
ev = Column(name='eRV', data=[erv]*N)
if len(out) > 0:
    # Append HIP id and SIMBAD radial velocity to the Gaia table and dump it.
    out.add_columns([hh, v, ev])
    ascii.write(out, fout, format='csv', overwrite=True)
else:
    # Fallback: write the SIMBAD-only quantities as a single row.
    # NOTE(review): the array here is 1-D (transpose is a no-op), yet
    # eight column names are supplied — verify ascii.write produces one
    # row of eight columns rather than eight rows; a reshape to (1, 8)
    # may be intended.
    out = np.array([hid, ra, dec, plx, pmra, pmdec, rv, erv]).transpose()
    ascii.write(out, fout,
            format='csv',
            overwrite=True,
            names=['HIP','ra','dec','parallax','pmra','pmdec','radial_velocity','radial_velocity_error']
            )
| [
"numpy.shape",
"astropy.io.ascii.write",
"astroquery.simbad.Simbad.query_object",
"astroquery.gaia.Gaia.launch_job",
"numpy.array",
"astropy.table.Column",
"sys.exit",
"astroquery.simbad.Simbad.query_objectids",
"astroquery.simbad.Simbad.add_votable_fields"
] | [((484, 579), 'astroquery.simbad.Simbad.add_votable_fields', 'Simbad.add_votable_fields', (['"""pm"""', '"""plx"""', '"""rv_value"""', '"""rvz_error"""', '"""flux(V)"""', '"""flux_error(V)"""'], {}), "('pm', 'plx', 'rv_value', 'rvz_error', 'flux(V)',\n 'flux_error(V)')\n", (509, 579), False, 'from astroquery.simbad import Simbad\n'), ((588, 613), 'astroquery.simbad.Simbad.query_object', 'Simbad.query_object', (['star'], {}), '(star)\n', (607, 613), False, 'from astroquery.simbad import Simbad\n'), ((1197, 1225), 'astroquery.simbad.Simbad.query_objectids', 'Simbad.query_objectids', (['star'], {}), '(star)\n', (1219, 1225), False, 'from astroquery.simbad import Simbad\n'), ((1914, 1942), 'astroquery.gaia.Gaia.launch_job', 'Gaia.launch_job', ([], {'query': 'query'}), '(query=query)\n', (1929, 1942), False, 'from astroquery.gaia import Gaia\n'), ((2015, 2049), 'astropy.table.Column', 'Column', ([], {'name': '"""HIP"""', 'data': '([hid] * N)'}), "(name='HIP', data=[hid] * N)\n", (2021, 2049), False, 'from astropy.table import Column, Table\n'), ((2376, 2408), 'astropy.table.Column', 'Column', ([], {'name': '"""RV"""', 'data': '([rv] * N)'}), "(name='RV', data=[rv] * N)\n", (2382, 2408), False, 'from astropy.table import Column, Table\n'), ((2413, 2447), 'astropy.table.Column', 'Column', ([], {'name': '"""eRV"""', 'data': '([erv] * N)'}), "(name='eRV', data=[erv] * N)\n", (2419, 2447), False, 'from astropy.table import Column, Table\n'), ((1328, 1386), 'sys.exit', 'sys.exit', (['"""No Hipparcos source found to match the target!"""'], {}), "('No Hipparcos source found to match the target!')\n", (1336, 1386), False, 'import sys\n'), ((1971, 1984), 'numpy.shape', 'np.shape', (['out'], {}), '(out)\n', (1979, 1984), True, 'import numpy as np\n'), ((2500, 2552), 'astropy.io.ascii.write', 'ascii.write', (['out', 'fout'], {'format': '"""csv"""', 'overwrite': '(True)'}), "(out, fout, format='csv', overwrite=True)\n", (2511, 2552), False, 'from astropy.io import ascii\n'), 
((2637, 2798), 'astropy.io.ascii.write', 'ascii.write', (['out', 'fout'], {'format': '"""csv"""', 'overwrite': '(True)', 'names': "['HIP', 'ra', 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity',\n 'radial_velocity_error']"}), "(out, fout, format='csv', overwrite=True, names=['HIP', 'ra',\n 'dec', 'parallax', 'pmra', 'pmdec', 'radial_velocity',\n 'radial_velocity_error'])\n", (2648, 2798), False, 'from astropy.io import ascii\n'), ((2569, 2620), 'numpy.array', 'np.array', (['[hid, ra, dec, plx, pmra, pmdec, rv, erv]'], {}), '([hid, ra, dec, plx, pmra, pmdec, rv, erv])\n', (2577, 2620), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""lhs_opt.py: Module to generate design matrix from an optimized
Latin Hypercube design
"""
import numpy as np
from . import lhs
__author__ = "<NAME>"
def create_ese(n: int, d: int, seed: int, max_outer: int,
               obj_function: str = "w2_discrepancy",
               threshold_init: float = 0,
               num_exchanges: int = 0,
               max_inner: int = 0,
               improving_params: list = None,
               exploring_params: list = None) -> np.ndarray:
    """Generate an optimized LHS using Enhanced Stochastic Evolutionary Alg.

    The default parameters of the optimization can be overridden, if necessary.

    :param n: the number of samples
    :param d: the number of dimension
    :param seed: the random seed number
    :param max_outer: the maximum number of outer iterations
    :param obj_function: the objective function to optimize
    :param threshold_init: the initial threshold
    :param num_exchanges: the number of candidates in perturbation step
    :param max_inner: the maximum number of inner iterations
    :param improving_params: the 2 parameters used in improve process
        (default [0.1, 0.8])
        (a) the cut-off value to decrease the threshold
        (b) the multiplier to decrease or increase the threshold
    :param exploring_params: the 4 parameters used in explore process
        (default [0.1, 0.8, 0.9, 0.7])
        (a) the cut-off value of acceptance, start increasing the threshold
        (b) the cut-off value of acceptance, start decreasing the threshold
        (c) the cooling multiplier for the threshold
        (d) the warming multiplier for the threshold
    :raises ValueError: if ``d`` is less than 2 (optimization irrelevant)
    """
    # Bug fix: the original declared mutable list literals as parameter
    # defaults, which are shared across calls; resolve them per call.
    if improving_params is None:
        improving_params = [0.1, 0.8]
    if exploring_params is None:
        exploring_params = [0.1, 0.8, 0.9, 0.7]
    # Validate before importing the optimizer so an invalid dimension is
    # always reported as ValueError.
    # If dimension is less than 2, abort optimization
    if d < 2:
        raise ValueError("Dimension less than 2, optimization irrelevant!")
    from .opt_alg.stochastic_evolutionary import optimize
    if seed is not None:
        np.random.seed(seed)
    # Create initial LHD sample
    dm = lhs.create(n, d, seed=seed)
    # Optimize the LHD sample
    dm_opt = optimize(dm, obj_function, threshold_init, num_exchanges,
                      max_inner, max_outer, improving_params, exploring_params)
    return dm_opt.dm_best
| [
"numpy.random.seed"
] | [((1869, 1889), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1883, 1889), True, 'import numpy as np\n')] |
"""
Created on Thu Apr 9
@author: nrw
This plots residuals,
And also takes shelved torque data, adds in torque estimate and residual data
And writes it all to a CSV
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import plotly.plotly as py
import plotly.offline as po
import plotly.graph_objs as go
from plotly import tools
from sklearn import linear_model
from sklearn.linear_model import Ridge
from sklearn import metrics
import shelve
# Load the quantities computed and shelved by the companion script.
with shelve.open('calculated_data', 'r') as shelf:
    BigTheta = shelf['BigTheta']      # assumed n x 3 angle array -- TODO confirm
    BigTorque = shelf['BigTorque']    # assumed n x 3 torque array -- TODO confirm
    BigForce = shelf['BigForce']
    BigPosition = shelf['BigPosition']
#path = "~/Documents/projects_Spring2018/howe299r/Experiments/03April2018/WIP/"
IMUCols = ['timeSysCal', 'XYZ','X', 'Y', 'Z']
#===============================================
#### DECLARE CONSTANTS ####
#===============================================
print('number of datapoints', BigTheta.shape)
#### CALCULATE K ####
#===============================================
#### FIT TO ESTIMATE K ####
#===============================================
## Note: For the IMU, orientation.Y is pitch; X is roll; Z is yaw
torq_names = ['x', 'y', 'z']
dim = 1
#torq_1d = BigTorque[:,dim]
torq = BigTorque
theta = BigTheta
print('torq shape', torq.shape)
myX = BigTheta#theta_1Dreshape(-1,1)
myy = torq
# Fit both a Ridge and a plain linear regression of torque on theta.
# NOTE(review): Ridge's normalize= parameter was removed in modern
# scikit-learn; this line pins the script to older sklearn versions.
regr= Ridge(fit_intercept=True, alpha=1.0, random_state=0, normalize=True)
regr2 = linear_model.LinearRegression()
regr.fit(myX, myy)
regr2.fit(myX, myy)
K = regr.coef_
K2 = regr2.coef_
yPred= regr.predict(myX)
yPred2= regr2.predict(myX)
print('\n======================')
# NOTE(review): lstsq here solves Torque @ K = Theta, the opposite
# direction to the regressions above — confirm which mapping is intended.
matK = np.linalg.lstsq(BigTorque, BigTheta, rcond=None)[0]
print(matK.shape)
print('Numpy linalg.lstsq() K coefficients:\n', matK)
print('LinReg K Coefficients: \n', K2)
print('Ridge K Coefficients: \n', K)
print('\n======================')
# Torque estimate from the LinReg coefficients only (no intercept term).
torq_est = np.dot(K2, theta.T).T #n.3
# NOTE(review): resid is computed against the RIDGE predictions (yPred)
# while torq_est above uses the LinReg coefficients — mixed on purpose?
resid = torq - yPred
mse = (resid ** 2).mean(axis=0)
print('resid shape', resid.shape)
print('RMSE Per Torque Dim', np.sqrt(mse))
#print('Variance score (ideal 1): %.2f' % r2_score(thetaY))
# Report MAE / MSE / RMSE for all three torque estimates.
print('\n======= SkLearn Metrics====')
print('\n---- Using LinReg K dot theta. This has worse error as we have no intercept term. ===')
print('Mean Absolute Error: %0.02f' % metrics.mean_absolute_error(torq, torq_est))
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, torq_est) )
print('Root Mean Squared Error %0.02f' % np.sqrt(metrics.mean_squared_error(torq, torq_est)))
print('\n---- Using sklearn LinearRegression.pred(theta). ========')
# NOTE(review): trailing comma below makes this statement a 1-tuple; harmless.
print('Mean Absolute Error: %0.02f:' % metrics.mean_absolute_error(torq, yPred2)),
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, yPred2) )
print('Root Mean Squared Error: %0.02f' % np.sqrt(metrics.mean_squared_error(torq, yPred2)))
print('\n---- Using sklearn Ridge.pred(theta). ========')
print('Mean Absolute Error: %0.02f' % metrics.mean_absolute_error(torq, yPred))
print('Mean Squared Error: %0.02f' % metrics.mean_squared_error(torq, yPred) )
print('Root Mean Squared Error: %0.02f' % np.sqrt(metrics.mean_squared_error(torq, yPred)))
print('\n --- LinRegr has the best fit ----')
print('\nNote: torques about y axis: Min', myy.min(), '; Max', myy.max(), 'grams * cm')
print('\n======================')
'''
#===============================================
#### PLOT: Residuals (of Y torque_est - torque) vs Torque_est (either Y or X axis)
#===============================================
print(resid)
print(resid.shape)
names = ['X', 'Y', 'Z']
param = 'Torque'
dim = 1
xplot = torq_est[:,dim]
xplot2 = torq[:,dim]
print(xplot.shape)
yplot = resid[:,dim]
print(yplot.shape)
trace0 = go.Scatter( x = xplot, y = yplot, mode = 'markers',
name = '%s-axis %s estimated'%(names[dim], param))
trace1 = go.Scatter( x = xplot2, y = yplot, mode = 'markers',
name = '%s-axis %s calculated from data'%(names[dim], param))
data = [trace0]
layout = go.Layout(
title='%s-axis %s: Resid vs Estimate (with 3x3 K, using SkLearn LinReg) (IMU data)' % (names[dim], param),
yaxis=dict(title= 'resid (g cm)'),
xaxis=dict(title='%s (g cm)' % param),
legend=dict(x=.5, y=0.1) )
fig = tools.make_subplots(rows=2, cols=1, subplot_titles=(trace1.name, trace0.name))
fig.append_trace(trace0, 1,1)
fig.append_trace(trace1, 2,1)
fig['layout'].update(title = layout.title)
fig['layout']['xaxis2'].update(title=layout.xaxis['title'])
fig['layout']['yaxis1'].update(title = layout.yaxis['title'])
fig['layout']['yaxis2'].update(title = layout.yaxis['title'])
#fig = go.Figure(data=data, layout=layout)
po.plot(fig)
'''
#===============================================
#### PLOT: Residuals (of Y torque_est - torque) vs Force (Z only)
#===============================================
print(resid.shape)
names = ['X', 'Y', 'Z']
param = 'Torque'
x2param = 'Force'
# Plot the X-axis torque residuals (dim 0) against the estimate and
# against the applied Z-axis force.
dim = 0
xplot = torq_est[:,dim]
xplot2 = BigForce[:,2]
yplot = resid[:,dim]
trace0 = go.Scatter( x = xplot, y = yplot, mode = 'markers',
        name = 'resid_torqY vs %s-axis %s estimated'%(names[dim], param))
trace1 = go.Scatter( x = xplot2, y = yplot, mode = 'markers',
        name = 'resid_torqY vs Resid vs Z-axis Force, as applied')
#data = [trace0]
overall_title='%s-axis %s: Resid vs Force applied (with 3x3 K, using SkLearn LinReg) (IMU data)' % \
        (names[dim], param) + '<br>K: ' + np.array_str(K, precision=2) + '<br>'
yaxistitle= 'resid (g cm)'
xaxistitle= 'force (g)'
layout = go.Layout(
    title = overall_title,
    legend=dict(x=.5, y=0.1) )
# Two stacked subplots sharing the residual y-axis.
fig = tools.make_subplots(rows=2, cols=1, subplot_titles=(trace0.name, trace1.name))
fig.append_trace(trace0, 1,1)
fig.append_trace(trace1, 2,1)
fig['layout'].update(title=overall_title, showlegend=False)
fig['layout']['xaxis1'].update(title='%s torque est (g cm)' % (names[dim]))
fig['layout']['xaxis2'].update(title=xaxistitle)
fig['layout']['yaxis1'].update(title=yaxistitle)
fig['layout']['yaxis2'].update(title=yaxistitle)
#fig = go.Figure(data=data, layout=layout)
#po.plot(fig)
# Assemble position, force, angle, torque, estimate and residual columns
# into one matrix and persist it as CSV plus a shelf for later scripts.
full_data = np.hstack((BigPosition, BigForce, BigTheta, BigTorque))
full_data = np.hstack((full_data, torq_est, resid))
print(torq_est.shape)
print(resid.shape)
np.savetxt("full_calculated_data.csv", full_data, delimiter=",", fmt='%0.02f')
with shelve.open('calculated_data2', 'c') as shelf:
    shelf['torq_est'] = torq_est
    shelf['resid'] = resid
    shelf['K'] = K
| [
"plotly.tools.make_subplots",
"numpy.sqrt",
"numpy.hstack",
"numpy.array_str",
"sklearn.linear_model.Ridge",
"plotly.graph_objs.Scatter",
"sklearn.metrics.mean_squared_error",
"numpy.dot",
"shelve.open",
"numpy.savetxt",
"numpy.linalg.lstsq",
"sklearn.metrics.mean_absolute_error",
"sklearn.l... | [((1372, 1440), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'fit_intercept': '(True)', 'alpha': '(1.0)', 'random_state': '(0)', 'normalize': '(True)'}), '(fit_intercept=True, alpha=1.0, random_state=0, normalize=True)\n', (1377, 1440), False, 'from sklearn.linear_model import Ridge\n'), ((1449, 1480), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (1478, 1480), False, 'from sklearn import linear_model\n'), ((4971, 5086), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'xplot', 'y': 'yplot', 'mode': '"""markers"""', 'name': "('resid_torqY vs %s-axis %s estimated' % (names[dim], param))"}), "(x=xplot, y=yplot, mode='markers', name=\n 'resid_torqY vs %s-axis %s estimated' % (names[dim], param))\n", (4981, 5086), True, 'import plotly.graph_objs as go\n'), ((5103, 5210), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'xplot2', 'y': 'yplot', 'mode': '"""markers"""', 'name': '"""resid_torqY vs Resid vs Z-axis Force, as applied"""'}), "(x=xplot2, y=yplot, mode='markers', name=\n 'resid_torqY vs Resid vs Z-axis Force, as applied')\n", (5113, 5210), True, 'import plotly.graph_objs as go\n'), ((5550, 5628), 'plotly.tools.make_subplots', 'tools.make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'subplot_titles': '(trace0.name, trace1.name)'}), '(rows=2, cols=1, subplot_titles=(trace0.name, trace1.name))\n', (5569, 5628), False, 'from plotly import tools\n'), ((6046, 6101), 'numpy.hstack', 'np.hstack', (['(BigPosition, BigForce, BigTheta, BigTorque)'], {}), '((BigPosition, BigForce, BigTheta, BigTorque))\n', (6055, 6101), True, 'import numpy as np\n'), ((6117, 6156), 'numpy.hstack', 'np.hstack', (['(full_data, torq_est, resid)'], {}), '((full_data, torq_est, resid))\n', (6126, 6156), True, 'import numpy as np\n'), ((6198, 6276), 'numpy.savetxt', 'np.savetxt', (['"""full_calculated_data.csv"""', 'full_data'], {'delimiter': '""","""', 'fmt': '"""%0.02f"""'}), "('full_calculated_data.csv', full_data, 
delimiter=',', fmt='%0.02f')\n", (6208, 6276), True, 'import numpy as np\n'), ((476, 511), 'shelve.open', 'shelve.open', (['"""calculated_data"""', '"""r"""'], {}), "('calculated_data', 'r')\n", (487, 511), False, 'import shelve\n'), ((1648, 1696), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['BigTorque', 'BigTheta'], {'rcond': 'None'}), '(BigTorque, BigTheta, rcond=None)\n', (1663, 1696), True, 'import numpy as np\n'), ((1894, 1913), 'numpy.dot', 'np.dot', (['K2', 'theta.T'], {}), '(K2, theta.T)\n', (1900, 1913), True, 'import numpy as np\n'), ((2037, 2049), 'numpy.sqrt', 'np.sqrt', (['mse'], {}), '(mse)\n', (2044, 2049), True, 'import numpy as np\n'), ((6283, 6319), 'shelve.open', 'shelve.open', (['"""calculated_data2"""', '"""c"""'], {}), "('calculated_data2', 'c')\n", (6294, 6319), False, 'import shelve\n'), ((2287, 2330), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['torq', 'torq_est'], {}), '(torq, torq_est)\n', (2314, 2330), False, 'from sklearn import metrics\n'), ((2370, 2412), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 'torq_est'], {}), '(torq, torq_est)\n', (2396, 2412), False, 'from sklearn import metrics\n'), ((2702, 2742), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 'yPred2'], {}), '(torq, yPred2)\n', (2728, 2742), False, 'from sklearn import metrics\n'), ((2938, 2978), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['torq', 'yPred'], {}), '(torq, yPred)\n', (2965, 2978), False, 'from sklearn import metrics\n'), ((3019, 3058), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 'yPred'], {}), '(torq, yPred)\n', (3045, 3058), False, 'from sklearn import metrics\n'), ((5374, 5402), 'numpy.array_str', 'np.array_str', (['K'], {'precision': '(2)'}), '(K, precision=2)\n', (5386, 5402), True, 'import numpy as np\n'), ((2465, 2507), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 
'torq_est'], {}), '(torq, torq_est)\n', (2491, 2507), False, 'from sklearn import metrics\n'), ((2621, 2662), 'sklearn.metrics.mean_absolute_error', 'metrics.mean_absolute_error', (['torq', 'yPred2'], {}), '(torq, yPred2)\n', (2648, 2662), False, 'from sklearn import metrics\n'), ((2796, 2836), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 'yPred2'], {}), '(torq, yPred2)\n', (2822, 2836), False, 'from sklearn import metrics\n'), ((3112, 3151), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['torq', 'yPred'], {}), '(torq, yPred)\n', (3138, 3151), False, 'from sklearn import metrics\n')] |
import numpy as np
import random
from collections import defaultdict
class Agent:
def __init__(self, nA=6):
""" Initialize agent.
Params
======
- nA: number of actions available to the agent
"""
self.nA = nA
self.Q = defaultdict(lambda: np.zeros(self.nA))
self.episodes = 1
self.gamma = 0.77
self.alpha = 0.25
self.epsilon = 0.01
self.eps_decay = 0.9
def get_epsilon_greedy_action(self, state):
if random.random() > self.epsilon:
return np.argmax(self.Q[state])
else:
return random.choice(np.arange(self.nA))
def select_action(self, state):
""" Given the state, select an action.
Params
======
- state: the current state of the environment
Returns
=======
- action: an integer, compatible with the task's action space
"""
return self.get_epsilon_greedy_action(state)
def curr_func(self, state, action):
# for sarsa learning
#return self.Q[state][action]
# for Q learning
return max(self.Q[state])
def __update(self, state, action, reward, next_state, next_action):
Qsa_next = self.curr_func(next_state, next_action) if next_action is not None else 0.0
Qsa_current = self.Q[state][action]
target = reward + (self.gamma * Qsa_next)
return Qsa_current + self.alpha*(target - Qsa_current)
def step(self, state, action, reward, next_state, done):
""" Update the agent's knowledge, using the most recently sampled tuple.
Params
======
- state: the previous state of the environment
- action: the agent's previous choice of action
- reward: last reward received
- next_state: the current state of the environment
- done: whether the episode is complete (True or False)
"""
next_action = self.get_epsilon_greedy_action(next_state) if not done else None
self.Q[state][action] = self.__update(state, action, reward, next_state, next_action)
# after all updates, update episode
if done:
self.epsilon = self.epsilon*self.eps_decay | [
"random.random",
"numpy.argmax",
"numpy.zeros",
"numpy.arange"
] | [((515, 530), 'random.random', 'random.random', ([], {}), '()\n', (528, 530), False, 'import random\n'), ((566, 590), 'numpy.argmax', 'np.argmax', (['self.Q[state]'], {}), '(self.Q[state])\n', (575, 590), True, 'import numpy as np\n'), ((301, 318), 'numpy.zeros', 'np.zeros', (['self.nA'], {}), '(self.nA)\n', (309, 318), True, 'import numpy as np\n'), ((638, 656), 'numpy.arange', 'np.arange', (['self.nA'], {}), '(self.nA)\n', (647, 656), True, 'import numpy as np\n')] |
# -*- coding: UTF-8 -*-
"""
图像分类模型的训练主体
"""
import paddle.fluid as fluid
import numpy as np
import paddle
import reader
import os
import utils
import config
from ma_convcardseresnext import Ma_ConvCardSeResNeXt
def build_optimizer(parameter_list=None):
"""
构建优化器
:return:
"""
epoch = config.train_parameters["num_epochs"]
batch_size = config.train_parameters["train_batch_size"]
iters = config.train_parameters["train_image_count"] // batch_size
learning_strategy = config.train_parameters['sgd_strategy']
lr = learning_strategy['learning_rate']
boundaries = [int(epoch * i * iters) for i in learning_strategy["lr_epochs"]]
values = [i * lr for i in learning_strategy["lr_decay"]]
# utils.logger.info("use Adam optimizer, learning rate boundaries: {} values: {}".format(boundaries, values))
optimizer = fluid.optimizer.SGDOptimizer(learning_rate=fluid.layers.piecewise_decay(boundaries, values),
regularization=fluid.regularizer.L2Decay(0.00005),
parameter_list=parameter_list)
utils.logger.info("use Adam optimizer")
# optimizer = fluid.optimizer.Adam(learning_rate=0.001)
return optimizer
def load_params(model, optimizer):
"""
加载模型参数
:param model:
:return:
"""
if config.train_parameters["continue_train"] and os.path.exists(config.train_parameters['save_model_dir']+'.pdparams'):
utils.logger.info("load params from {}".format(config.train_parameters['save_model_dir']))
# params, _ = fluid.dygraph.load_persistables(config.train_parameters['save_model_dir'])
para_dict, opti_dict = fluid.dygraph.load_dygraph(config.train_parameters['save_model_dir'])
model.set_dict(para_dict)
if config.train_parameters["continue_train"] and os.path.exists(config.train_parameters['save_model_dir']+'.pdopt'):
optimizer.set_dict(opti_dict)
# model.load_dict(params)
def train():
"""
训练主体
:return:
"""
# 会自动根据当前 paddle 是CPU版本还是GPU版本选择运行硬件
# 如果是 GPU,默认使用第 0 块
# 如果希望指定使用,需要主动传入 place 变量,或者通过设置 CUDA_VISIBLE_DEVICES 环境变量控制可见显卡
utils.logger.info("start train")
with fluid.dygraph.guard():
epoch_num = config.train_parameters["num_epochs"]
# mobilenet = net(1.0, config.train_parameters['class_dim'])
net = Ma_ConvCardSeResNeXt(config.train_parameters['class_dim'])
optimizer = build_optimizer(parameter_list=net.parameters())
load_params(net, optimizer)
file_list = os.path.join(config.train_parameters['data_dir'], config.train_parameters['train_file_list'])
custom_reader = reader.custom_image_reader(file_list, config.train_parameters['data_dir'], mode='train')
train_reader = paddle.batch(custom_reader,
batch_size=config.train_parameters['train_batch_size'],
drop_last=True)
current_acc = 0.0
to_save_stat_dict = None
for current_epoch in range(epoch_num):
epoch_acc = 0.0
batch_count = 0
for batch_id, data in enumerate(train_reader()):
dy_x_data = np.array([x[0] for x in data]).astype('float32')
y_data = np.array([[x[1]] for x in data]).astype('int')
img = fluid.dygraph.to_variable(dy_x_data)
label = fluid.dygraph.to_variable(y_data)
label.stop_gradient = True
out, acc = net(img, label)
softmax_out = fluid.layers.softmax(out, use_cudnn=False)
loss = fluid.layers.cross_entropy(softmax_out, label)
avg_loss = fluid.layers.mean(loss)
# 通过这句话求出整个网络,所有参数的梯度
avg_loss.backward()
# 在优化器的指导下,每个参数根据自身梯度进行更新
optimizer.minimize(avg_loss)
net.clear_gradients()
batch_count += 1
epoch_acc += acc.numpy()
if batch_id % 5 == 0 and batch_id != 0:
utils.logger.info("loss at epoch {} step {}: {}, acc: {}"
.format(current_epoch, batch_id, avg_loss.numpy(), acc.numpy()))
epoch_acc /= batch_count
utils.logger.info("epoch {} acc: {}".format(current_epoch, epoch_acc))
if epoch_acc >= current_acc:
utils.logger.info("current epoch {} acc: {} better than last acc: {}, save model"
.format(current_epoch, epoch_acc, current_acc))
current_acc = epoch_acc
fluid.dygraph.save_dygraph(net.state_dict(), config.train_parameters['save_model_dir'])
fluid.dygraph.save_dygraph(optimizer.state_dict(), config.train_parameters['save_model_dir'])
utils.logger.info("train till end")
# for k, v in to_save_stat_dict.items():
# utils.logger.info("key:{} value:{}".format(k, v.numpy()))
if __name__ == "__main__":
train()
| [
"os.path.exists",
"paddle.fluid.dygraph.load_dygraph",
"paddle.fluid.dygraph.guard",
"paddle.fluid.dygraph.to_variable",
"paddle.fluid.layers.softmax",
"os.path.join",
"paddle.fluid.layers.cross_entropy",
"ma_convcardseresnext.Ma_ConvCardSeResNeXt",
"reader.custom_image_reader",
"paddle.fluid.laye... | [((1127, 1166), 'utils.logger.info', 'utils.logger.info', (['"""use Adam optimizer"""'], {}), "('use Adam optimizer')\n", (1144, 1166), False, 'import utils\n'), ((2183, 2215), 'utils.logger.info', 'utils.logger.info', (['"""start train"""'], {}), "('start train')\n", (2200, 2215), False, 'import utils\n'), ((1396, 1467), 'os.path.exists', 'os.path.exists', (["(config.train_parameters['save_model_dir'] + '.pdparams')"], {}), "(config.train_parameters['save_model_dir'] + '.pdparams')\n", (1410, 1467), False, 'import os\n'), ((1694, 1763), 'paddle.fluid.dygraph.load_dygraph', 'fluid.dygraph.load_dygraph', (["config.train_parameters['save_model_dir']"], {}), "(config.train_parameters['save_model_dir'])\n", (1720, 1763), True, 'import paddle.fluid as fluid\n'), ((1851, 1919), 'os.path.exists', 'os.path.exists', (["(config.train_parameters['save_model_dir'] + '.pdopt')"], {}), "(config.train_parameters['save_model_dir'] + '.pdopt')\n", (1865, 1919), False, 'import os\n'), ((2225, 2246), 'paddle.fluid.dygraph.guard', 'fluid.dygraph.guard', ([], {}), '()\n', (2244, 2246), True, 'import paddle.fluid as fluid\n'), ((2398, 2456), 'ma_convcardseresnext.Ma_ConvCardSeResNeXt', 'Ma_ConvCardSeResNeXt', (["config.train_parameters['class_dim']"], {}), "(config.train_parameters['class_dim'])\n", (2418, 2456), False, 'from ma_convcardseresnext import Ma_ConvCardSeResNeXt\n'), ((2583, 2681), 'os.path.join', 'os.path.join', (["config.train_parameters['data_dir']", "config.train_parameters['train_file_list']"], {}), "(config.train_parameters['data_dir'], config.train_parameters[\n 'train_file_list'])\n", (2595, 2681), False, 'import os\n'), ((2701, 2793), 'reader.custom_image_reader', 'reader.custom_image_reader', (['file_list', "config.train_parameters['data_dir']"], {'mode': '"""train"""'}), "(file_list, config.train_parameters['data_dir'],\n mode='train')\n", (2727, 2793), False, 'import reader\n'), ((2813, 2917), 'paddle.batch', 'paddle.batch', 
(['custom_reader'], {'batch_size': "config.train_parameters['train_batch_size']", 'drop_last': '(True)'}), "(custom_reader, batch_size=config.train_parameters[\n 'train_batch_size'], drop_last=True)\n", (2825, 2917), False, 'import paddle\n'), ((4888, 4923), 'utils.logger.info', 'utils.logger.info', (['"""train till end"""'], {}), "('train till end')\n", (4905, 4923), False, 'import utils\n'), ((901, 949), 'paddle.fluid.layers.piecewise_decay', 'fluid.layers.piecewise_decay', (['boundaries', 'values'], {}), '(boundaries, values)\n', (929, 949), True, 'import paddle.fluid as fluid\n'), ((1011, 1043), 'paddle.fluid.regularizer.L2Decay', 'fluid.regularizer.L2Decay', (['(5e-05)'], {}), '(5e-05)\n', (1036, 1043), True, 'import paddle.fluid as fluid\n'), ((3380, 3416), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['dy_x_data'], {}), '(dy_x_data)\n', (3405, 3416), True, 'import paddle.fluid as fluid\n'), ((3441, 3474), 'paddle.fluid.dygraph.to_variable', 'fluid.dygraph.to_variable', (['y_data'], {}), '(y_data)\n', (3466, 3474), True, 'import paddle.fluid as fluid\n'), ((3592, 3634), 'paddle.fluid.layers.softmax', 'fluid.layers.softmax', (['out'], {'use_cudnn': '(False)'}), '(out, use_cudnn=False)\n', (3612, 3634), True, 'import paddle.fluid as fluid\n'), ((3658, 3704), 'paddle.fluid.layers.cross_entropy', 'fluid.layers.cross_entropy', (['softmax_out', 'label'], {}), '(softmax_out, label)\n', (3684, 3704), True, 'import paddle.fluid as fluid\n'), ((3732, 3755), 'paddle.fluid.layers.mean', 'fluid.layers.mean', (['loss'], {}), '(loss)\n', (3749, 3755), True, 'import paddle.fluid as fluid\n'), ((3236, 3266), 'numpy.array', 'np.array', (['[x[0] for x in data]'], {}), '([x[0] for x in data])\n', (3244, 3266), True, 'import numpy as np\n'), ((3310, 3342), 'numpy.array', 'np.array', (['[[x[1]] for x in data]'], {}), '([[x[1]] for x in data])\n', (3318, 3342), True, 'import numpy as np\n')] |
import os
from sys import argv
import numpy as np
from statistics import variance,mean
# https://numpy.org/doc/stable/reference/generated/numpy.linalg.lstsq.html
path = argv[1]
pwd = os.environ["PWD"]+"/"
list_of_files = []
for root, dirs, files in os.walk(pwd + path):
for file in files:
list_of_files.append(os.path.join(root,file))
Q1 = []
Q2 = []
Q3 = []
Q4 = []
LN = []
UN = []
dists = [LN,UN]
list_of_qs = [Q1,Q2,Q3,Q4]
list_of_stigningstall = []
def calcStuff(filepath):
f = open(filepath, "r")
lines = f.readlines()
x = []
y = []
for line in lines[1:]:
s = line.strip().split(",")
x.append(int(s[-1]))
y.append(int(s[1]))
# Usikker på hva denne gjør, men den gjør at den funker
y = [num / max(y) for num in y]
A = np.vstack([x,np.ones(len(x))]).T
m, _ = np.linalg.lstsq(A,y, rcond=None)[0]
#print(f"{argv[1]}: ({m=}, {c=})")
# print(m)
if "LogNormal" in file:
LN.append(m)
if "Uniform" in file:
UN.append(m)
f.close()
list_of_files.sort()
for file in list_of_files:
calcStuff(file)
print(dists)
| [
"os.path.join",
"numpy.linalg.lstsq",
"os.walk"
] | [((250, 269), 'os.walk', 'os.walk', (['(pwd + path)'], {}), '(pwd + path)\n', (257, 269), False, 'import os\n'), ((840, 873), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {'rcond': 'None'}), '(A, y, rcond=None)\n', (855, 873), True, 'import numpy as np\n'), ((323, 347), 'os.path.join', 'os.path.join', (['root', 'file'], {}), '(root, file)\n', (335, 347), False, 'import os\n')] |
import sys
from preprocess.ConjointTriad import ConjointTriad
from preprocess.PreprocessUtils import readFasta
from preprocess.PreprocessUtils import AllvsAllSim
from preprocess.CTD_Composition import CTD_Composition
from preprocess.CTD_Transition import CTD_Transition
from preprocess.CTD_Distribution import CTD_Distribution
from preprocess.MMI import MMI
from preprocess.NMBrotoAC import NMBrotoAC
from preprocess.GearyAC import GearyAC
from preprocess.MoranAC import MoranAC
from preprocess.PSSMAAC import PSSMAAC
from preprocess.PSSMDPC import PSSMDPC
from preprocess.PSSMDCT import PSSMDCT
from preprocess.PSEAAC import PSEAAC
from preprocess.LDCTD import LDCTD
from preprocess.MCDCTD import MCDCTD
from preprocess.MLDCTD import MLDCTD
from preprocess.EGBW import EGBW
from preprocess.AutoCovariance import AutoCovariance
from preprocess.BergerEncoding import BergerEncoding
from preprocess.SkipGram import SkipGram
from preprocess.OneHotEncoding import OneHotEncoding
from preprocess.NumericEncoding import NumericEncoding
from preprocess.AAC import AAC
from preprocess.PairwiseDist import PairwiseDist
from preprocess.QuasiSequenceOrder import QuasiSequenceOrder
from preprocess.PSSMLST import PSSMLST
from preprocess.SkipWeightedConjointTriad import SkipWeightedConjointTriad
from preprocess.DWTAC import DWTAC
from preprocess.Chaos import Chaos
from preprocess.Random import Random
import numpy as np
import time
def createFeatures(folderName,featureSets,processPSSM=True,deviceType='cpu'):
t =time.time()
fastas = readFasta(folderName+'allSeqs.fasta')
print('fasta loaded',time.time()-t)
if 'AC30' in featureSets:
#Guo AC calculation
#calc AC
ac = AutoCovariance(fastas,lag=30,deviceType=deviceType)
f = open(folderName+'AC30.tsv','w')
for item in ac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('AC30',time.time()-t)
if 'AC11' in featureSets:
#Guo AC calculation
#calc AC
ac = AutoCovariance(fastas,lag=11,deviceType=deviceType)
f = open(folderName+'AC11.tsv','w')
for item in ac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('AC11',time.time()-t)
if 'conjointTriad' in featureSets or 'CT' in featureSets:
#Conjoint Triad
ct = ConjointTriad(fastas,deviceType=deviceType)
f = open(folderName+'ConjointTriad.tsv','w')
for item in ct:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('CT',time.time()-t)
if 'LD10_CTD' in featureSets:
#Composition/Transition/Distribution Using Conjoint Triad Features on LD encoding
(comp, tran, dist) = LDCTD(fastas)
lst1 = [comp,tran,dist]
lst2 = ['LD10_CTD_ConjointTriad_C.tsv','LD10_CTD_ConjointTriad_T.tsv','LD10_CTD_ConjointTriad_D.tsv']
for i in range(0,3):
f = open(folderName+lst2[i],'w')
for item in lst1[i]:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('LD10_CTD',time.time()-t)
if 'MCD5CTD' in featureSets:
#Composition/Transition/Distribution Using Conjoint Triad Features on MCD encoding
(comp, tran, dist) = MCDCTD(fastas,5)
lst1 = [comp,tran,dist]
lst2 = ['MCD5_CTD_ConjointTriad_C.tsv','MCD5_CTD_ConjointTriad_T.tsv','MCD5_CTD_ConjointTriad_D.tsv']
for i in range(0,3):
f = open(folderName+lst2[i],'w')
for item in lst1[i]:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('MCD5CTD',time.time()-t)
if 'MLD4CTD' in featureSets:
#Composition/Transform/Distribution Using Conjoint Triad Features on MLD encoding
(comp, tran, dist) = MLDCTD(fastas,4)
lst1 = [comp,tran,dist]
lst2 = ['MLD4_CTD_ConjointTriad_C.tsv','MLD4_CTD_ConjointTriad_T.tsv','MLD4_CTD_ConjointTriad_D.tsv']
for i in range(0,3):
f = open(folderName+lst2[i],'w')
for item in lst1[i]:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('MLD4CTD',time.time()-t)
if 'MCD4CTD' in featureSets:
#Composition/Transform/Distribution Using Conjoint Triad Features on MCD encoding
(comp, tran, dist) = MCDCTD(fastas,4)
lst1 = [comp,tran,dist]
lst2 = ['MCD4_CTD_ConjointTriad_C.tsv','MCD4_CTD_ConjointTriad_T.tsv','MCD4_CTD_ConjointTriad_D.tsv']
for i in range(0,3):
f = open(folderName+lst2[i],'w')
for item in lst1[i]:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('MCD4CTD',time.time()-t)
if 'PSAAC15' in featureSets or 'PSEAAC15' in featureSets:
#Li's PAAC used the first 3 variables from his moran AAC list, which appears to match what other authors have used
paac = PSEAAC(fastas,lag=15)
f = open(folderName+'PSAAC15.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSAAC15',time.time()-t)
if 'PSAAC9' in featureSets or 'PSEAAC9' in featureSets:
#Li's PAAC used the first 3 variables from his moran AAC list, which appears to match what other authors have used
paac = PSEAAC(fastas,lag=9)
f = open(folderName+'PSAAC9.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSAAC9',time.time()-t)
if 'PSAAC20' in featureSets or 'PSEAAC20' in featureSets:
#Li's PAAC used the first 3 variables from his moran AAC list, which appears to match what other authors have used
paac = PSEAAC(fastas,lag=20)
f = open(folderName+'PSAAC20.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSAAC20',time.time()-t)
if 'MMI' in featureSets:
vals = MMI(fastas,deviceType=deviceType)
f = open(folderName+'MMI.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('MMI',time.time()-t)
if 'Moran' in featureSets:
vals = MoranAC(fastas,deviceType=deviceType)
f = open(folderName+'MoranAC30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Moran',time.time()-t)
if 'Geary' in featureSets:
vals = GearyAC(fastas,deviceType=deviceType)
f = open(folderName+'GearyAC30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Geary',time.time()-t)
if 'PSSMAAC' in featureSets:
vals = PSSMAAC(fastas,folderName+'PSSM/',processPSSM=processPSSM,deviceType=deviceType)
processPSSM=False #don't try to compute PSSMs for any further variables utlizing PSSMs
f = open(folderName+'PSSMAAC.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSSMAAC',time.time()-t)
if 'PSSMDPC' in featureSets:
vals = PSSMDPC(fastas,folderName+'PSSM/',processPSSM=processPSSM,deviceType=deviceType)
processPSSM=False #don't try to compute PSSMs for any further variables utlizing PSSMs
f = open(folderName+'PSSMDPC.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSSMDPC',time.time()-t)
if 'EGBW11' in featureSets:
vals = EGBW(fastas,11,deviceType=deviceType)
f = open(folderName+'EGBW11.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('EGBW11',time.time()-t)
if 'OneHotEncoding7' in featureSets:
OneHotEncoding(fastas,folderName+'OneHotEncoding7.encode')
print('OneHotEncoding7',time.time()-t)
if 'SkipGramAA7' in featureSets:
SkipGram(fastas,folderName+'SkipGramAA7H5.encode',hiddenSize=5,deviceType='cuda',fullGPU=True)
print('SkipGramAA7',time.time()-t)
if 'SkipGramAA25H20' in featureSets:
#Yao's 2019 paper mentions 25 unique amino acids in uniprot
#IUPAC defines B, Z, in addition to the standard 20 amino acids, and X, for any acid
#uniprot defines U and O as non-standard letters
#https://www.uniprot.org/help/sequences https://www.bioinformatics.org/sms2/iupac.html
groupings = 'A R N D C Q E G H I L K M F P S T W Y V B Z U O X'.split()
SkipGram(fastas,folderName+'SkipGramAA25H20.encode',groupings=groupings,hiddenSize=20,windowSize=4,deviceType='cuda',fullGPU=True)
print('SkipGramAA25H20',time.time()-t)
if 'OneHotEncoding24' in featureSets:
#note, Richoux's paper converts all unknown amino acids to X, which is the last group, so we are trying to do something similar
#23 amino acids
groupings = 'A R N D C Q E G H I L K M F P S T W Y V U B Z'.split()
#X amino acid, which includes all other amino acids. Using Uniprot, the only non-standard letters are U (matching Richoux's U, and O, which is unlisted in their paper).
groupings.append('JOX')
OneHotEncoding(fastas,folderName+'OneHotEncoding24.encode',groupings=groupings,deviceType='cpu')
print('OneHotEncoding24',time.time()-t)
if 'NumericEncoding22' in featureSets:
#note, Li's paper (Deep Neural Network Based Predictions of Protein Interactions Using Primary Sequences)
#they mention an encoding input dim of 23. Leaving 1 value for zero padding, there are only 20 standard and 2 non-standard values used by Uniprot, that I know of, so I am encoding using that.
groupings = 'A R N D C Q E G H I L K M F P S T W Y V U O'.split()
NumericEncoding(fastas,folderName+'NumericEncoding22.encode',groupings=groupings,deviceType='cpu')
print('NumericEncoding22',time.time()-t)
#encode 20 AAs, with non-overlapping windows of length 3, removing letters from the beginning with length doesn't divide equally
if 'NumericEncoding20Skip3' in featureSets:
aac = NumericEncoding(fastas,folderName+'NumericEncoding20Skip3.encode',groupings=None,groupLen=3,gap=3,truncate='left',deviceType='cpu')
print('NumericEncoding20Skip3',time.time()-t)
if 'AC14_30' in featureSets:
#use 7 additional aa properties, in addition to the standard 7, based on Chen's Work:Protein-protein interaction prediction using a hybrid feature representation and a stacked generalization scheme
#original 7 from GUO
aaIDs = ['GUO_H1','HOPT810101','KRIW790103','GRAR740102','CHAM820101','ROSG850103_GUO_SASA','GUO_NCI']
#new values from Chen
#PONN and KOLA are from
#HYDROPHOBIC PACKING AND SPATIAL ARRANGEMENT OF AMINO ACID RESIDUES IN GLOBULAR PROTEINS P.K. PONNUSWAMY, <NAME> and <NAME>
#Kolaskar AS, Tongaonkar PC. A Semiempirical method for prediction of antigenic determinants on protein antigens. FEBS Lett. 1990;276(1–2):172–4.
#PARJ and JANJ were already in amino acid index
#remaining 3 were copied from Chen's table
aaIDs += ['PARJ860101','PONN800101_CHEN_HYDRO','CHEN_FLEX','CHEN_ACCESS','JANJ780103','CHEN_TURNS','KOLA900101_CHEN_ANTE']
ac = AutoCovariance(fastas,lag=30,aaIDs=aaIDs,deviceType=deviceType)
f = open(folderName+'AC14_30.tsv','w')
for item in ac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('AC14_30',time.time()-t)
#zhou lists these as his features, but its unclear which ones he uses for which algorithm
#I could not find features matching his first (Hydrophobicity scale) and last (Side-chain mass) attributes, so I'm using the general ones I have
zhouAAIds = ['GUO_H1','KRIW790103','WOEC730101','CHAM820101','DAYM780201','BIGC670101','ROSG850103_GUO_SASA','GUO_NCI','GUO_H1','HOPT810101','CHOU_SIDE_MASS']
if 'Geary_Zhao_30' in featureSets:
vals = GearyAC(fastas,aaIDs=zhouAAIds[0:8],deviceType=deviceType)
f = open(folderName+'Geary_Zhao_30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Geary_Zhao_30',time.time()-t)
if 'NMBroto_Zhao_30' in featureSets:
vals = NMBrotoAC(fastas,aaIDs=zhouAAIds[0:8],deviceType=deviceType)
f = open(folderName+'NMBroto_Zhao_30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('NMBroto_Zhao_30',time.time()-t)
if 'Moran_Zhao_30' in featureSets:
vals = MoranAC(fastas,aaIDs=zhouAAIds[0:8],deviceType=deviceType)
f = open(folderName+'Moran_Zhao_30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Moran_Zhao_30',time.time()-t)
#same as regular PSEAAC, shouldn't have added Zhao's name to it.
if 'PSEAAC_Zhao_30' in featureSets:
paac = PSEAAC(fastas,aaIDs=zhouAAIds[8:],lag=30)
f = open(folderName+'PSEAAC_Zhao_30.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSEAAC_Zhao_30',time.time()-t)
if 'Grantham_Sequence_Order_30' in featureSets:
ac = PairwiseDist(fastas, pairwiseAAIDs=['Grantham'], calcType='SumSq', lag=30)
f = open(folderName+'Grantham_Sequence_Order_30.tsv','w')
for item in ac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Grantham_Sequence_Order_30',time.time()-t)
if 'Schneider_Sequence_Order_30' in featureSets:
ac = PairwiseDist(fastas, pairwiseAAIDs=['Schneider-Wrede'],calcType='SumSq', lag=30)
f = open(folderName+'Schneider_Sequence_Order_30.tsv','w')
for item in ac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Schneider_Sequence_Order_30',time.time()-t)
if 'Grantham_Quasi_30' in featureSets:
paac = QuasiSequenceOrder(fastas, pairwiseAAIDs=['Grantham'],lag=30)
f = open(folderName+'Grantham_Quasi_30.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Grantham_Quasi_30',time.time()-t)
if 'Schneider_Quasi_30' in featureSets:
paac = QuasiSequenceOrder(fastas, pairwiseAAIDs=['Schneider-Wrede'],lag=30)
f = open(folderName+'Schneider_Quasi_30.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Schneider_Quasi_30',time.time()-t)
if 'PSSMLST' in featureSets:
PSSMLST(fastas,folderName+'PSSM/',folderName,processPSSM=processPSSM)
processPSSM=False #don't try to compute PSSMs for any further variables utlizing PSSMs
print('PSSMLST',time.time()-t)
if 'SkipWeightedConjointTriad' in featureSets:
#Weighted Skip Conjoint Triad
#paper doesn't mention what weights are used, currently setting skip triad to half the weight of the sequence triad
ct = SkipWeightedConjointTriad(fastas,weights=[1,.5,.5],deviceType=deviceType)
f = open(folderName+'SkipWeightedConjointTriad.tsv','w')
for item in ct:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('SkipWeightedConjointTriad',time.time()-t)
if 'AAC20' in featureSets:
aac = AAC(fastas)
f = open(folderName+'AAC20.tsv','w')
for item in aac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('AAC20',time.time()-t)
if 'AAC400' in featureSets:
aac = AAC(fastas,groupLen=2)
f = open(folderName+'AAC400.tsv','w')
for item in aac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('AAC400',time.time()-t)
if 'DUMULTIGROUPCTD' in featureSets:
groupings = {}
groupings['Hydrophobicity'] = ['RKEDQN','GASTPHY','CLVIMFW']
groupings['Normalized_van_der_Waals_volume'] = ['GASTPD','NVEQIL','MHKFRYW']
groupings['Polarity'] = ['LIFWCMVY','PATGS','HQRKNED']
groupings['Polarizability'] = ['GASDT','CPNVEQIL','KMHFRYW']
groupings['Charge'] = ['KR','ANCQGHILMFPSTWYV','DE']
groupings['Secondary_structure'] = ['EALMQKRH','VIYCWFT','GNPSD']
groupings['Solvent_accessibility'] = ['ALFCGIVW','PKQEND','MPSTHY']
groupings['Surface_tension'] = ['GQDNAHR','KTSEC','ILMFPWYV']
groupings['Protein-protein_interface_hotspot_propensity-Bogan'] = ['DHIKNPRWY','EQSTGAMF','CLV']
groupings['Protein-protein_interface_propensity-Ma'] = ['CDFMPQRWY','AGHVLNST','EIK']
groupings['Protein-DNA_interface_propensity-Schneider'] = ['GKNQRSTY','ADEFHILVW','CMP']
groupings['Protein-DNA_interface_propensity-Ahmad'] = ['GHKNQRSTY','ADEFIPVW','CLM']
groupings['Protein-RNA_interface_propensity-Kim'] = ['HKMRY','FGILNPQSVW','CDEAT']
groupings['Protein-RNA_interface_propensity-Ellis'] = ['HGKMRSYW','AFINPQT','CDELV']
groupings['Protein-RNA_interface_propensity-Phipps'] = ['HKMQRS','ADEFGLNPVY','CITW']
groupings['Protein-ligand_binding_site_propensity_-Khazanov'] = ['CFHWY','GILNMSTR','AEDKPQV']
groupings['Protein-ligand_valid_binding_site_propensity_-Khazanov'] = ['CFHWYM','DGILNSTV','AEKPQR']
groupings['Propensity_for_protein-ligand_polar_&_aromatic_non-bonded_interactions-Imai'] = ['DEHRY','CFKMNQSTW','AGILPV']
groupings['Molecular_Weight'] = ['AGS','CDEHIKLMNQPTV','FRWY']
groupings['cLogP'] = ['RKDNEQH','PYSTGACV','WMFLI']
groupings['No_of_hydrogen_bond_donor_in_side_chain'] = ['HKNQR','DESTWY','ACGFILMPV']
groupings['No_of_hydrogen_bond_acceptor_in_side_chain'] = ['DEHNQR','KSTWY','ACGFILMPV']
groupings['Solubility_in_water'] = ['ACGKRT','EFHILMNPQSVW','DY']
groupings['Amino_acid_flexibility_index'] = ['EGKNQS','ADHIPRTV','CFLMWY']
for item in [(CTD_Composition,'DuMultiCTD_C'),(CTD_Transition,'DuMultiCTD_T'),(CTD_Distribution,'DuMultiCTD_D')]:
func = item[0]
vals = []
for feat in groupings:
results = func(fastas,groupings=groupings[feat])
for i in range(0,len(results[0])): #header row
results[0][i] = feat+'_'+results[0][i] #add feature name to each column in header row
results = np.asarray(results)
if len(vals) == 0:
vals.append(results)
else:
vals.append(results[:,1:])#remove protein names if not first group calculated
vals = np.hstack(vals).tolist()
f = open(folderName+item[1]+'.tsv','w')
for line in vals:
f.write('\t'.join(str(s) for s in line)+'\n')
f.close()
print('DUMULTIGROUPCTD',time.time()-t)
if 'APSAAC30_2' in featureSets:
#Note, Du's paper states W=0.5, but the standard is W=0.05. In theory, W should be lower with more attributes (due to amphipathic=True) to balance betters with AA counts.
#Currently leaving at 0.05, assuming this may be a typo. Can change later as necessary.
paac = PSEAAC(fastas,aaIDs=['GUO_H1','HOPT810101'],lag=30,amphipathic=True)
f = open(folderName+'APSAAC30.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('APSAAC30_2',time.time()-t)
if 'JIA_DWT' in featureSets:
paac = DWTAC(fastas)
f = open(folderName+'DWTAC.tsv','w')
for item in paac:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('JIA_DWT',time.time()-t)
if 'NMBROTO_6_30' in featureSets:
vals = NMBrotoAC(fastas,aaIDs=['GUO_H1','KRIW790103','GRAR740102','CHAM820101','ROSG850103_GUO_SASA','GUO_NCI'],deviceType=deviceType)
f = open(folderName+'NMBroto_6_30.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('NMBROTO_6_30',time.time()-t)
if 'PSSMDCT' in featureSets:
vals = PSSMDCT(fastas,folderName+'PSSM/',deviceType=deviceType,processPSSM=processPSSM)
processPSSM=False #don't try to compute PSSMs for any further variables utlizing PSSMs
f = open(folderName+'PSSMDCT.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSSMDCT',time.time()-t)
if 'NMBROTO_9' in featureSets:
vals = NMBrotoAC(fastas,lag=9,deviceType=deviceType)
f = open(folderName+'NMBroto_9.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('NMBROTO_9',time.time()-t)
if 'MORAN_9' in featureSets:
vals = MoranAC(fastas,lag=9,deviceType=deviceType)
f = open(folderName+'Moran_9.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('MORAN_9',time.time()-t)
if 'GEARY_9' in featureSets:
vals = GearyAC(fastas,lag=9,deviceType=deviceType)
f = open(folderName+'GEARY_9.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('GEARY_9',time.time()-t)
if 'PSEAAC_3' in featureSets:
vals = PSEAAC(fastas,lag=3,deviceType=deviceType)
f = open(folderName+'PSEAAC_3.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('PSEAAC_3',time.time()-t)
if 'CHAOS' in featureSets:
vals = Chaos(fastas)
f = open(folderName+'Chaos.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('CHAOS',time.time()-t)
if 'Random500' in featureSets:
vals = Random(fastas)
f = open(folderName+'Random500.tsv','w')
for item in vals:
f.write('\t'.join(str(s) for s in item)+'\n')
f.close()
print('Random500',time.time()-t)
if 'AllvsAllSim' in featureSets:
AllvsAllSim(folderName)
print('AllvsAllSim',time.time()-t) | [
"preprocess.EGBW.EGBW",
"preprocess.SkipGram.SkipGram",
"numpy.hstack",
"preprocess.AutoCovariance.AutoCovariance",
"preprocess.LDCTD.LDCTD",
"preprocess.GearyAC.GearyAC",
"preprocess.Chaos.Chaos",
"preprocess.AAC.AAC",
"preprocess.PSSMLST.PSSMLST",
"preprocess.QuasiSequenceOrder.QuasiSequenceOrde... | [((1544, 1555), 'time.time', 'time.time', ([], {}), '()\n', (1553, 1555), False, 'import time\n'), ((1567, 1606), 'preprocess.PreprocessUtils.readFasta', 'readFasta', (["(folderName + 'allSeqs.fasta')"], {}), "(folderName + 'allSeqs.fasta')\n", (1576, 1606), False, 'from preprocess.PreprocessUtils import readFasta\n'), ((1717, 1770), 'preprocess.AutoCovariance.AutoCovariance', 'AutoCovariance', (['fastas'], {'lag': '(30)', 'deviceType': 'deviceType'}), '(fastas, lag=30, deviceType=deviceType)\n', (1731, 1770), False, 'from preprocess.AutoCovariance import AutoCovariance\n'), ((2000, 2053), 'preprocess.AutoCovariance.AutoCovariance', 'AutoCovariance', (['fastas'], {'lag': '(11)', 'deviceType': 'deviceType'}), '(fastas, lag=11, deviceType=deviceType)\n', (2014, 2053), False, 'from preprocess.AutoCovariance import AutoCovariance\n'), ((2299, 2343), 'preprocess.ConjointTriad.ConjointTriad', 'ConjointTriad', (['fastas'], {'deviceType': 'deviceType'}), '(fastas, deviceType=deviceType)\n', (2312, 2343), False, 'from preprocess.ConjointTriad import ConjointTriad\n'), ((2649, 2662), 'preprocess.LDCTD.LDCTD', 'LDCTD', (['fastas'], {}), '(fastas)\n', (2654, 2662), False, 'from preprocess.LDCTD import LDCTD\n'), ((3128, 3145), 'preprocess.MCDCTD.MCDCTD', 'MCDCTD', (['fastas', '(5)'], {}), '(fastas, 5)\n', (3134, 3145), False, 'from preprocess.MCDCTD import MCDCTD\n'), ((3608, 3625), 'preprocess.MLDCTD.MLDCTD', 'MLDCTD', (['fastas', '(4)'], {}), '(fastas, 4)\n', (3614, 3625), False, 'from preprocess.MLDCTD import MLDCTD\n'), ((4088, 4105), 'preprocess.MCDCTD.MCDCTD', 'MCDCTD', (['fastas', '(4)'], {}), '(fastas, 4)\n', (4094, 4105), False, 'from preprocess.MCDCTD import MCDCTD\n'), ((4618, 4640), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'lag': '(15)'}), '(fastas, lag=15)\n', (4624, 4640), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((4992, 5013), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'lag': 
'(9)'}), '(fastas, lag=9)\n', (4998, 5013), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((5363, 5385), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'lag': '(20)'}), '(fastas, lag=20)\n', (5369, 5385), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((5586, 5620), 'preprocess.MMI.MMI', 'MMI', (['fastas'], {'deviceType': 'deviceType'}), '(fastas, deviceType=deviceType)\n', (5589, 5620), False, 'from preprocess.MMI import MMI\n'), ((5814, 5852), 'preprocess.MoranAC.MoranAC', 'MoranAC', (['fastas'], {'deviceType': 'deviceType'}), '(fastas, deviceType=deviceType)\n', (5821, 5852), False, 'from preprocess.MoranAC import MoranAC\n'), ((6053, 6091), 'preprocess.GearyAC.GearyAC', 'GearyAC', (['fastas'], {'deviceType': 'deviceType'}), '(fastas, deviceType=deviceType)\n', (6060, 6091), False, 'from preprocess.GearyAC import GearyAC\n'), ((6294, 6384), 'preprocess.PSSMAAC.PSSMAAC', 'PSSMAAC', (['fastas', "(folderName + 'PSSM/')"], {'processPSSM': 'processPSSM', 'deviceType': 'deviceType'}), "(fastas, folderName + 'PSSM/', processPSSM=processPSSM, deviceType=\n deviceType)\n", (6301, 6384), False, 'from preprocess.PSSMAAC import PSSMAAC\n'), ((6668, 6758), 'preprocess.PSSMDPC.PSSMDPC', 'PSSMDPC', (['fastas', "(folderName + 'PSSM/')"], {'processPSSM': 'processPSSM', 'deviceType': 'deviceType'}), "(fastas, folderName + 'PSSM/', processPSSM=processPSSM, deviceType=\n deviceType)\n", (6675, 6758), False, 'from preprocess.PSSMDPC import PSSMDPC\n'), ((7042, 7081), 'preprocess.EGBW.EGBW', 'EGBW', (['fastas', '(11)'], {'deviceType': 'deviceType'}), '(fastas, 11, deviceType=deviceType)\n', (7046, 7081), False, 'from preprocess.EGBW import EGBW\n'), ((7282, 7343), 'preprocess.OneHotEncoding.OneHotEncoding', 'OneHotEncoding', (['fastas', "(folderName + 'OneHotEncoding7.encode')"], {}), "(fastas, folderName + 'OneHotEncoding7.encode')\n", (7296, 7343), False, 'from preprocess.OneHotEncoding import OneHotEncoding\n'), ((7423, 7527), 'preprocess.SkipGram.SkipGram', 
'SkipGram', (['fastas', "(folderName + 'SkipGramAA7H5.encode')"], {'hiddenSize': '(5)', 'deviceType': '"""cuda"""', 'fullGPU': '(True)'}), "(fastas, folderName + 'SkipGramAA7H5.encode', hiddenSize=5,\n deviceType='cuda', fullGPU=True)\n", (7431, 7527), False, 'from preprocess.SkipGram import SkipGram\n'), ((7968, 8110), 'preprocess.SkipGram.SkipGram', 'SkipGram', (['fastas', "(folderName + 'SkipGramAA25H20.encode')"], {'groupings': 'groupings', 'hiddenSize': '(20)', 'windowSize': '(4)', 'deviceType': '"""cuda"""', 'fullGPU': '(True)'}), "(fastas, folderName + 'SkipGramAA25H20.encode', groupings=groupings,\n hiddenSize=20, windowSize=4, deviceType='cuda', fullGPU=True)\n", (7976, 8110), False, 'from preprocess.SkipGram import SkipGram\n'), ((8608, 8714), 'preprocess.OneHotEncoding.OneHotEncoding', 'OneHotEncoding', (['fastas', "(folderName + 'OneHotEncoding24.encode')"], {'groupings': 'groupings', 'deviceType': '"""cpu"""'}), "(fastas, folderName + 'OneHotEncoding24.encode', groupings=\n groupings, deviceType='cpu')\n", (8622, 8714), False, 'from preprocess.OneHotEncoding import OneHotEncoding\n'), ((9168, 9276), 'preprocess.NumericEncoding.NumericEncoding', 'NumericEncoding', (['fastas', "(folderName + 'NumericEncoding22.encode')"], {'groupings': 'groupings', 'deviceType': '"""cpu"""'}), "(fastas, folderName + 'NumericEncoding22.encode', groupings=\n groupings, deviceType='cpu')\n", (9183, 9276), False, 'from preprocess.NumericEncoding import NumericEncoding\n'), ((9499, 9642), 'preprocess.NumericEncoding.NumericEncoding', 'NumericEncoding', (['fastas', "(folderName + 'NumericEncoding20Skip3.encode')"], {'groupings': 'None', 'groupLen': '(3)', 'gap': '(3)', 'truncate': '"""left"""', 'deviceType': '"""cpu"""'}), "(fastas, folderName + 'NumericEncoding20Skip3.encode',\n groupings=None, groupLen=3, gap=3, truncate='left', deviceType='cpu')\n", (9514, 9642), False, 'from preprocess.NumericEncoding import NumericEncoding\n'), ((10610, 10676), 
'preprocess.AutoCovariance.AutoCovariance', 'AutoCovariance', (['fastas'], {'lag': '(30)', 'aaIDs': 'aaIDs', 'deviceType': 'deviceType'}), '(fastas, lag=30, aaIDs=aaIDs, deviceType=deviceType)\n', (10624, 10676), False, 'from preprocess.AutoCovariance import AutoCovariance\n'), ((11295, 11355), 'preprocess.GearyAC.GearyAC', 'GearyAC', (['fastas'], {'aaIDs': 'zhouAAIds[0:8]', 'deviceType': 'deviceType'}), '(fastas, aaIDs=zhouAAIds[0:8], deviceType=deviceType)\n', (11302, 11355), False, 'from preprocess.GearyAC import GearyAC\n'), ((11578, 11640), 'preprocess.NMBrotoAC.NMBrotoAC', 'NMBrotoAC', (['fastas'], {'aaIDs': 'zhouAAIds[0:8]', 'deviceType': 'deviceType'}), '(fastas, aaIDs=zhouAAIds[0:8], deviceType=deviceType)\n', (11587, 11640), False, 'from preprocess.NMBrotoAC import NMBrotoAC\n'), ((11868, 11928), 'preprocess.MoranAC.MoranAC', 'MoranAC', (['fastas'], {'aaIDs': 'zhouAAIds[0:8]', 'deviceType': 'deviceType'}), '(fastas, aaIDs=zhouAAIds[0:8], deviceType=deviceType)\n', (11875, 11928), False, 'from preprocess.MoranAC import MoranAC\n'), ((12217, 12260), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'aaIDs': 'zhouAAIds[8:]', 'lag': '(30)'}), '(fastas, aaIDs=zhouAAIds[8:], lag=30)\n', (12223, 12260), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((12497, 12571), 'preprocess.PairwiseDist.PairwiseDist', 'PairwiseDist', (['fastas'], {'pairwiseAAIDs': "['Grantham']", 'calcType': '"""SumSq"""', 'lag': '(30)'}), "(fastas, pairwiseAAIDs=['Grantham'], calcType='SumSq', lag=30)\n", (12509, 12571), False, 'from preprocess.PairwiseDist import PairwiseDist\n'), ((12835, 12920), 'preprocess.PairwiseDist.PairwiseDist', 'PairwiseDist', (['fastas'], {'pairwiseAAIDs': "['Schneider-Wrede']", 'calcType': '"""SumSq"""', 'lag': '(30)'}), "(fastas, pairwiseAAIDs=['Schneider-Wrede'], calcType='SumSq',\n lag=30)\n", (12847, 12920), False, 'from preprocess.PairwiseDist import PairwiseDist\n'), ((13172, 13234), 'preprocess.QuasiSequenceOrder.QuasiSequenceOrder', 
'QuasiSequenceOrder', (['fastas'], {'pairwiseAAIDs': "['Grantham']", 'lag': '(30)'}), "(fastas, pairwiseAAIDs=['Grantham'], lag=30)\n", (13190, 13234), False, 'from preprocess.QuasiSequenceOrder import QuasiSequenceOrder\n'), ((13470, 13539), 'preprocess.QuasiSequenceOrder.QuasiSequenceOrder', 'QuasiSequenceOrder', (['fastas'], {'pairwiseAAIDs': "['Schneider-Wrede']", 'lag': '(30)'}), "(fastas, pairwiseAAIDs=['Schneider-Wrede'], lag=30)\n", (13488, 13539), False, 'from preprocess.QuasiSequenceOrder import QuasiSequenceOrder\n'), ((13760, 13834), 'preprocess.PSSMLST.PSSMLST', 'PSSMLST', (['fastas', "(folderName + 'PSSM/')", 'folderName'], {'processPSSM': 'processPSSM'}), "(fastas, folderName + 'PSSM/', folderName, processPSSM=processPSSM)\n", (13767, 13834), False, 'from preprocess.PSSMLST import PSSMLST\n'), ((14171, 14250), 'preprocess.SkipWeightedConjointTriad.SkipWeightedConjointTriad', 'SkipWeightedConjointTriad', (['fastas'], {'weights': '[1, 0.5, 0.5]', 'deviceType': 'deviceType'}), '(fastas, weights=[1, 0.5, 0.5], deviceType=deviceType)\n', (14196, 14250), False, 'from preprocess.SkipWeightedConjointTriad import SkipWeightedConjointTriad\n'), ((14480, 14491), 'preprocess.AAC.AAC', 'AAC', (['fastas'], {}), '(fastas)\n', (14483, 14491), False, 'from preprocess.AAC import AAC\n'), ((14689, 14712), 'preprocess.AAC.AAC', 'AAC', (['fastas'], {'groupLen': '(2)'}), '(fastas, groupLen=2)\n', (14692, 14712), False, 'from preprocess.AAC import AAC\n'), ((17942, 18014), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'aaIDs': "['GUO_H1', 'HOPT810101']", 'lag': '(30)', 'amphipathic': '(True)'}), "(fastas, aaIDs=['GUO_H1', 'HOPT810101'], lag=30, amphipathic=True)\n", (17948, 18014), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((18219, 18232), 'preprocess.DWTAC.DWTAC', 'DWTAC', (['fastas'], {}), '(fastas)\n', (18224, 18232), False, 'from preprocess.DWTAC import DWTAC\n'), ((18440, 18578), 'preprocess.NMBrotoAC.NMBrotoAC', 'NMBrotoAC', (['fastas'], {'aaIDs': 
"['GUO_H1', 'KRIW790103', 'GRAR740102', 'CHAM820101', 'ROSG850103_GUO_SASA',\n 'GUO_NCI']", 'deviceType': 'deviceType'}), "(fastas, aaIDs=['GUO_H1', 'KRIW790103', 'GRAR740102', 'CHAM820101',\n 'ROSG850103_GUO_SASA', 'GUO_NCI'], deviceType=deviceType)\n", (18449, 18578), False, 'from preprocess.NMBrotoAC import NMBrotoAC\n'), ((18782, 18872), 'preprocess.PSSMDCT.PSSMDCT', 'PSSMDCT', (['fastas', "(folderName + 'PSSM/')"], {'deviceType': 'deviceType', 'processPSSM': 'processPSSM'}), "(fastas, folderName + 'PSSM/', deviceType=deviceType, processPSSM=\n processPSSM)\n", (18789, 18872), False, 'from preprocess.PSSMDCT import PSSMDCT\n'), ((19160, 19207), 'preprocess.NMBrotoAC.NMBrotoAC', 'NMBrotoAC', (['fastas'], {'lag': '(9)', 'deviceType': 'deviceType'}), '(fastas, lag=9, deviceType=deviceType)\n', (19169, 19207), False, 'from preprocess.NMBrotoAC import NMBrotoAC\n'), ((19413, 19458), 'preprocess.MoranAC.MoranAC', 'MoranAC', (['fastas'], {'lag': '(9)', 'deviceType': 'deviceType'}), '(fastas, lag=9, deviceType=deviceType)\n', (19420, 19458), False, 'from preprocess.MoranAC import MoranAC\n'), ((19660, 19705), 'preprocess.GearyAC.GearyAC', 'GearyAC', (['fastas'], {'lag': '(9)', 'deviceType': 'deviceType'}), '(fastas, lag=9, deviceType=deviceType)\n', (19667, 19705), False, 'from preprocess.GearyAC import GearyAC\n'), ((19908, 19952), 'preprocess.PSEAAC.PSEAAC', 'PSEAAC', (['fastas'], {'lag': '(3)', 'deviceType': 'deviceType'}), '(fastas, lag=3, deviceType=deviceType)\n', (19914, 19952), False, 'from preprocess.PSEAAC import PSEAAC\n'), ((20156, 20169), 'preprocess.Chaos.Chaos', 'Chaos', (['fastas'], {}), '(fastas)\n', (20161, 20169), False, 'from preprocess.Chaos import Chaos\n'), ((20372, 20386), 'preprocess.Random.Random', 'Random', (['fastas'], {}), '(fastas)\n', (20378, 20386), False, 'from preprocess.Random import Random\n'), ((20593, 20616), 'preprocess.PreprocessUtils.AllvsAllSim', 'AllvsAllSim', (['folderName'], {}), '(folderName)\n', (20604, 20616), False, 
'from preprocess.PreprocessUtils import AllvsAllSim\n'), ((1628, 1639), 'time.time', 'time.time', ([], {}), '()\n', (1637, 1639), False, 'import time\n'), ((1914, 1925), 'time.time', 'time.time', ([], {}), '()\n', (1923, 1925), False, 'import time\n'), ((2197, 2208), 'time.time', 'time.time', ([], {}), '()\n', (2206, 2208), False, 'import time\n'), ((2493, 2504), 'time.time', 'time.time', ([], {}), '()\n', (2502, 2504), False, 'import time\n'), ((2972, 2983), 'time.time', 'time.time', ([], {}), '()\n', (2981, 2983), False, 'import time\n'), ((3451, 3462), 'time.time', 'time.time', ([], {}), '()\n', (3460, 3462), False, 'import time\n'), ((3931, 3942), 'time.time', 'time.time', ([], {}), '()\n', (3940, 3942), False, 'import time\n'), ((4411, 4422), 'time.time', 'time.time', ([], {}), '()\n', (4420, 4422), False, 'import time\n'), ((4787, 4798), 'time.time', 'time.time', ([], {}), '()\n', (4796, 4798), False, 'import time\n'), ((5156, 5167), 'time.time', 'time.time', ([], {}), '()\n', (5165, 5167), False, 'import time\n'), ((5532, 5543), 'time.time', 'time.time', ([], {}), '()\n', (5541, 5543), False, 'import time\n'), ((5757, 5768), 'time.time', 'time.time', ([], {}), '()\n', (5766, 5768), False, 'import time\n'), ((5997, 6008), 'time.time', 'time.time', ([], {}), '()\n', (6006, 6008), False, 'import time\n'), ((6236, 6247), 'time.time', 'time.time', ([], {}), '()\n', (6245, 6247), False, 'import time\n'), ((6610, 6621), 'time.time', 'time.time', ([], {}), '()\n', (6619, 6621), False, 'import time\n'), ((6984, 6995), 'time.time', 'time.time', ([], {}), '()\n', (6993, 6995), False, 'import time\n'), ((7223, 7234), 'time.time', 'time.time', ([], {}), '()\n', (7232, 7234), False, 'import time\n'), ((7368, 7379), 'time.time', 'time.time', ([], {}), '()\n', (7377, 7379), False, 'import time\n'), ((7541, 7552), 'time.time', 'time.time', ([], {}), '()\n', (7550, 7552), False, 'import time\n'), ((8126, 8137), 'time.time', 'time.time', ([], {}), '()\n', (8135, 8137), False, 
'import time\n'), ((8733, 8744), 'time.time', 'time.time', ([], {}), '()\n', (8742, 8744), False, 'import time\n'), ((9296, 9307), 'time.time', 'time.time', ([], {}), '()\n', (9305, 9307), False, 'import time\n'), ((9665, 9676), 'time.time', 'time.time', ([], {}), '()\n', (9674, 9676), False, 'import time\n'), ((10825, 10836), 'time.time', 'time.time', ([], {}), '()\n', (10834, 10836), False, 'import time\n'), ((11511, 11522), 'time.time', 'time.time', ([], {}), '()\n', (11520, 11522), False, 'import time\n'), ((11800, 11811), 'time.time', 'time.time', ([], {}), '()\n', (11809, 11811), False, 'import time\n'), ((12084, 12095), 'time.time', 'time.time', ([], {}), '()\n', (12093, 12095), False, 'import time\n'), ((12418, 12429), 'time.time', 'time.time', ([], {}), '()\n', (12427, 12429), False, 'import time\n'), ((12753, 12764), 'time.time', 'time.time', ([], {}), '()\n', (12762, 12764), False, 'import time\n'), ((13099, 13110), 'time.time', 'time.time', ([], {}), '()\n', (13108, 13110), False, 'import time\n'), ((13399, 13410), 'time.time', 'time.time', ([], {}), '()\n', (13408, 13410), False, 'import time\n'), ((13706, 13717), 'time.time', 'time.time', ([], {}), '()\n', (13715, 13717), False, 'import time\n'), ((13939, 13950), 'time.time', 'time.time', ([], {}), '()\n', (13948, 13950), False, 'import time\n'), ((14424, 14435), 'time.time', 'time.time', ([], {}), '()\n', (14433, 14435), False, 'import time\n'), ((14632, 14643), 'time.time', 'time.time', ([], {}), '()\n', (14641, 14643), False, 'import time\n'), ((14854, 14865), 'time.time', 'time.time', ([], {}), '()\n', (14863, 14865), False, 'import time\n'), ((17254, 17273), 'numpy.asarray', 'np.asarray', (['results'], {}), '(results)\n', (17264, 17273), True, 'import numpy as np\n'), ((17614, 17625), 'time.time', 'time.time', ([], {}), '()\n', (17623, 17625), False, 'import time\n'), ((18160, 18171), 'time.time', 'time.time', ([], {}), '()\n', (18169, 18171), False, 'import time\n'), ((18376, 18387), 
'time.time', 'time.time', ([], {}), '()\n', (18385, 18387), False, 'import time\n'), ((18723, 18734), 'time.time', 'time.time', ([], {}), '()\n', (18732, 18734), False, 'import time\n'), ((19098, 19109), 'time.time', 'time.time', ([], {}), '()\n', (19107, 19109), False, 'import time\n'), ((19355, 19366), 'time.time', 'time.time', ([], {}), '()\n', (19364, 19366), False, 'import time\n'), ((19602, 19613), 'time.time', 'time.time', ([], {}), '()\n', (19611, 19613), False, 'import time\n'), ((19849, 19860), 'time.time', 'time.time', ([], {}), '()\n', (19858, 19860), False, 'import time\n'), ((20098, 20109), 'time.time', 'time.time', ([], {}), '()\n', (20107, 20109), False, 'import time\n'), ((20311, 20322), 'time.time', 'time.time', ([], {}), '()\n', (20320, 20322), False, 'import time\n'), ((20536, 20547), 'time.time', 'time.time', ([], {}), '()\n', (20545, 20547), False, 'import time\n'), ((20640, 20651), 'time.time', 'time.time', ([], {}), '()\n', (20649, 20651), False, 'import time\n'), ((17431, 17446), 'numpy.hstack', 'np.hstack', (['vals'], {}), '(vals)\n', (17440, 17446), True, 'import numpy as np\n')] |
import sys
import casadi
import numpy as np
import matplotlib.pyplot as plt
from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, \
calc_wheel_physics
from car_sim_gen.constants import WheelConstants
from acados_template.acados_ocp_formulation_helper import get_symbol_idx
def main():
model, c, q = create_car_model()
vr = casadi.MX.sym("vr")
v_wx = casadi.MX.sym("v_wx")
v_wy = casadi.MX.sym("v_wy")
v_x = casadi.MX.sym("v_x")
v_y = casadi.MX.sym("v_y")
r = casadi.MX.sym("r")
delta = casadi.MX.sym("delta0")
mu_x = casadi.MX.sym("mu_x", c.n_wheels, 1)
mu_y = casadi.MX.sym("mu_y", c.n_wheels, 1)
Fz = casadi.MX.sym("Fz", c.n_wheels, 1)
x = casadi.vertcat(v_x, v_y, r)
u = casadi.vertcat(delta)
p = casadi.vertcat(Fz, mu_x, mu_y)
cw0: WheelConstants = c.wheel_constants[0]
mu_x_val = 0.5
mu_y_val = 0.4
p_val = [cw0.Fz0] * c.n_wheels + [mu_x_val] * c.n_wheels + [mu_y_val] * c.n_wheels
vx, vy = calc_wheel_centric_velocities(x, u, c, 0)
sigma_x, sigma_y = calc_sigma_xy(vr, v_wx, v_wy)
Fx, Fy = calc_wheel_centric_forces(sigma_x, sigma_y, p, c.wheel_constants[0], 0)
calc_slip = casadi.Function("calc_slip", [x, u], [vx, vy], dict())
calc_forces = casadi.Function("calc_forces", [vr, v_wx, v_wy, p], [Fx, Fy], dict())
Fx_i, Fx_w, Fy_i, car_torque_i = calc_wheel_physics(model, 0, c)
model_func = casadi.Function("model_func", [model.x, model.u, model.p], [Fx_i, Fy_i, car_torque_i])
vx_vals = np.linspace(0.0, 3.0)
torque_vals = []
Fx_vals = []
x_val = np.zeros(shape=(model.x.size()[0],))
u_val = np.zeros(shape=(model.u.size()[0],))
for vx_val in vx_vals:
x_val[get_symbol_idx(model.x, "v_x")] = vx_val
# x_val[get_symbol_idx(model.x, "r")] = vx_val
omega = x_val[get_symbol_idx(model.x, "omega")] = vx_val / c.wheel_constants[0].radius
dc = (c.Tm_emv * omega + c.Tm_drag * omega * omega * np.sign(omega)) / c.Tm_p
u_val[get_symbol_idx(model.x, "dc")] = dc
u_val[get_symbol_idx(model.x, "delta0")] = 0.1
u_val[get_symbol_idx(model.x, "delta1")] = 0.1
fx, fy, t = model_func(x_val, u_val, p_val)
Fx_vals.append(fx)
torque_vals.append(t)
plt.plot(vx_vals, torque_vals, label="torque")
plt.plot(vx_vals, Fx_vals, label="Fx")
plt.gca().set_xlabel("vx [m/s]")
plt.gca().set_ylabel("Fx [N] / Torque [Nm]")
plt.gca().set_title('Fy and Torque at a single wheel with increasing vx')
plt.legend()
plt.show()
if __name__ == '__main__':
main()
| [
"casadi.Function",
"matplotlib.pyplot.show",
"car_model.calc_wheel_centric_velocities",
"acados_template.acados_ocp_formulation_helper.get_symbol_idx",
"matplotlib.pyplot.gca",
"car_model.calc_sigma_xy",
"car_model.calc_wheel_physics",
"matplotlib.pyplot.plot",
"casadi.vertcat",
"numpy.linspace",
... | [((371, 389), 'car_model.create_car_model', 'create_car_model', ([], {}), '()\n', (387, 389), False, 'from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, calc_wheel_physics\n'), ((399, 418), 'casadi.MX.sym', 'casadi.MX.sym', (['"""vr"""'], {}), "('vr')\n", (412, 418), False, 'import casadi\n'), ((430, 451), 'casadi.MX.sym', 'casadi.MX.sym', (['"""v_wx"""'], {}), "('v_wx')\n", (443, 451), False, 'import casadi\n'), ((463, 484), 'casadi.MX.sym', 'casadi.MX.sym', (['"""v_wy"""'], {}), "('v_wy')\n", (476, 484), False, 'import casadi\n'), ((495, 515), 'casadi.MX.sym', 'casadi.MX.sym', (['"""v_x"""'], {}), "('v_x')\n", (508, 515), False, 'import casadi\n'), ((526, 546), 'casadi.MX.sym', 'casadi.MX.sym', (['"""v_y"""'], {}), "('v_y')\n", (539, 546), False, 'import casadi\n'), ((555, 573), 'casadi.MX.sym', 'casadi.MX.sym', (['"""r"""'], {}), "('r')\n", (568, 573), False, 'import casadi\n'), ((586, 609), 'casadi.MX.sym', 'casadi.MX.sym', (['"""delta0"""'], {}), "('delta0')\n", (599, 609), False, 'import casadi\n'), ((621, 657), 'casadi.MX.sym', 'casadi.MX.sym', (['"""mu_x"""', 'c.n_wheels', '(1)'], {}), "('mu_x', c.n_wheels, 1)\n", (634, 657), False, 'import casadi\n'), ((669, 705), 'casadi.MX.sym', 'casadi.MX.sym', (['"""mu_y"""', 'c.n_wheels', '(1)'], {}), "('mu_y', c.n_wheels, 1)\n", (682, 705), False, 'import casadi\n'), ((715, 749), 'casadi.MX.sym', 'casadi.MX.sym', (['"""Fz"""', 'c.n_wheels', '(1)'], {}), "('Fz', c.n_wheels, 1)\n", (728, 749), False, 'import casadi\n'), ((758, 785), 'casadi.vertcat', 'casadi.vertcat', (['v_x', 'v_y', 'r'], {}), '(v_x, v_y, r)\n', (772, 785), False, 'import casadi\n'), ((794, 815), 'casadi.vertcat', 'casadi.vertcat', (['delta'], {}), '(delta)\n', (808, 815), False, 'import casadi\n'), ((824, 854), 'casadi.vertcat', 'casadi.vertcat', (['Fz', 'mu_x', 'mu_y'], {}), '(Fz, mu_x, mu_y)\n', (838, 854), False, 'import casadi\n'), ((1041, 1082), 
'car_model.calc_wheel_centric_velocities', 'calc_wheel_centric_velocities', (['x', 'u', 'c', '(0)'], {}), '(x, u, c, 0)\n', (1070, 1082), False, 'from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, calc_wheel_physics\n'), ((1106, 1135), 'car_model.calc_sigma_xy', 'calc_sigma_xy', (['vr', 'v_wx', 'v_wy'], {}), '(vr, v_wx, v_wy)\n', (1119, 1135), False, 'from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, calc_wheel_physics\n'), ((1149, 1220), 'car_model.calc_wheel_centric_forces', 'calc_wheel_centric_forces', (['sigma_x', 'sigma_y', 'p', 'c.wheel_constants[0]', '(0)'], {}), '(sigma_x, sigma_y, p, c.wheel_constants[0], 0)\n', (1174, 1220), False, 'from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, calc_wheel_physics\n'), ((1418, 1449), 'car_model.calc_wheel_physics', 'calc_wheel_physics', (['model', '(0)', 'c'], {}), '(model, 0, c)\n', (1436, 1449), False, 'from car_model import calc_wheel_centric_velocities, create_car_model, calc_sigma_xy, calc_wheel_centric_forces, calc_wheel_physics\n'), ((1467, 1557), 'casadi.Function', 'casadi.Function', (['"""model_func"""', '[model.x, model.u, model.p]', '[Fx_i, Fy_i, car_torque_i]'], {}), "('model_func', [model.x, model.u, model.p], [Fx_i, Fy_i,\n car_torque_i])\n", (1482, 1557), False, 'import casadi\n'), ((1569, 1590), 'numpy.linspace', 'np.linspace', (['(0.0)', '(3.0)'], {}), '(0.0, 3.0)\n', (1580, 1590), True, 'import numpy as np\n'), ((2319, 2365), 'matplotlib.pyplot.plot', 'plt.plot', (['vx_vals', 'torque_vals'], {'label': '"""torque"""'}), "(vx_vals, torque_vals, label='torque')\n", (2327, 2365), True, 'import matplotlib.pyplot as plt\n'), ((2370, 2408), 'matplotlib.pyplot.plot', 'plt.plot', (['vx_vals', 'Fx_vals'], {'label': '"""Fx"""'}), "(vx_vals, Fx_vals, label='Fx')\n", (2378, 2408), True, 'import matplotlib.pyplot as plt\n'), 
((2577, 2589), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2587, 2589), True, 'import matplotlib.pyplot as plt\n'), ((2595, 2605), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2603, 2605), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1799), 'acados_template.acados_ocp_formulation_helper.get_symbol_idx', 'get_symbol_idx', (['model.x', '"""v_x"""'], {}), "(model.x, 'v_x')\n", (1783, 1799), False, 'from acados_template.acados_ocp_formulation_helper import get_symbol_idx\n'), ((1887, 1919), 'acados_template.acados_ocp_formulation_helper.get_symbol_idx', 'get_symbol_idx', (['model.x', '"""omega"""'], {}), "(model.x, 'omega')\n", (1901, 1919), False, 'from acados_template.acados_ocp_formulation_helper import get_symbol_idx\n'), ((2060, 2089), 'acados_template.acados_ocp_formulation_helper.get_symbol_idx', 'get_symbol_idx', (['model.x', '"""dc"""'], {}), "(model.x, 'dc')\n", (2074, 2089), False, 'from acados_template.acados_ocp_formulation_helper import get_symbol_idx\n'), ((2110, 2143), 'acados_template.acados_ocp_formulation_helper.get_symbol_idx', 'get_symbol_idx', (['model.x', '"""delta0"""'], {}), "(model.x, 'delta0')\n", (2124, 2143), False, 'from acados_template.acados_ocp_formulation_helper import get_symbol_idx\n'), ((2165, 2198), 'acados_template.acados_ocp_formulation_helper.get_symbol_idx', 'get_symbol_idx', (['model.x', '"""delta1"""'], {}), "(model.x, 'delta1')\n", (2179, 2198), False, 'from acados_template.acados_ocp_formulation_helper import get_symbol_idx\n'), ((2413, 2422), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2420, 2422), True, 'import matplotlib.pyplot as plt\n'), ((2450, 2459), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2457, 2459), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2508), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2506, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2021, 2035), 'numpy.sign', 'np.sign', (['omega'], {}), '(omega)\n', (2028, 2035), 
True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import math
import operator, collections
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
from features import extract_features
from save_features import feature_vector, dump, load, label_vec
data_dir = '../data/CXR_png_complete/'
# 0 - black, 255 - white
def profile_one_dim(im):
im = gray_level(im)
print(np.shape(im))
vertical_sum = np.sum(im, axis=0)/np.shape(im)[1]
fig = plt.figure(0)
fig.canvas.set_window_title('Projection Profile - ' + filename)
plt.plot(vertical_sum)
# plt.show()
P, X, Y = zone_division(im, vertical_sum)
density_symmetry, roughness_max, roughness_symmetry = extract_features(im, P, X, Y)
fv = feature_vector(density_symmetry, roughness_max, roughness_symmetry, filename)
all_vector.append(fv)
# print(all_vector)
def gray_level(im):
num_of_gray_levels = len(np.unique(im))
image_bit = math.log(num_of_gray_levels, 2)
'''
Initialise a gray_level_hist list with all zeros. Indices
denote the gray level, value at index denote the count.
'''
# VERY SLOW
# gray_level_hist = np.zeros(2**image_bit)
# for x in im:
# for y in x:
# gray_level_hist[y]+=1
# print(gray_level_hist)
unique, counts = np.unique(im, return_counts=True)
gray_level_hist_dict = dict(zip(unique, counts)) # keys denote gray_level, values denote gray level count
'''
background_value :- is the gray level at which the peak appears
close to the maximum value in the gray level histogram.
'''
background_value = max(gray_level_hist_dict.items(), key=operator.itemgetter(1))[0]
# print(background_value, gray_level_hist_dict[background_value])
normalized_im = np.divide(im, background_value)
return normalized_im
def zone_division(im, vertical_sum):
low = math.floor(0.25*len(vertical_sum))
high = math.floor(0.50*len(vertical_sum))
'''
mini = min(vertical_sum[low:high])
ind = list(vertical_sum).index(mini)
x_right = []
for x in im:
# print(x[ind])
x_right.append(255 - x[ind])
# print(x_right)
'''
'''
x_right = math.floor(len(vertical_sum)/4)
print(x_right, 'div')
vertical_profile_at_xright(im, x_right)
'''
# x_right = list(vertical_sum).index(max(vertical_sum[low:high]))
x_right = np.argmax(vertical_sum[low:high])
print(x_right, 'x_right')
vert_prof = vertical_profile_at_xright(im, x_right)
# For ytop
def ytop():
low = math.floor(0.05*len(vert_prof))
high = math.floor(0.50*len(vert_prof))
ytopv = min(vert_prof[low:high])
# ytopv = min(vert_prof[low:high])
# ytopi = vert_prof.index(ytopv)
ytopi = np.argmin(np.asarray(vert_prof[low:high])) + low
print(ytopi, 'y-top index')
fig = plt.figure(0)
fig.canvas.set_window_title('Vertical Profile at x_right - ' + filename)
plt.plot(vert_prof)
# plt.show()
return ytopi
# For ybottom
def ybottom():
low = math.floor(0.51*len(vert_prof))
high = math.floor(0.95*len(vert_prof))
vert_prof_derivative = np.zeros(len(vert_prof))
'''Calculate derivative using finite difference
f'(x) = f(x+h) - f(x)/h'''
h = 20
for i in range(0, len(vert_prof)-h):
vert_prof_derivative[i] = ((vert_prof[i+h] - vert_prof[i])/h)
ybottomv = min(vert_prof_derivative[low:high])
# ybottomi = list(vert_prof_derivative[low:high]).index(ybottomv) + low
ybottomi = np.argmin(np.asarray(vert_prof_derivative[low:high])) + low
print(ybottomi, 'y-bottom index')
fig = plt.figure(0)
fig.canvas.set_window_title('Vertical Profile Derivative at x_right - ' + filename)
plt.plot(vert_prof_derivative)
# plt.show()
return ybottomi
ytopi = ytop()
ybottomi = ybottom()
y1 = ytopi + math.floor(0.25*(ybottomi-ytopi))
y2 = ytopi + math.floor(0.5*(ybottomi-ytopi))
y3 = ytopi + math.floor(0.75*(ybottomi-ytopi))
# Y contains the indices at which the zones are divided.
Y = [ytopi, y1, y2, y3, ybottomi]
# print(Y)
'''Local zone based projection profile'''
Pz1, Pz2, Pz3, Pz4 = ([] for _ in range(4))
def chunks(start_row, end_row):
div_param = end_row - start_row + 1
return div_param
Pz1 = np.sum(im[ytopi:y1], axis=0)/chunks(ytopi, y1)
Pz2 = np.sum(im[y1:y2], axis=0)/chunks(y1, y2)
Pz3 = np.sum(im[y2:y3], axis=0)/chunks(y2, y3)
Pz4 = np.sum(im[y3:ybottomi], axis=0)/chunks(y3, ybottomi)
P = [Pz1, Pz2, Pz3, Pz4]
# print(P)
fig = plt.figure(0)
fig.canvas.set_window_title('Zone wise projection profile - ' + filename)
ax = plt.subplot(111)
# plt.plot(Pz1, 'r', Pz2, 'g', Pz3, 'b', Pz4, 'r--')
ax.plot(Pz1, 'r', label = 'Projection Profile for zone 1')
ax.plot(Pz2, 'g', label = 'Projection Profile for zone 2')
ax.plot(Pz3, 'b', label = 'Projection Profile for zone 3')
ax.plot(Pz4, 'r--', label = 'Projection Profile for zone 4')
ax.legend()
# plt.show()
X = points_vector(P, vertical_sum)
# print(X)
return P, X, Y
def points_vector(P, vertical_sum):
X = np.zeros((4, 5))
'''
X(4, 5) - 4 rows, one each for each local zone projection profile
X = [[xrrib xrlung xcenter xllung xlrib],
.
.
[xrrib xrlung xcenter xllung xlrib]]
'''
for i in range(0, len(P)):
# print(len(P[i]))
# x_center
low = math.floor(0.25*len(P[i]))
high = math.floor(0.75*len(P[i]))
xc = np.argmin(np.asarray(P[i][low:high])) + low
X[i][2] = xc
# xrlung
low = math.floor(0.125*len(P[i]))
high = xc
xrlung = np.argmax(np.asarray(P[i][low:high])) + low
X[i][1] = xrlung
# xllung
low = xc + 1
high = math.floor(0.875*len(P[i]))
xllung = np.argmax(np.asarray(P[i][low:high])) + low
X[i][3] = xllung
# xrrib
low = 0
high = xrlung
xrrib = np.argmin(np.asarray(P[i][low:high]))
X[i][0] = xrrib
# xlrib
low = xllung + 1
high = len(P[i]) - 1
xlrib = np.argmin(np.asarray(P[i][low:high])) + low
X[i][4] = xlrib
return X.astype(int)
def vertical_profile_at_xright(im, x_right):
vert_prof = []
for x in im:
vert_prof.append(x[x_right])
return vert_prof
if __name__ == '__main__':
global filename, all_vector
all_vector = []
count = 0
for image in os.listdir(data_dir):
filename = str(image)
print('Processing: {0}'.format(filename))
im = scipy.ndimage.imread(data_dir + image, flatten=True)
profile_one_dim(im)
print('Processed: {0}'.format(filename))
count += 1
print('Files processed: {0}'.format(count))
dump(all_vector, 'features_complete.pkl')
label_vec(all_vector) | [
"save_features.feature_vector",
"os.listdir",
"numpy.unique",
"math.floor",
"save_features.label_vec",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.argmax",
"math.log",
"matplotlib.pyplot.subplot",
"numpy.sum",
"matplotlib.pyplot.figure",
"numpy.zeros",
"operator.itemgetter",
"numpy... | [((478, 491), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (488, 491), True, 'import matplotlib.pyplot as plt\n'), ((564, 586), 'matplotlib.pyplot.plot', 'plt.plot', (['vertical_sum'], {}), '(vertical_sum)\n', (572, 586), True, 'import matplotlib.pyplot as plt\n'), ((709, 738), 'features.extract_features', 'extract_features', (['im', 'P', 'X', 'Y'], {}), '(im, P, X, Y)\n', (725, 738), False, 'from features import extract_features\n'), ((748, 825), 'save_features.feature_vector', 'feature_vector', (['density_symmetry', 'roughness_max', 'roughness_symmetry', 'filename'], {}), '(density_symmetry, roughness_max, roughness_symmetry, filename)\n', (762, 825), False, 'from save_features import feature_vector, dump, load, label_vec\n'), ((957, 988), 'math.log', 'math.log', (['num_of_gray_levels', '(2)'], {}), '(num_of_gray_levels, 2)\n', (965, 988), False, 'import math\n'), ((1314, 1347), 'numpy.unique', 'np.unique', (['im'], {'return_counts': '(True)'}), '(im, return_counts=True)\n', (1323, 1347), True, 'import numpy as np\n'), ((1783, 1814), 'numpy.divide', 'np.divide', (['im', 'background_value'], {}), '(im, background_value)\n', (1792, 1814), True, 'import numpy as np\n'), ((2399, 2432), 'numpy.argmax', 'np.argmax', (['vertical_sum[low:high]'], {}), '(vertical_sum[low:high])\n', (2408, 2432), True, 'import numpy as np\n'), ((4722, 4735), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (4732, 4735), True, 'import matplotlib.pyplot as plt\n'), ((4823, 4839), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (4834, 4839), True, 'import matplotlib.pyplot as plt\n'), ((5313, 5329), 'numpy.zeros', 'np.zeros', (['(4, 5)'], {}), '((4, 5))\n', (5321, 5329), True, 'import numpy as np\n'), ((6679, 6699), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (6689, 6699), False, 'import os\n'), ((6999, 7040), 'save_features.dump', 'dump', (['all_vector', '"""features_complete.pkl"""'], {}), 
"(all_vector, 'features_complete.pkl')\n", (7003, 7040), False, 'from save_features import feature_vector, dump, load, label_vec\n'), ((7045, 7066), 'save_features.label_vec', 'label_vec', (['all_vector'], {}), '(all_vector)\n', (7054, 7066), False, 'from save_features import feature_vector, dump, load, label_vec\n'), ((400, 412), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (408, 412), True, 'import numpy as np\n'), ((433, 451), 'numpy.sum', 'np.sum', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (439, 451), True, 'import numpy as np\n'), ((926, 939), 'numpy.unique', 'np.unique', (['im'], {}), '(im)\n', (935, 939), True, 'import numpy as np\n'), ((2885, 2898), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (2895, 2898), True, 'import matplotlib.pyplot as plt\n'), ((2988, 3007), 'matplotlib.pyplot.plot', 'plt.plot', (['vert_prof'], {}), '(vert_prof)\n', (2996, 3007), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3751), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (3748, 3751), True, 'import matplotlib.pyplot as plt\n'), ((3852, 3882), 'matplotlib.pyplot.plot', 'plt.plot', (['vert_prof_derivative'], {}), '(vert_prof_derivative)\n', (3860, 3882), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4028), 'math.floor', 'math.floor', (['(0.25 * (ybottomi - ytopi))'], {}), '(0.25 * (ybottomi - ytopi))\n', (4001, 4028), False, 'import math\n'), ((4042, 4078), 'math.floor', 'math.floor', (['(0.5 * (ybottomi - ytopi))'], {}), '(0.5 * (ybottomi - ytopi))\n', (4052, 4078), False, 'import math\n'), ((4092, 4129), 'math.floor', 'math.floor', (['(0.75 * (ybottomi - ytopi))'], {}), '(0.75 * (ybottomi - ytopi))\n', (4102, 4129), False, 'import math\n'), ((4455, 4483), 'numpy.sum', 'np.sum', (['im[ytopi:y1]'], {'axis': '(0)'}), '(im[ytopi:y1], axis=0)\n', (4461, 4483), True, 'import numpy as np\n'), ((4512, 4537), 'numpy.sum', 'np.sum', (['im[y1:y2]'], {'axis': '(0)'}), '(im[y1:y2], axis=0)\n', (4518, 4537), True, 'import numpy as 
np\n'), ((4563, 4588), 'numpy.sum', 'np.sum', (['im[y2:y3]'], {'axis': '(0)'}), '(im[y2:y3], axis=0)\n', (4569, 4588), True, 'import numpy as np\n'), ((4614, 4645), 'numpy.sum', 'np.sum', (['im[y3:ybottomi]'], {'axis': '(0)'}), '(im[y3:ybottomi], axis=0)\n', (4620, 4645), True, 'import numpy as np\n'), ((452, 464), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (460, 464), True, 'import numpy as np\n'), ((6193, 6219), 'numpy.asarray', 'np.asarray', (['P[i][low:high]'], {}), '(P[i][low:high])\n', (6203, 6219), True, 'import numpy as np\n'), ((1666, 1688), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1685, 1688), False, 'import operator, collections\n'), ((2795, 2826), 'numpy.asarray', 'np.asarray', (['vert_prof[low:high]'], {}), '(vert_prof[low:high])\n', (2805, 2826), True, 'import numpy as np\n'), ((3631, 3673), 'numpy.asarray', 'np.asarray', (['vert_prof_derivative[low:high]'], {}), '(vert_prof_derivative[low:high])\n', (3641, 3673), True, 'import numpy as np\n'), ((5725, 5751), 'numpy.asarray', 'np.asarray', (['P[i][low:high]'], {}), '(P[i][low:high])\n', (5735, 5751), True, 'import numpy as np\n'), ((5885, 5911), 'numpy.asarray', 'np.asarray', (['P[i][low:high]'], {}), '(P[i][low:high])\n', (5895, 5911), True, 'import numpy as np\n'), ((6053, 6079), 'numpy.asarray', 'np.asarray', (['P[i][low:high]'], {}), '(P[i][low:high])\n', (6063, 6079), True, 'import numpy as np\n'), ((6342, 6368), 'numpy.asarray', 'np.asarray', (['P[i][low:high]'], {}), '(P[i][low:high])\n', (6352, 6368), True, 'import numpy as np\n')] |
import numpy as np
from . import torch_warp as t_warp
import torch
from torch.autograd import Variable
import scipy
# this file is to find the TVL1 energy of the optical flow vector
def compute_flow_gradient(flowvector,pixelposx,pixelposy,imgwidth,imgheight):
ux_grad = 0
uy_grad = 0
if pixelposx > 0 and pixelposx < (imgwidth-1):
ux_prev = flowvector[pixelposy,pixelposx-1][0]
ux_next = flowvector[pixelposy,pixelposx+1][0]
ux_grad = float(ux_next - ux_prev)/2.0
if pixelposy > 0 and pixelposy < (imgheight-1):
uy_prev = flowvector[pixelposy-1,pixelposx][1]
uy_next = flowvector[pixelposy+1,pixelposx][1]
uy_grad = float(uy_next - uy_prev)/2.0
return [ux_grad,uy_grad]
def compute_intensity_gradient(img_channel,pixel_x,pixel_y,img_width,img_height):
ix_grad = 0
iy_grad = 0
if pixel_x >= 0 and pixel_x < (img_width-1):
ix_next = img_channel[pixel_y][pixel_x+1]
ix_current = img_channel[pixel_y][pixel_x]
ix_grad = ix_next - ix_current
if pixel_y >= 0 and pixel_y < (img_height-1):
iy_next = img_channel[pixel_y+1][pixel_x]
iy_current = img_channel[pixel_y][pixel_x]
iy_grad = iy_next - iy_current
return (ix_grad , iy_grad)
def compute_flow_gradient_optimized(flowvector,pixelposx,pixelposy,imgwidth,imgheight):
ux_grad = 0.0
uy_grad = 0.0
if pixelposx > 0 and pixelposx < (imgwidth-1):
ux_grad = (flowvector[pixelposy,pixelposx+1][0] - flowvector[pixelposy,pixelposx-1][0])/2.0
if pixelposy > 0 and pixelposy < (imgheight-1):
uy_grad = (flowvector[pixelposy+1,pixelposx][1] - flowvector[pixelposy-1,pixelposx][1]) /2.0
return torch.Tensor([ux_grad,uy_grad])
# tvl1 energy of single image
def compute_tvl1_energy_optimized(img1,img2,flow):
img1 = img1.transpose(0,1).transpose(1,2)
img2 = img2.transpose(0,1).transpose(1,2)
flow = flow.transpose(0,1).transpose(1,2)
height, width, no_of_chans = img1.size()
wrapped_first_image = t_warp.warp_image_torch_optimized(img2,flow.data.clone())
grad_vec = torch.abs(wrapped_first_image - img1.data)
grad_vec = torch.norm(grad_vec,2,2)
imag_grad = grad_vec.sum()
ux_grad = (flow[:,2:,0]-flow[:,:width-2,0])/2.0
uy_grad = (flow[2:,:,1]-flow[:height-2,:,1])/2.0
ux_grad = ux_grad * ux_grad
uy_grad = uy_grad * uy_grad
sum = (ux_grad[1:-1, :] + uy_grad[:, 1:-1]).pow(0.5)
grad_loss = sum.sum() + ux_grad[0, :].sum() + ux_grad[height - 1, :].sum() + uy_grad[:, 0].sum() + uy_grad[:,
width - 1].sum()
energy = grad_loss+imag_grad
return energy
# This function is one which is used to compute the tvl1 energy associated with flow in batches
def compute_tvl1_energy_optimized_batch(img1,img2,flow,image_name='test',doTest=False,test_folder=''):
img1 = img1.transpose(1,2).transpose(2,3)
img2 = img2.transpose(1,2).transpose(2,3)
flow = flow.transpose(1,2).transpose(2,3)
batch,height, width, no_of_chans = img1.size()
# get the wrapped image
wrapped_first_image = t_warp.warp_image_torch_optimized_batch(img2,flow.data.clone())
# during inference phase write the wrapped and original image in a directory
if doTest==True:
scipy.misc.imsave(image_name +'_test_0.jpg', img1[0].data.numpy()+ np.array([0.411,0.432,0.45]))
scipy.misc.imsave(image_name +'_test_1.jpg', img2[0].data.numpy()+ np.array([0.411,0.432,0.45]))
scipy.misc.imsave(image_name +'_test_w.jpg', wrapped_first_image[0].numpy()+ np.array([0.411,0.432,0.45]))
# find the image intensity values between the wrapped and first image
grad_vec = torch.abs(wrapped_first_image - img1.data) * 255 * 0.15
# constant penalty of value '23' for the black pixels in all the channels
for i in range(batch):
for j in range(no_of_chans):
grad_vec[i,:,:,j][torch.eq(wrapped_first_image.sum(3),0)[i]] = 23
grad_vec = torch.norm(grad_vec,2,3)
# data fidelity term scalar for each batch sample
imag_grad = grad_vec.sum(2).sum(1)
# compute the flow smoothness
# compute the sum of ux_dx and ux_dy values from 1st to n-1 values for flow in x direction
ux_grad_x = (flow[:,:,1:,0] - flow[:,:,:width-1,0])
ux_grad_y = (flow[:,1:,:,0] - flow[:,:height-1,:,0])
ux_grad_x_mx = ux_grad_x * ux_grad_x
ux_grad_y_my = ux_grad_y * ux_grad_y
# compute the mod of ux_dx and ux_dy
# sum (1..n-1,1...n-1)
sum = (ux_grad_x_mx[:, :height - 1, :width - 1] + ux_grad_y_my[:, :height - 1, :width - 1]).pow(0.5)
sum_ux = sum.sum(2).sum(1) + ux_grad_x[:, height - 1, :].sum(1).float() + ux_grad_y[:, :, width - 1].sum(1).float()
# compute the sum of uy_dx and uy_dy values from 1st to n-1 values for flow in y direction
uy_grad_x = (flow[:,:,1:,1] - flow[:,:,:width-1,1])
uy_grad_y = (flow[:,1:,:,1] - flow[:,:height-1,:,1])
uy_grad_x_mx = uy_grad_x * uy_grad_x
uy_grad_y_my = uy_grad_y * uy_grad_y
sum = (uy_grad_x_mx[:, :height - 1, :width - 1] + uy_grad_y_my[:, :height - 1, :width - 1]).pow(0.5)
sum_uy = sum.sum(2).sum(1) + uy_grad_x[:, height - 1, :].sum(1).float() + uy_grad_y[:, :, width - 1].sum(1).float()
grad_loss = sum_ux + sum_uy
# total energy is sum of appearance constancy and flow smoothness
energy = grad_loss+Variable(imag_grad.cuda())
return energy
| [
"numpy.array",
"torch.norm",
"torch.abs",
"torch.Tensor"
] | [((1706, 1738), 'torch.Tensor', 'torch.Tensor', (['[ux_grad, uy_grad]'], {}), '([ux_grad, uy_grad])\n', (1718, 1738), False, 'import torch\n'), ((2104, 2146), 'torch.abs', 'torch.abs', (['(wrapped_first_image - img1.data)'], {}), '(wrapped_first_image - img1.data)\n', (2113, 2146), False, 'import torch\n'), ((2162, 2188), 'torch.norm', 'torch.norm', (['grad_vec', '(2)', '(2)'], {}), '(grad_vec, 2, 2)\n', (2172, 2188), False, 'import torch\n'), ((4047, 4073), 'torch.norm', 'torch.norm', (['grad_vec', '(2)', '(3)'], {}), '(grad_vec, 2, 3)\n', (4057, 4073), False, 'import torch\n'), ((3754, 3796), 'torch.abs', 'torch.abs', (['(wrapped_first_image - img1.data)'], {}), '(wrapped_first_image - img1.data)\n', (3763, 3796), False, 'import torch\n'), ((3413, 3443), 'numpy.array', 'np.array', (['[0.411, 0.432, 0.45]'], {}), '([0.411, 0.432, 0.45])\n', (3421, 3443), True, 'import numpy as np\n'), ((3518, 3548), 'numpy.array', 'np.array', (['[0.411, 0.432, 0.45]'], {}), '([0.411, 0.432, 0.45])\n', (3526, 3548), True, 'import numpy as np\n'), ((3633, 3663), 'numpy.array', 'np.array', (['[0.411, 0.432, 0.45]'], {}), '([0.411, 0.432, 0.45])\n', (3641, 3663), True, 'import numpy as np\n')] |
from abc import ABC, abstractmethod
from functools import partial
from typing import Union, Dict, Callable
from contextlib import suppress
from gzip import GzipFile
from pathlib import Path
import os
import json
import pickle
import numpy as np
from ..local import Storage
__all__ = (
'Serializer', 'SerializerError', 'ChainSerializer', 'DictSerializer',
'NumpySerializer', 'JsonSerializer', 'PickleSerializer',
)
class SerializerError(Exception):
pass
class Serializer(ABC):
@abstractmethod
def save(self, value, folder: Path):
""" Saves the ``value`` to ``folder`` """
@abstractmethod
def load(self, folder: Path, storage: Storage):
""" Loads the value from ``folder`` """
@staticmethod
def _load_file(storage: Storage, loader: Callable, path: Path, *args, **kwargs):
""" Useful function for loading files from storage """
with open(path, 'r') as key:
return storage.read(loader, key.read(), *args, **kwargs)
class ChainSerializer(Serializer):
def __init__(self, *serializers: Serializer):
self.serializers = serializers
def save(self, value, folder: Path):
for serializer in self.serializers:
with suppress(SerializerError):
return serializer.save(value, folder)
raise SerializerError(f'No serializer was able to save to {folder}.')
def load(self, folder: Path, storage: Storage):
for serializer in self.serializers:
with suppress(SerializerError):
return serializer.load(folder, storage)
raise SerializerError(f'No serializer was able to load from {folder}.')
class JsonSerializer(Serializer):
def save(self, value, folder: Path):
try:
value = json.dumps(value)
except TypeError as e:
raise SerializerError from e
with open(folder / 'value.json', 'w') as file:
file.write(value)
def load(self, folder: Path, storage: Storage):
paths = list(folder.iterdir())
if len(paths) != 1:
raise SerializerError
path, = paths
if path.name != 'value.json':
raise SerializerError
def loader(x):
with open(x, 'r') as file:
return json.load(file)
return self._load_file(storage, loader, folder / 'value.json')
class PickleSerializer(Serializer):
def save(self, value, folder):
try:
value = pickle.dumps(value)
except TypeError as e:
raise SerializerError from e
with open(folder / 'value.pkl', 'wb') as file:
file.write(value)
def load(self, folder: Path, storage: Storage):
paths = list(folder.iterdir())
if len(paths) != 1:
raise SerializerError
path, = paths
if path.name != 'value.pkl':
raise SerializerError
def loader(x):
with open(x, 'rb') as file:
return pickle.load(file)
return self._load_file(storage, loader, folder / 'value.pkl')
class NumpySerializer(Serializer):
def __init__(self, compression: Union[int, Dict[type, int]] = None):
self.compression = compression
def _choose_compression(self, value):
if isinstance(self.compression, int) or self.compression is None:
return self.compression
if isinstance(self.compression, dict):
for dtype in self.compression:
if np.issubdtype(value.dtype, dtype):
return self.compression[dtype]
def save(self, value, folder: Path):
value = np.asarray(value)
compression = self._choose_compression(value)
if compression is not None:
assert isinstance(compression, int)
with GzipFile(folder / 'value.npy.gz', 'wb', compresslevel=compression, mtime=0) as file:
np.save(file, value)
else:
np.save(folder / 'value.npy', value)
def load(self, folder: Path, storage: Storage):
paths = list(folder.iterdir())
if len(paths) != 1:
raise SerializerError
path, = paths
if path.name == 'value.npy':
loader = partial(np.load, allow_pickle=True)
elif path.name == 'value.npy.gz':
def loader(x):
with GzipFile(x, 'rb') as file:
return np.load(file, allow_pickle=True)
else:
raise SerializerError
return self._load_file(storage, loader, path)
class DictSerializer(Serializer):
def __init__(self, serializer: Serializer):
self.keys_filename = 'dict_keys.json'
self.serializer = serializer
def save(self, data: dict, folder: Path):
if not isinstance(data, dict):
raise SerializerError
# TODO: remove all if at least one iteration fails
keys_to_folder = {}
for sub_folder, (key, value) in enumerate(data.items()):
keys_to_folder[sub_folder] = key
os.makedirs(folder / str(sub_folder), exist_ok=True)
self.serializer.save(value, folder / str(sub_folder))
with open(folder / self.keys_filename, 'w+') as f:
json.dump(keys_to_folder, f)
def load(self, folder: Path, storage: Storage):
keys = folder / self.keys_filename
if not keys.exists():
raise SerializerError
def loader(x):
with open(x, 'r') as f:
return json.load(f)
keys_map = self._load_file(storage, loader, keys)
data = {}
for sub_folder, key in keys_map.items():
data[key] = self.serializer.load(folder / sub_folder, storage)
return data
| [
"pickle.dumps",
"json.dump",
"json.dumps",
"numpy.asarray",
"pickle.load",
"numpy.issubdtype",
"gzip.GzipFile",
"functools.partial",
"contextlib.suppress",
"json.load",
"numpy.load",
"numpy.save"
] | [((3641, 3658), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (3651, 3658), True, 'import numpy as np\n'), ((1778, 1795), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (1788, 1795), False, 'import json\n'), ((2483, 2502), 'pickle.dumps', 'pickle.dumps', (['value'], {}), '(value)\n', (2495, 2502), False, 'import pickle\n'), ((3963, 3999), 'numpy.save', 'np.save', (["(folder / 'value.npy')", 'value'], {}), "(folder / 'value.npy', value)\n", (3970, 3999), True, 'import numpy as np\n'), ((4235, 4270), 'functools.partial', 'partial', (['np.load'], {'allow_pickle': '(True)'}), '(np.load, allow_pickle=True)\n', (4242, 4270), False, 'from functools import partial\n'), ((5239, 5267), 'json.dump', 'json.dump', (['keys_to_folder', 'f'], {}), '(keys_to_folder, f)\n', (5248, 5267), False, 'import json\n'), ((1230, 1255), 'contextlib.suppress', 'suppress', (['SerializerError'], {}), '(SerializerError)\n', (1238, 1255), False, 'from contextlib import suppress\n'), ((1504, 1529), 'contextlib.suppress', 'suppress', (['SerializerError'], {}), '(SerializerError)\n', (1512, 1529), False, 'from contextlib import suppress\n'), ((2289, 2304), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2298, 2304), False, 'import json\n'), ((2996, 3013), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (3007, 3013), False, 'import pickle\n'), ((3497, 3530), 'numpy.issubdtype', 'np.issubdtype', (['value.dtype', 'dtype'], {}), '(value.dtype, dtype)\n', (3510, 3530), True, 'import numpy as np\n'), ((3814, 3889), 'gzip.GzipFile', 'GzipFile', (["(folder / 'value.npy.gz')", '"""wb"""'], {'compresslevel': 'compression', 'mtime': '(0)'}), "(folder / 'value.npy.gz', 'wb', compresslevel=compression, mtime=0)\n", (3822, 3889), False, 'from gzip import GzipFile\n'), ((3915, 3935), 'numpy.save', 'np.save', (['file', 'value'], {}), '(file, value)\n', (3922, 3935), True, 'import numpy as np\n'), ((5511, 5523), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5520, 
5523), False, 'import json\n'), ((4361, 4378), 'gzip.GzipFile', 'GzipFile', (['x', '"""rb"""'], {}), "(x, 'rb')\n", (4369, 4378), False, 'from gzip import GzipFile\n'), ((4415, 4447), 'numpy.load', 'np.load', (['file'], {'allow_pickle': '(True)'}), '(file, allow_pickle=True)\n', (4422, 4447), True, 'import numpy as np\n')] |
"""
MAPSCI: Multipole Approach of Predicting and Scaling Cross Interactions
Handles the primary functions
"""
import numpy as np
import scipy.optimize as spo
import logging
logger = logging.getLogger(__name__)
def calc_distance_array(bead_dict, tol=0.01, max_factor=2, lower_bound="rmin"):
r"""
Calculation of array for nondimensionalized distance array.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
bead_dict : dict
Dictionary of multipole parameters.
- sigma (float) Nondimensionalized size parameter, :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
tol : float, Optional, default=0.01
Ratio of absolute value of repulsive term over attractive term of the Mie potential to define minimum bound
max_factor : int, Optional, default=2
Factor to multiply minimum bound by to define maximum bound.
lower_bound : str, Optional, default='rmin'
Lower bound of distance array. Can be one of:
- rmin: the position of the potential well
- sigma: the size parameter
- tolerance: Uses 'tol' keyword to define the ratio between the attractive and repulsive terms of the Mie potential, note that if tol = 0.01 the lower bound will be ~2.5*sigma.
Returns
-------
r : numpy.ndarray
Array (or float) in [Å] or nondimensionalized, distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
"""
if lower_bound == "rmin":
rm = mie_potential_minimum(bead_dict)
elif lower_bound == "sigma":
rm = bead_dict["sigma"]
elif lower_bound == "tolerance":
rm = bead_dict["sigma"] * (1 / tol)**(1 / (bead_dict["lambdar"] - bead_dict["lambdaa"]))
else:
raise ValueError("Method, {}, is not supported to calculating lower_bound of fitting/integration".format(lower_bound))
r_array = np.linspace(rm, max_factor * rm, num=10000)
return r_array
def mie_potential_minimum(bead_dict):
r"""
Calculate Mie potential minimum of potential well.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
bead_dict : dict
Dictionary of multipole parameters.
- sigma (float) Size parameter in [Å] or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
Returns
-------
rmin : float
Position of minimum of potential well
"""
return bead_dict["sigma"] * (bead_dict["lambdar"] / bead_dict["lambdaa"])**(1 / (bead_dict["lambdar"] - bead_dict["lambdaa"]))
def mie_combining_rules(bead1, bead2):
r"""
Calculate basic mixed parameters, where the energy parameter is calculated with the geometric mean
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
beadA : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
beadB : dict
Dictionary of multipole parameters for bead_B.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
Returns
-------
beadAB : dict
Dictionary of multipole parameters for bead_B.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
"""
beadAB = {}
beadAB["sigma"] = (bead1["sigma"] + bead2["sigma"]) / 2
beadAB["lambdar"] = 3 + np.sqrt((bead1["lambdar"] - 3) * (bead2["lambdar"] - 3))
beadAB["lambdaa"] = 3 + np.sqrt((bead1["lambdaa"] - 3) * (bead2["lambdaa"] - 3))
beadAB["epsilon"] = np.sqrt(bead1["epsilon"] * bead2["epsilon"])
return beadAB
def calc_mie_attractive_potential(r, bead_dict, shape_factor_scale=False):
r"""
Calculation of attractive Mie potential.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
r : numpy.ndarray
Array (or float) in either [Å] or nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`, whatever is consistent with 'bead_dict'
bead_dict : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
shape_factor_scale : bool, Optional, default=False
Scale energy parameter based on shape factor epsilon*Si*Sj
Returns
-------
potential : numpy.ndarray
Array of nondimensionalized potential between beads from Mie potential. Array is equal in length to "r". :math:`\phi'=\phi/(3k_{B}T)`
"""
if shape_factor_scale:
if "Sk" in bead_dict:
bead_dict["epsilon"] = bead_dict["epsilon"] * bead_dict["Sk"]**2
else:
raise ValueError("Shape factor was not provided in bead dictionary")
potential = -prefactor(bead_dict["lambdar"], bead_dict["lambdaa"]) * bead_dict["epsilon"] * (bead_dict["sigma"] /
r)**bead_dict["lambdaa"]
return potential
def calc_mie_repulsive_potential(r, bead_dict, shape_factor_scale=False):
r"""
Calculation of repulsive Mie potential.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
r : numpy.ndarray
Array (or float) in either [Å] or nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`, whatever is consistent with 'bead_dict'
bead_dict : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
shape_factor_scale : bool, Optional, default=False
Scale energy parameter based on shape factor epsilon*Si*Sj
Returns
-------
potential : numpy.ndarray
Array of nondimensionalized potential between beads from Mie potential. Array is equal in length to "r". :math:`\phi'=\phi/(3k_{B}T)`
"""
if shape_factor_scale:
if "Sk" in bead_dict:
bead_dict["epsilon"] = bead_dict["epsilon"] * bead_dict["Sk"]**2
else:
raise ValueError("Shape factor was not provided in bead dictionary")
potential = prefactor(bead_dict["lambdar"], bead_dict["lambdaa"]) * bead_dict["epsilon"] * (bead_dict["sigma"] /
r)**bead_dict["lambdar"]
return potential
def prefactor(lamr, lama):
""" Calculation prefactor for Mie potential: :math:`C_{Mie}=\lambda_r/(\lambda_r-\lambda_a) (\lambda_r/\lambda_a)^{\lambda_a/(\lambda_r-\lambda_a)}`
"""
return lamr / (lamr - lama) * (lamr / lama)**(lama / (lamr - lama))
def calc_lambdaij_from_epsilonij(epsij, bead1, bead2):
r"""
Calculates cross-interaction exponents from cross interaction energy parameter
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
epsilonij : float
Fit energy parameter in [K] or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
beadA : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
beadB : dict
Dictionary of multipole parameters for bead_B.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
Returns
-------
lambdar_new : float
Repulsive exponent
lambdaa_new : float
Attractive exponent
"""
sigmaij = np.mean([bead1["sigma"], bead2["sigma"]])
tmp = epsij * sigmaij**3 / np.sqrt(bead1["sigma"]**3 * bead2["sigma"]**3) / np.sqrt(
bead1["epsilon"] * bead2["epsilon"])
lamr_ij = 3 + tmp * np.sqrt((bead1["lambdar"] - 3) * (bead2["lambdar"] - 3))
lama_ij = 3 + tmp * np.sqrt((bead1["lambdaa"] - 3) * (bead2["lambdaa"] - 3))
return lamr_ij, lama_ij
def calc_epsilonij_from_lambda_aij(lambda_a, bead1, bead2):
r"""
Calculate cross-interaction energy parameter from self-interaction parameters and cross-interaction attractive exponent using from scaling with vdW attraction parameter
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
lambda_aij : float
Mixed attractive exponent from multipole combining rules
beadA : dict
Dictionary of multipole parameters for bead_A.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
beadB : dict
Dictionary of multipole parameters for bead_B.
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- Sk (float) Shape factor
Returns
-------
epsilon_ij : float
Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
"""
tmp_sigma = np.sqrt(bead1["sigma"]**3 * bead2["sigma"]**3) / np.mean([bead1["sigma"], bead2["sigma"]])**3
tmp_lambda = (lambda_a - 3) / np.sqrt((bead1["lambdaa"] - 3) * (bead2["lambdaa"] - 3))
epsilon_ij = np.sqrt(bead1["epsilon"] * bead2["epsilon"]) * tmp_sigma * tmp_lambda
return epsilon_ij
def calc_lambdarij_from_lambda_aij(lambda_a, alpha_mie):
r"""
Calculate cross-interaction repulsive exponent from cross interaction attractive exponent and Mie 'vdW like' interaction parameter.
Parameters
----------
lambda_aij : float
Mixed attractive exponent from multipole combining rules
alpha_mie : float
This nondimensionalized attractive parameter for the Mie potential is related not only to the Mie exponents but also to the triple and critical temperatures of a substance.
Returns
-------
lambdar_new : float
Repulsive exponent
"""
lambda_r = spo.brentq(lambda x: alpha_mie - prefactor(x, lambda_a) * (1 / (lambda_a - 3) - 1 / (x - 3)),
lambda_a * 1.01,
1e+4,
xtol=1e-12)
return lambda_r
def calc_self_multipole_potential(r, polarizability, bead_dict, temperature=None, nondimensional=False):
r"""
Calculation of self-interaction potential using extended multipole expression, either with or without dimensions
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
r : numpy.ndarray
Array (or float) of nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
polarizability : float
Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
bead_dict : dict
Dictionary of multipole parameters. Those parameters may be:
- charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
- dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
- quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
- ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
- polarizability (float) Nondimensionalize polarizability of bead in [:math:`Å^3`]. :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
temperature : float, Optional, default=None
Temperature in [K] for adding and removing dimensions, if the parameters are nondimensionalized, this value isn't used.
nondimensional : bool, Optional, default=False
Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
Returns
-------
potential : numpy.ndarray
Multipole potential between beads based on multipole moments that is in [kcal/mol], or nondimensionalized as :math:`\phi'=\phi/(3k_{B}T)` Array is equal in length to "r".
"""
bead_dict["polarizability"] = polarizability
if not nondimensional:
if temperature == None:
logger.error("Temperature should be included when 'nondimensional' is False")
bead_dict_new = dict_dimensions(bead_dict.copy(), temperature, dimensions=False)
r = float_dimensions(r,"sigma",temperature,dimensions=False)
else:
bead_dict_new = bead_dict.copy()
t11 = -bead_dict_new["charge"]**2 * bead_dict_new["dipole"]**2
t12 = -bead_dict_new["charge"]**2
t21 = -3 * bead_dict_new["ionization_energy"] / 4
t22 = -2 * bead_dict_new["dipole"]**2
t23 = -bead_dict_new["dipole"]**4 - 3 * bead_dict_new["quadrupole"]**2 * bead_dict_new["charge"]**2 / 5
t31 = -3 * bead_dict_new["dipole"]**2 * bead_dict_new["quadrupole"]**2
t32 = -3 * bead_dict_new["quadrupole"]**2
t41 = -21 / 5 * bead_dict_new["quadrupole"]**4
potential = (t11 + bead_dict_new["polarizability"]*t12)/r**4 \
+ (t21*bead_dict_new["polarizability"]**2 + t22*bead_dict_new["polarizability"] + t23)/r**6 \
+ (t31 + bead_dict_new["polarizability"]*t32)/r**8 \
+ t41/r**10
if not nondimensional:
potential = float_dimensions(potential,"ionization_energy",temperature,dimensions=True)
return potential
def calc_polarizability(bead_library, temperature=None, distance_opts={}, calculation_method="fit", polarizability_opts={}, nondimensional=False, shape_factor_scale=False):
r"""
Calculation of polarizability for beads in the provided library that do not have one calculated. The multipole moments and Mie parameters must be provided for this purpose.
Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.
Parameters
----------
bead_library : dict
Dictionary of beads and their dictionaries of multipole parameters. Those parameters may be:
- epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
- sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
- lambdar (float) Repulsive exponent
- lambdaa (float) Attractive exponent
- charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
- dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
- quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
- ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
temperature : float, Optional, default=None
Temperature in [K] for adding and removing dimensions, if the parameters are nondimensionalized, this value isn't used.
distance_opts : dict, Optional, default={}
Dictionary of keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.calc_distance_array`
calculation_method : str, Optional, default="fit"
Method of calculating the polarizability, either 'fit' or 'analytical'
polarizability_opts : dict, Optional, default={}
Dictionary of keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.fit_polarizability` or :func:`~mapsci.multipole_mie_combining_rules.solve_polarizability_integral`
nondimensional : bool, Optional, default=False
Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
shape_factor_scale : bool, Optional, default=False
Scale energy parameter based on shape factor epsilon*Si*Sj
Returns
-------
bead_library : dict
Dictionary of beads and their dictionaries of multipole parameters. The following parameter is added to the original dictionary:
- polarizability (float) Polarizability of bead in [:math:`Å^3`] or nondimensionalized as :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
"""
if not nondimensional:
if temperature == None:
logger.error("Temperature should be included when 'nondimensional' is False")
bead_library_new = dict_dimensions(bead_library.copy(), temperature, dimensions=False)
else:
bead_library_new = bead_library.copy()
tmp = [False if type(value)==dict else True for _,value in bead_library_new.items()]
if np.all(tmp):
bead_library_new = {"tmp": bead_library_new}
flag = True
elif np.any(tmp):
raise ValueError("Dictionary should be either a single beads parameters, or a dictionary of dictionaries containing the parameters of several beads.")
else:
flag = False
for i, bead in enumerate(bead_library_new.keys()):
if "polarizability" in bead_library_new[bead]:
logger.info("Using given polarizability value for bead, {}".format(bead))
r = calc_distance_array(bead_library_new[bead], **distance_opts)
if calculation_method == "fit":
pol_tmp, var = fit_polarizability(r, bead_library_new[bead], **polarizability_opts, nondimensional=True)
elif calculation_method == "analytical":
pol_tmp = solve_polarizability_integral(r[0], bead_library_new[bead], **polarizability_opts, nondimensional=True)
else:
raise ValueError("Given, {}, is not a valid calculation method, choose 'fit' or 'analytical'".format(calculation_method))
if np.isnan(pol_tmp):
raise ValueError("Error: Bead {} cannot fit suitable polarizability. Attractive exponent is most likely not suitable given the bead partial charges.")
bead_library_new[bead]["polarizability"] = pol_tmp
if not nondimensional:
bead_library_new = dict_dimensions(bead_library_new, temperature, dimensions=True)
if flag:
bead_library_new = bead_library_new["tmp"]
return bead_library_new
def fit_polarizability(r, bead_dict, temperature=None, nondimensional=False, tol=0.05, shape_factor_scale=False, plot_fit=False):
    r"""
    Calculation of polarizability by fitting the sum of multipole potentials to the attractive term of the Mie potential.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    r : numpy.ndarray
        Array (or float) of distance between two beads. Reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    bead_dict : dict
        Dictionary of multipole parameters: epsilon, sigma, lambdar, lambdaa, charge, dipole, quadrupole, and ionization_energy (dimensionalized or nondimensionalized consistently with ``nondimensional``).
    temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; unused when parameters are already nondimensionalized.
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
    tol : float, Optional, default=0.05
        Maximum acceptable ratio of the fit variance over the polarizability; above this a refit diagnostic is run.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    plot_fit : bool, Optional, default=False
        Plot Mie potential and Multipole potential for comparison.

    Returns
    -------
    polarizability : float
        Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
    pol_variance : float
        Variance of the fit polarizability, taken from the diagonal of the curve-fit covariance matrix. (Note: this is the variance, not one standard deviation.)
    """
    # Work internally in nondimensional units; convert inputs if needed.
    if not nondimensional:
        if temperature is None:
            logger.error("Temperature should be included when 'nondimensional' is False")
        bead_dict_new = dict_dimensions(bead_dict.copy(), temperature, dimensions=False)
    else:
        bead_dict_new = bead_dict.copy()

    # The attractive Mie contribution is the target the multipole sum is fit against.
    w_mie = calc_mie_attractive_potential(r, bead_dict_new, shape_factor_scale=shape_factor_scale)

    p0 = [1.e-6]
    pol_tmp, var_matrix = spo.curve_fit(
        lambda x, a: calc_self_multipole_potential(x,
                                                   a,
                                                   bead_dict_new,
                                                   nondimensional=True,
                                                   ),
        r,
        w_mie,
        p0=p0,
        bounds=(0.0, np.inf))

    # Judge fit quality from the scalar variance/polarizability ratio.
    # Indexing explicitly avoids truth-testing a 1-element numpy array
    # (the original compared np.diag(var_matrix)/pol_tmp, an array, to tol).
    if var_matrix[0][0] / pol_tmp[0] > tol:
        _ = test_polarizability(pol_tmp, bead_dict_new, r, plot_fit=plot_fit, shape_factor_scale=shape_factor_scale)

    polarizability = pol_tmp[0]
    pol_variance = var_matrix[0][0]

    if not nondimensional:
        polarizability = float_dimensions(polarizability, "polarizability", temperature, dimensions=True)
        # NOTE(review): the variance scales as polarizability squared, yet the linear
        # conversion is applied here; this matches the original behavior — confirm intent.
        pol_variance = float_dimensions(pol_variance, "polarizability", temperature, dimensions=True)

    return polarizability, pol_variance
def test_polarizability(polarizability, bead_dict, r, plot_fit=False, shape_factor_scale=False):
    r"""
    If the polarizability doesn't provide a good fit between multipole potential and Mie potential, use the estimated polarizability to suggest a different attractive exponent and energy parameter.

    All quantities here are expected nondimensionalized with the physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    polarizability : float
        Nondimensionalized polarizability of bead. :math:`\alpha'=\alpha (4 \pi \varepsilon_{0})^{2} 3k_{B}T e^{-6}`
    bead_dict : dict
        Dictionary of nondimensionalized multipole parameters: epsilon, sigma, lambdar, lambdaa, charge, dipole, quadrupole, ionization_energy.
    r : numpy.ndarray
        Array (or float) of nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    plot_fit : bool, Optional, default=False
        Plot Mie potential and Multipole potential for comparison.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj

    Returns
    -------
    epsilon_fit : float
        Energy parameter from the curve fit against the multipole potential using the fit polarizability
    """
    bead_dict_new = bead_dict.copy()
    bead_dict_new["polarizability"] = polarizability

    # Refit the cross-interaction parameters treating the bead with itself.
    output = fit_multipole_cross_interaction_parameter(bead_dict_new,
                                                       bead_dict_new,
                                                       distance_array=r,
                                                       nondimensional=True,
                                                       shape_factor_scale=shape_factor_scale)
    logger.warning(
        "Refitting attractive exponent with estimated polarizability of {} yields: lambda_a {}, epsilon {}".format(
            bead_dict_new["polarizability"], output["lambdaa"], output["epsilon"]))

    if plot_fit:
        from mapsci.quick_plots import plot_potential, plot_multipole_potential

        w_mie = calc_mie_attractive_potential(r, bead_dict_new, shape_factor_scale=shape_factor_scale)
        plot_opts = {"label": "Mie", "color": "k", "linestyle": "--"}
        plot_potential(r, w_mie, plot_opts=plot_opts, show=False)

        bead_dict_plot = bead_dict_new.copy()
        bead_dict_plot.update({"epsilon": output["epsilon"], "lambdaa": output["lambdaa"]})
        w_mie_fit = calc_mie_attractive_potential(r, bead_dict_plot, shape_factor_scale=shape_factor_scale)
        plot_opts = {"label": "Mie fit", "color": "r", "linestyle": "--"}
        plot_potential(r, w_mie_fit, plot_opts=plot_opts, show=False)

        multipole_terms = calc_cross_multipole_terms(bead_dict_new, bead_dict_new, nondimensional=True)
        tmp = ["charge-dipole", "charge-induced_dipole", "induced_dipole-induced_dipole", "dipole-dipole", "dipole-induced_dipole", "charge-quadrupole", "dipole-quadrupole", "induced_dipole-quadrupole", "quadrupole-quadrupole"]
        # Bug fix: the original referenced an undefined name ``A`` here (NameError
        # whenever plot_fit=True); the values logged are the nine multipole
        # coefficients just computed above, in the same order as ``tmp``.
        logger.debug(("{}: {{{}}}\n"*len(tmp)).format(*[val for val in tmp for _ in range(2)]).format(**dict(zip(tmp, multipole_terms))))
        potential, potential_terms = calc_cross_multipole_potential(r, multipole_terms, total_only=False, nondimensional=True)
        plot_multipole_potential(r, potential, potential_terms=potential_terms)

    return output["epsilon"]
def solve_polarizability_integral(sigma0, bead_dict0, shape_factor_scale=False, temperature=None, nondimensional=False):
    r"""
    Calculate the polarizability from multipole moments with the explicit-integral method.

    The multipole potential integral (from ``sigma0`` to infinity) is set equal to the
    corresponding Mie integral, and the polarizability that balances the two is found
    with a bracketed root search. Nondimensional parameters are scaled using vacuum
    permittivity :math:`\varepsilon_{0}`, Boltzmann constant :math:`k_{B}`, and
    elementary charge :math:`e`.

    Parameters
    ----------
    sigma0 : float
        Lower bound of the integral; where the multipole attractive term is expected to start matching the Mie potential. In [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    bead_dict0 : dict
        Dictionary of multipole parameters (epsilon, sigma, lambdar, lambdaa, Sk, ...).
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    temperature : float, Optional, default=None
        Temperature in [K] for adding/removing dimensions; unused when already nondimensional.
    nondimensional : bool, Optional, default=False
        Whether ``bead_dict0`` has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`

    Returns
    -------
    polarizability : float
        Polarizability balancing the Mie and multipole integrals, in [:math:`Å^3`] or
        nondimensionalized as :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`.
        NaN when no root is bracketed in (machine-eps, 1).
    """
    if nondimensional:
        bead_dict = bead_dict0.copy()
    else:
        if temperature == None:
            logger.error("Temperature should be included when 'nondimensional' is False")
        bead_dict = dict_dimensions(bead_dict0.copy(), temperature, dimensions=False)
        sigma0 = float_dimensions(sigma0, "sigma", temperature, dimensions=False)

    # Target value: integral of the attractive Mie contribution from sigma0 outward.
    Cmie_int = mie_integral(bead_dict, sigma0=sigma0, shape_factor_scale=shape_factor_scale)

    # Bracket the root of the objective between machine epsilon and unity.
    eps_machine = np.finfo("float").eps
    obj_lower = _obj_polarizability_from_integral(eps_machine, bead_dict, Cmie_int, sigma0)
    obj_upper = _obj_polarizability_from_integral(1, bead_dict, Cmie_int, sigma0)
    if obj_lower * obj_upper < 0:
        polarizability = spo.brentq(_obj_polarizability_from_integral,
                                    eps_machine,
                                    1,
                                    args=(bead_dict, Cmie_int, sigma0),
                                    xtol=1e-12)
    else:
        # No sign change: no physically meaningful polarizability in range.
        polarizability = np.nan

    if not nondimensional and not np.isnan(polarizability):
        polarizability = float_dimensions(polarizability, "polarizability", temperature, dimensions=True)

    return polarizability
def calc_cross_multipole_terms(bead1, bead2, temperature=None, nondimensional=False):
    r"""
    Compute the coefficients of the cross-interaction potential from multipole moments.

    Nondimensional parameters are scaled using vacuum permittivity :math:`\varepsilon_{0}`,
    Boltzmann constant :math:`k_{B}`, and elementary charge :math:`e`.

    Parameters
    ----------
    bead1 : dict
        Multipole parameters for bead_A: charge, dipole, quadrupole, ionization_energy, polarizability (dimensionalized or nondimensionalized consistently with ``nondimensional``).
    bead2 : dict
        Multipole parameters for bead_B, same keys as ``bead1``.
    temperature : float, Optional, default=None
        Temperature in [K] for removing dimensions; unused when already nondimensional.
    nondimensional : bool, Optional, default=False
        Whether the beads were nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`

    Returns
    -------
    multipole_terms : numpy.ndarray
        Nine coefficients, in order: charge-dipole, charge-induced_dipole,
        induced_dipole-induced_dipole, dipole-dipole, dipole-induced_dipole,
        charge-quadrupole, dipole-quadrupole, induced_dipole-quadrupole,
        quadrupole-quadrupole. These are ALWAYS nondimensionalized.
    """
    if nondimensional:
        b1 = bead1.copy()
        b2 = bead2.copy()
    else:
        if temperature == None:
            logger.error("Temperature should be included when 'nondimensional' is False")
        b1 = dict_dimensions(bead1.copy(), temperature, dimensions=False)
        b2 = dict_dimensions(bead2.copy(), temperature, dimensions=False)

    # Effective ionization energy combining both beads (harmonic-mean-like form).
    I = b1['ionization_energy'] * b2['ionization_energy'] / (b1['ionization_energy'] +
                                                             b2['ionization_energy'])

    # r^-4 contributions
    charge_dipole = (b1['charge']**2. * b2['dipole']**2 + b2['charge']**2. * b1['dipole']**2.) / 2.0
    charge_induced = (b1['charge']**2. * b2['polarizability'] + b2['charge']**2. * b1['polarizability']) / 2.0
    # r^-6 contributions
    induced_induced = 3. * I * b1['polarizability'] * b2['polarizability'] / 2.
    dipole_dipole = b1['dipole']**2. * b2['dipole']**2.
    dipole_induced = b1['polarizability'] * b2['dipole']**2. + b2['polarizability'] * b1['dipole']**2.
    charge_quad = 3. * (b1['quadrupole']**2. * b2['charge']**2. + b2['quadrupole']**2. * b1['charge']**2.) / 10.
    # r^-8 contributions
    dipole_quad = 3. / 2. * (b1['dipole']**2. * b2['quadrupole']**2. + b2['dipole']**2. * b1['quadrupole']**2.)
    induced_quad = 3. / 2. * (b1['quadrupole']**2. * b2['polarizability'] +
                              b2['quadrupole']**2. * b1['polarizability'])
    # r^-10 contribution
    quad_quad = 21. / 5. * b1['quadrupole']**2. * b2['quadrupole']**2.

    return np.array([charge_dipole, charge_induced, induced_induced, dipole_dipole, dipole_induced,
                     charge_quad, dipole_quad, induced_quad, quad_quad], dtype=object)
def condense_multipole_terms(multipole_terms):
    r"""
    Combine the nine individual multipole coefficients into one coefficient per order of distance.

    The multipole interactions scale as r^-4 through r^-10 in steps of 2. This takes the
    output of :func:`calc_cross_multipole_terms` and sums the coefficients sharing the
    same power of r, following the grouping used in :func:`calc_cross_multipole_potential`:
    terms 0-1 scale as r^-4, terms 2-5 as r^-6, terms 6-7 as r^-8, and term 8 as r^-10.

    Parameters
    ----------
    multipole_terms : numpy.ndarray
        Nine coefficients in order: charge-dipole, charge-induced_dipole,
        induced_dipole-induced_dipole, dipole-dipole, dipole-induced_dipole,
        charge-quadrupole, dipole-quadrupole, induced_dipole-quadrupole,
        quadrupole-quadrupole

    Returns
    -------
    new_multipole_terms : numpy.ndarray
        Four coefficients for r to the order of -4, -6, -8, and -10, respectively.
    """
    new_multipole_terms = np.zeros(4)
    # Bug fix: the r^-4 coefficient previously summed multipole_terms[:1], silently
    # dropping term 1 (charge-induced_dipole), which also scales as r^-4.
    new_multipole_terms[0] = np.sum(multipole_terms[:2])
    new_multipole_terms[1] = np.sum(multipole_terms[2:6])
    new_multipole_terms[2] = np.sum(multipole_terms[6:8])
    new_multipole_terms[3] = np.sum(multipole_terms[8])

    return new_multipole_terms
def calc_cross_multipole_potential(r, multipole_terms, nondimensional=False, temperature=None, total_only=True):
    r"""
    Evaluate the cross-interaction potential from multipole coefficients.

    Nondimensional parameters are scaled using vacuum permittivity :math:`\varepsilon_{0}`,
    Boltzmann constant :math:`k_{B}`, and elementary charge :math:`e`.

    Parameters
    ----------
    r : numpy.ndarray
        Array (or float) of nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    multipole_terms : numpy.ndarray
        Either 4 coefficients (for r^-4, r^-6, r^-8, r^-10) or 9 coefficients for the
        individual multipole interactions.
    nondimensional : bool, Optional, default=False
        Whether the result should stay nondimensional; if False it is converted with ``temperature``.
    temperature : float, Optional, default=None
        Temperature in [K] used to dimensionalize the output when ``nondimensional`` is False.
    total_only : bool, Optional, default=True
        If True, return only the summed potential (useful for fitting); otherwise also
        return the per-term contributions.

    Returns
    -------
    potential : numpy.ndarray
        Potential between beads from multipole moments, same length as ``r``. :math:`\phi'=\phi/(3k_{B}T)` or in kcal/mol
    potential_terms : numpy.ndarray, Optional
        2D array of the individual contributions; only returned when ``total_only`` is False.
    """
    # Map each coefficient to its power of r; 4-term input is the condensed form,
    # 9-term input is the individual-interaction form.
    n_terms = np.size(multipole_terms)
    if n_terms == 4:
        powers = [4., 6., 8., 10.]
    elif n_terms == 9:
        powers = [4., 4., 6., 6., 6., 6., 8., 8., 10.]
    else:
        raise ValueError(
            "Multipole terms input should be either of length 4 or length 9 for the supported interaction types.")

    potential_terms = np.array([-coefficient / r**power for coefficient, power in zip(multipole_terms, powers)])
    potential = np.sum(potential_terms, axis=0)

    if not nondimensional:
        potential = float_dimensions(potential, "ionization_energy", temperature)
        potential_terms = float_dimensions(potential_terms, "ionization_energy", temperature)

    if total_only:
        return potential
    return potential, potential_terms
def _obj_polarizability_from_integral(polarizability, bead_dict, Cintegral, sigma0):
    r"""
    Objective function for the polarizability root search.

    Returns the difference between the multipole potential integral (evaluated with the
    trial polarizability) and the reference Mie integral, both taken from ``sigma0``
    to infinity; the root of this function is the consistent polarizability.

    Parameters
    ----------
    polarizability : float
        Trial nondimensionalized polarizability, :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`
    bead_dict : dict
        Nondimensionalized multipole parameters for the bead (charge, dipole, quadrupole, ionization_energy).
    Cintegral : float
        Value of the Mie potential integral the multipole integral must match.
    sigma0 : float
        Lower bound of the integral, nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`

    Returns
    -------
    obj_value : float
        Multipole integral minus the Mie integral.
    """
    trial_bead = bead_dict.copy()
    trial_bead["polarizability"] = polarizability
    Cmultipole, _ = multipole_integral(trial_bead, trial_bead, sigma0=sigma0, nondimensional=True)
    return Cmultipole - Cintegral
def partial_polarizability(bead_dict0, temperature=None, sigma0=None, lower_bound="rmin", nondimensional=False):
    r"""
    Calculate the partial derivatives of the polarizability with respect to the multipole moments. This is useful in estimating the error.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    bead_dict0 : dict
        Dictionary of multipole parameters for the bead.

        - epsilon (float) Energy parameter scaled by :math:`k_{B}` in [K], or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - sigma (float) Size parameter in [Å], or nondimensionalized as :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - charge (float) Charge of bead in [e], or nondimensionalized as :math:`q'=q/e`
        - dipole (float) Dipole of bead in [Debye], or nondimensionalized as :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
        - quadrupole (float) Quadrupole of bead in [Debye*Å], or nondimensionalized as :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
        - ionization_energy (float) Ionization_energy of bead in [kcal/mol], or nondimensionalized as :math:`I'=I/(3k_{B}T)`
        - polarizability (float) Polarizability of bead in [:math:`Å^3`] or nondimensionalized with :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
    temperature : float, Optional, default=298
        Temperature in [K] for adding and removing dimensions, if the parameters are nondimensionalized, this value isn't used.
    sigma0 : float, Optional, default=None
        Lower bound of the definite integral. Can be reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    lower_bound : str, Optional, default='rmin'
        Lower bound of distance array. Used only when sigma0 is None. Can be one of:

        - rmin: the position of the potential well
        - sigma: the size parameter
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`

    Returns
    -------
    partial_dict : dict
        Partial derivative with respect to each multipole moment present in ``bead_dict0``
    """
    # Work internally in nondimensional units; default temperature is applied here.
    if not nondimensional:
        if temperature is None:
            temperature = 298
            logger.info("Using default temperature of 298 K")
        bead_dict = dict_dimensions(bead_dict0.copy(), temperature, dimensions=False)
    else:
        bead_dict = bead_dict0.copy()

    # Choose the evaluation distance rm.
    # NOTE(review): if sigma0 is None and lower_bound is neither "rmin" nor "sigma",
    # rm is never assigned and a NameError follows — confirm accepted inputs.
    if sigma0 is None:
        if lower_bound == "rmin":
            rm = mie_potential_minimum(bead_dict)
        elif lower_bound == "sigma":
            rm = bead_dict["sigma"]
    else:
        rm = float_dimensions(sigma0,"sigma",temperature,dimensions=False)

    # Quadratic-formula coefficients for polarizability; the partials below are
    # hand-derived from alpha = -(a + sqrt(b - c)) / 2 style expressions.
    # NOTE(review): the quadrupole term here is "* rm**2" while analogous terms in
    # b and c use "/ rm**2" — dimensional consistency suggests this may be a typo;
    # confirm against the derivation.
    a = -2 / bead_dict['ionization_energy'] * (bead_dict['charge']**2. * rm**2 + 2 * bead_dict['dipole']**2 / 3 +
                                               3 * bead_dict['quadrupole']**2.0 * rm**2)
    b = 4 / bead_dict['ionization_energy']**2 * (
        bead_dict['charge']**4. * rm**4 + 4 * bead_dict['charge']**2. * bead_dict['dipole']**2 * rm**2 / 3 +
        6 * bead_dict['quadrupole']**2. * bead_dict['charge']**2. / 5 + 4 / 9 * bead_dict['dipole']**4 + 4 / 5 *
        bead_dict['dipole']**2 * bead_dict['quadrupole']**2.0 / rm**2 + 9 / 25 * bead_dict['quadrupole']**4.0 / rm**4)
    c = 4 / bead_dict['ionization_energy'] * (
        bead_dict['charge']**2. * bead_dict['dipole']**2 * rm**2 + bead_dict['dipole']**4 / 3 +
        bead_dict['quadrupole']**2. * bead_dict['charge']**2. / 5 +
        3 / 5 * bead_dict['quadrupole']**2. * bead_dict['dipole']**2. / rm**2 +
        3 / 5 * bead_dict['quadrupole']**4.0 / rm**4 - prefactor(bead_dict['lambdar'], bead_dict['lambdaa']) /
        (bead_dict['lambdaa'] - 3) * bead_dict['epsilon'] * bead_dict['sigma']**bead_dict['lambdaa'] / rm**
        (bead_dict['lambdaa'] - 6))

    # Analytical partial derivatives; keys without a branch below (epsilon, sigma,
    # lambdar, lambdaa, polarizability) are intentionally skipped.
    partial_dict = {}
    for key in bead_dict0:
        if key == "ionization_energy":
            partial_dict[key] = -(a + np.sqrt(b - c)) / bead_dict['ionization_energy']
        elif key == "charge":
            # tmp1 = db/dq, tmp2 = dc/dq; chain rule through sqrt(b - c)
            tmp1 = 4 / bead_dict['ionization_energy']**2 * (
                4 * bead_dict['charge']**3 * rm**4 + 8 / 3 * bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 +
                bead_dict['charge'] * bead_dict['quadrupole']**2 * 12 / 5)
            tmp2 = 8 / bead_dict['ionization_energy'] * (bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 +
                                                         bead_dict['charge'] * bead_dict['quadrupole']**2 / 5)
            partial_dict[key] = -4 * bead_dict['charge'] * rm**2 / bead_dict['ionization_energy'] + (tmp1 - tmp2) / (
                2 * np.sqrt(b - c))
        elif key == "dipole":
            # tmp1 = db/dmu, tmp2 = dc/dmu
            tmp1 = 4 / bead_dict['ionization_energy']**2 * (
                8 / 3 * bead_dict['charge']**2 * rm**2 * bead_dict['dipole'] + 16 / 9 * bead_dict['dipole']**3 +
                8 / 5 * bead_dict['dipole'] * bead_dict['quadrupole']**2 / rm**2)
            tmp2 = 8 / bead_dict['ionization_energy'] * (
                bead_dict['charge'] * bead_dict['dipole']**2 * rm**2 + 4 / 3 * bead_dict['dipole']**3 +
                3 / 5 * bead_dict['dipole'] * bead_dict['quadrupole']**2 / rm**2)
            partial_dict[key] = -8 / 3 * bead_dict['dipole'] / bead_dict['ionization_energy'] + (tmp1 - tmp2) / (
                2 * np.sqrt(b - c))
        elif key == "quadrupole":
            # tmp1 = db/dQ, tmp2 = dc/dQ
            tmp1 = 4 / bead_dict['ionization_energy']**2 * (12 / 5 * bead_dict['charge']**2 * bead_dict['quadrupole'] +
                                                            8 / 5 * bead_dict['dipole']**2 * bead_dict['quadrupole'] /
                                                            rm**2 + 36 / 25 * bead_dict['quadrupole']**3 / rm**4)
            tmp2 = 4 / bead_dict['ionization_energy'] * (2 / 5 * bead_dict['charge']**2 * bead_dict['quadrupole'] + 6 /
                                                         5 * bead_dict['dipole']**2 * bead_dict['quadrupole'] / rm**2 +
                                                         12 / 5 * bead_dict['quadrupole']**3 / rm**4)
            partial_dict[key] = -12 / 5 * bead_dict['quadrupole'] / bead_dict['ionization_energy'] / rm**2 + (
                tmp1 - tmp2) / (2 * np.sqrt(b - c))

    # Re-dimensionalize: remove the moment's own scaling, then apply the
    # polarizability scaling (d alpha / d moment carries both conversions).
    if not nondimensional:
        for key in partial_dict:
            if key != "charge":
                tmp = float_dimensions(partial_dict[key], key, temperature, dimensions=True)
            else:
                tmp = partial_dict[key]
            partial_dict[key] = float_dimensions(tmp, "polarizability", temperature)

    return partial_dict
def partial_energy_parameter(beadA,
                             beadB,
                             temperature=None,
                             nondimensional=False,
                             lower_bound="rmin",
                             distance_opts={},
                             polarizability_opts={},
                             shape_factor_scale=False,
                             sigma0=None):
    r"""
    Calculate the partial derivatives of the cross-interaction energy parameter with respect to the multipole moments. This is useful in estimating the error.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    beadA : dict
        Dictionary of multipole parameters for bead_A: epsilon, sigma, lambdar, lambdaa,
        charge, dipole, quadrupole, ionization_energy, polarizability (dimensionalized or
        nondimensionalized consistently with ``nondimensional``).
    beadB : dict
        Dictionary of multipole parameters for bead_B, same keys as ``beadA``.
    temperature : float, Optional, default=298
        Temperature in [K] for adding and removing dimensions, if the parameters are nondimensionalized, this value isn't used.
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead library has been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
    lower_bound : str, Optional, default='rmin'
        Lower bound of distance array. Used only when sigma0 is None. Can be one of:

        - rmin: the position of the potential well
        - sigma: the size parameter
    distance_opts : dict, Optional, default={}
        Dictionary of keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.calc_distance_array`
    polarizability_opts : dict, Optional, default={}
        Dictionary of keyword arguments used in :func:`~mapsci.multipole_mie_combining_rules.calc_polarizability`.
        This dictionary is not modified by this function.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    sigma0 : float, Optional, default=None
        Lower bound of the definite integral. Can be reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`

    Returns
    -------
    partial_dict : dict
        Dictionary of dictionaries (keys "0" for beadA, "1" for beadB) of partial
        derivatives with respect to each multipole moment.
    """
    # Work internally in nondimensional units; default temperature is applied here.
    if not nondimensional:
        if temperature is None:
            temperature = 298
            logger.info("Using default temperature of 298 K")
        bead1 = dict_dimensions(beadA.copy(), temperature, dimensions=False)
        bead2 = dict_dimensions(beadB.copy(), temperature, dimensions=False)
    else:
        bead1 = beadA.copy()
        bead2 = beadB.copy()

    bead_dict = {"0": bead1, "1": bead2}

    # Bug fix: build a new options dict instead of mutating the caller's dictionary
    # (and the shared mutable default argument) in place, which leaked
    # 'shape_factor_scale' into subsequent calls.
    polarizability_opts = dict(polarizability_opts, shape_factor_scale=shape_factor_scale)
    bead_dict = calc_polarizability(bead_dict,
                                    distance_opts=distance_opts,
                                    calculation_method="analytical",
                                    polarizability_opts=polarizability_opts,
                                    nondimensional=True)

    beadAB = mie_combining_rules(bead1, bead2)
    # Evaluation distance rm for the cross interaction.
    # NOTE(review): if sigma0 is None and lower_bound is neither "rmin" nor "sigma",
    # rm is never assigned and a NameError follows — confirm accepted inputs.
    if sigma0 is None:
        if lower_bound == "rmin":
            rm = mie_potential_minimum(beadAB)
        elif lower_bound == "sigma":
            rm = beadAB["sigma"]
    else:
        rm = float_dimensions(sigma0,"sigma",temperature,dimensions=False)

    # Denominator common to all partials: integrated attractive Mie prefactor.
    tmp = prefactor(beadAB["lambdar"],
                    beadAB["lambdaa"]) / (beadAB["lambdaa"] - 3) * beadAB["sigma"]**beadAB["lambdaa"] / rm**(beadAB["lambdaa"] - 3)
    if shape_factor_scale:
        tmp = tmp * beadA["Sk"] * beadB["Sk"]

    # partial_dict["0"] holds derivatives w.r.t. beadA's moments, "1" w.r.t. beadB's.
    partial_dict = {"0": {}, "1": {}}
    for i in [0, 1]:
        key1 = str(1 * i)
        key2 = str(1 - i)
        for key in bead_dict[key1]:
            if key == "ionization_energy":
                # I = d(effective ionization energy)/d(I_key1)
                # NOTE(review): I is computed but not used in the expression below —
                # possibly a missing factor in the partial; confirm against derivation.
                I = bead_dict[key2]['ionization_energy']**2 / (bead_dict[key1]['ionization_energy'] +
                                                               bead_dict[key2]['ionization_energy'])**2
                partial_dict[key1][
                    key] = bead_dict[key1]['polarizability'] * bead_dict[key2]['polarizability'] / rm**3 / 2 / tmp
            elif key == "charge":
                tmp1 = bead_dict[key1]['charge'] / rm * (bead_dict[key2]['polarizability'] +
                                                         bead_dict[key2]['dipole']**2)
                tmp2 = bead_dict[key1]['charge'] * bead_dict[key2]['quadrupole']**2 / rm**3 / 10
                partial_dict[key1][key] = (tmp1 + tmp2) / tmp
            elif key == "dipole":
                tmp1 = bead_dict[key2]['charge']**2 * bead_dict[key1]['dipole'] / rm
                tmp2 = 2 / 3 * bead_dict[key1]['dipole'] / rm**3 * (bead_dict[key2]['dipole']**2 +
                                                                    bead_dict[key2]['polarizability'])
                tmp3 = 3 / 5 / rm**5 * bead_dict[key1]['dipole'] * bead_dict[key2]['quadrupole']**2
                partial_dict[key1][key] = (tmp1 + tmp2 + tmp3) / tmp
            elif key == "quadrupole":
                tmp1 = bead_dict[key2]['charge']**2 * bead_dict[key1]['quadrupole'] / rm**3 / 5
                tmp2 = 3 / 5 * bead_dict[key1]['quadrupole'] / rm**5 * (bead_dict[key2]['dipole']**2 +
                                                                        bead_dict[key2]['polarizability'])
                tmp3 = 6 / 5 / rm**7 * bead_dict[key1]['quadrupole'] * bead_dict[key2]['quadrupole']**2
                partial_dict[key1][key] = (tmp1 + tmp2 + tmp3) / tmp
            elif key == "polarizability":
                I = bead_dict[key1]['ionization_energy'] * bead_dict[key2]['ionization_energy'] / (
                    bead_dict[key1]['ionization_energy'] + bead_dict[key2]['ionization_energy'])
                tmp1 = bead_dict[key2]['charge']**2 / rm / 2
                tmp2 = 1 / 3 / rm**3 * (bead_dict[key2]['dipole']**2 + 3 / 2 * bead_dict[key2]['polarizability'] * I)
                tmp3 = 3 / 10 / rm**5 * bead_dict[key2]['quadrupole']**2
                partial_dict[key1][key] = (tmp1 + tmp2 + tmp3) / tmp

        # Re-dimensionalize: remove the moment's own scaling, then apply the
        # epsilon scaling (d epsilon / d moment carries both conversions).
        if not nondimensional:
            for key in partial_dict[key1]:
                if key != "charge":
                    tmp1 = float_dimensions(partial_dict[key1][key], key, temperature, dimensions=True)
                else:
                    tmp1 = partial_dict[key1][key]
                partial_dict[key1][key] = float_dimensions(tmp1, "epsilon", temperature)

    return partial_dict
def multipole_integral(beadA, beadB, sigma0=None, lower_bound="rmin", multipole_terms=None, temperature=None, nondimensional=False):
    r"""
    Calculate the integral of the multipole potential from a given minimum to infinity, in units of :math:`\epsilon/\sigma^3`.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    beadA : dict
        Dictionary of multipole parameters for bead_A: epsilon, sigma, lambdar, lambdaa, Sk. Dimensional ([K], [Å]) or nondimensionalized consistently with ``nondimensional``.
    beadB : dict
        Dictionary of multipole parameters for bead_B, with the same keys and units as ``beadA``.
    sigma0 : float, Optional, default=None
        This lower bound of the integral dictates where we expect to start matching the multipole attractive term with that of the Mie potential. Can be reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`. If None, the value taken from 'lower_bound' will be used.
    lower_bound : str, Optional, default='rmin'
        Lower bound of distance array. Can be one of:

        - rmin: the position of the potential well
        - sigma: the size parameter

    multipole_terms : numpy.ndarray, Optional, default=None
        Coefficients for r to the order of -4, -6, -8, and -10, respectively (length 4), or the 9-term decomposition. If not provided, this quantity will be calculated. These are ALWAYS dimensionless.
    temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; unused if the parameters are nondimensionalized.
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead parameters have been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`

    Returns
    -------
    Cmultipole : float
        Sum of integral terms. Either in [kcal/mol] or dimensionless :math:`C_{multi}'=C_{multi}/(3k_{B}T)`
    multipole_int_terms : numpy.ndarray
        Terms involved in calculation of the energy parameter, epsilon; these sum to ``Cmultipole`` and share its units.

    Raises
    ------
    ValueError
        If neither 'sigma0' nor 'lower_bound' is given, or if 'multipole_terms' has an unsupported length.
    """
    if lower_bound is None and sigma0 is None:
        raise ValueError("Either a lower bound for integration must be provided with the keyword 'sigma0', or specified with 'lower_bound'")

    if not nondimensional:
        if temperature is None:
            logger.error("Temperature should be included when 'nondimensional' is False")
        bead1 = dict_dimensions(beadA.copy(), temperature, dimensions=False)
        bead2 = dict_dimensions(beadB.copy(), temperature, dimensions=False)
        if sigma0 is not None:
            sigma0 = float_dimensions(sigma0, "sigma", temperature, dimensions=False)
    else:
        bead1 = beadA.copy()
        bead2 = beadB.copy()

    if sigma0 is None:
        # Derive the lower bound from the mixed Mie potential of the two beads
        bead_dict = mie_combining_rules(bead1, bead2)
        if lower_bound == "rmin":
            sigma0 = mie_potential_minimum(bead_dict)
        elif lower_bound == "sigma":
            sigma0 = bead_dict["sigma"]

    if multipole_terms is None:
        multipole_terms = calc_cross_multipole_terms(bead1, bead2, nondimensional=True)

    # Analytic integral of 4*pi*r^2 * r^-n from sigma0 to infinity for each attractive order
    if np.size(multipole_terms) == 4:
        integral = -4 * np.pi * np.array([sigma0**(-1), sigma0**(-3) / 3, sigma0**(-5) / 5, sigma0**(-7) / 7])
    elif np.size(multipole_terms) == 9:
        integral = -4 * np.pi * np.array([
            sigma0**(-1), sigma0**(-1), sigma0**(-3) / 3, sigma0**(-3) / 3, sigma0**(-3) / 3, sigma0**(-3) / 3,
            sigma0**(-5) / 5, sigma0**(-5) / 5, sigma0**(-7) / 7
        ])
    else:
        raise ValueError(
            "Multipole terms input should be either of length 4 or length 9 for the supported interaction types.")

    multipole_int_terms0 = integral * multipole_terms
    Cmultipole = np.sum(multipole_int_terms0)

    if not nondimensional:
        # Redimensionalize: the integral carries units of epsilon * sigma^3
        for tmp in ["epsilon", "sigma", "sigma", "sigma"]:
            Cmultipole = float_dimensions(Cmultipole, tmp, temperature, dimensions=True)
            multipole_int_terms0 = float_dimensions(multipole_int_terms0, tmp, temperature, dimensions=True)

    return Cmultipole, multipole_int_terms0
def solve_multipole_cross_interaction_integral(sigma0,
                                               beadA,
                                               beadB,
                                               multipole_terms=None,
                                               shape_factor_scale=False,
                                               temperature=None,
                                               nondimensional=False,
                                               beadAB=None):
    r"""
    Solve for the nondimensionalized cross-interaction energy parameter that reproduces the integral of the multipole potential.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    sigma0 : float
        Lower bound of the integral, where the multipole attractive term is expected to start matching that of the Mie potential. In [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    beadA : dict
        Multipole parameters for bead_A: epsilon, sigma, lambdar, lambdaa, Sk (nondimensionalized when ``nondimensional`` is True).
    beadB : dict
        Multipole parameters for bead_B, same keys as ``beadA``.
    multipole_terms : numpy.ndarray, Optional, default=None
        Coefficients for r to the order of -4, -6, -8, and -10, respectively. If None, this quantity is computed internally. These are ALWAYS nondimensionalized.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; unused if the parameters are nondimensionalized.
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead parameters have been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
    beadAB : dict, Optional, default=None
        Mixed Mie parameters for beadA and beadB (epsilon, sigma, lambdar, lambdaa). If None, the standard combining rules are applied.

    Returns
    -------
    epsilon : float
        Cross-interaction energy parameter from the analytical solution of the extended combining rules. Either in reduced units :math:`C_{multi}'=C_{multi}/k_{B}` or dimensionless :math:`C_{multi}'=C_{multi}/(3k_{B}T)`
    multipole_int_terms : numpy.ndarray
        Integral terms involved in calculating the energy parameter. Always dimensionless :math:`C_{multi}'=C_{multi}/(3k_{B}T)`
    """
    # Work internally with nondimensionalized copies of the inputs.
    if nondimensional:
        bead_a = beadA.copy()
        bead_b = beadB.copy()
    else:
        if temperature is None:
            logger.error("Temperature should be included when 'nondimensional' is False")
        bead_a = dict_dimensions(beadA.copy(), temperature, dimensions=False)
        bead_b = dict_dimensions(beadB.copy(), temperature, dimensions=False)
        sigma0 = float_dimensions(sigma0, "sigma", temperature, dimensions=False)

    # Mixed Mie parameters: derived from combining rules unless supplied by the caller.
    if beadAB is None:
        bead_ab = mie_combining_rules(bead_a, bead_b)
    elif nondimensional:
        bead_ab = beadAB.copy()
    else:
        bead_ab = dict_dimensions(beadAB.copy(), temperature, dimensions=False)

    eps_guess = bead_ab["epsilon"]
    Cmultipole, multipole_int_terms0 = multipole_integral(
        bead_a, bead_b, sigma0=sigma0, multipole_terms=multipole_terms, nondimensional=True)

    # Bracket the root between one twentieth and twice the combining-rule estimate.
    epsilon = spo.brentq(_obj_energy_parameter_from_integral,
                         eps_guess / 20,
                         eps_guess * 2,
                         args=(bead_a, bead_b, bead_ab, Cmultipole, sigma0, shape_factor_scale),
                         xtol=1e-12)

    if not nondimensional:
        epsilon = float_dimensions(epsilon, "epsilon", temperature, dimensions=True)

    return epsilon, multipole_int_terms0
def _obj_energy_parameter_from_integral(eps0, beadA, beadB, beadAB, Cintegral, sigma0, shape_factor_scale=False):
    r"""
    Objective function used to fit the energy parameter to the integral of the multipole moment.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    eps0 : float
        Trial value of the nondimensionalized energy parameter, :math:`\epsilon'=\epsilon/(3k_{B}T)`
    beadA : dict
        Nondimensionalized multipole parameters for bead_A (epsilon, sigma, lambdar, lambdaa, Sk).
    beadB : dict
        Nondimensionalized multipole parameters for bead_B (epsilon, sigma, lambdar, lambdaa, Sk).
    beadAB : dict
        Nondimensionalized mixed Mie parameters for beadA and beadB (epsilon, sigma, lambdar, lambdaa).
    Cintegral : float
        Sum of the multipole integrals; set equal to the attractive term of the integrated Mie potential to determine the energy parameter.
    sigma0 : float
        Lower bound of the integral, nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj

    Returns
    -------
    obj_value : float
        Difference between the multipole integral and the Mie potential integral at the trial energy parameter.
    """
    mie_int = mie_integral(beadAB, sigma0=sigma0, shape_factor_scale=shape_factor_scale)
    # Rescale the Mie integral to the trial energy parameter before comparing.
    return eps0 * mie_int / beadAB["epsilon"] - Cintegral
def mie_integral(beadAB, sigma0=None, lower_bound="rmin", shape_factor_scale=False):
    r"""
    Calculate the integral of the attractive term in the Mie potential from the given minimum value to infinity. Units in those of :math:`\epsilon/\sigma^3`

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    beadAB : dict
        Dictionary of mixed Mie parameters for bead1 and bead2. Not modified by this function.

        - epsilon (float) Nondimensionalized energy parameter, :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - sigma (float) Nondimensionalized size parameter, :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - Sk (float) Shape factor, required when ``shape_factor_scale`` is True

    sigma0 : float, Optional, default=None
        This lower bound of the integral dictates where we expect to start matching the multipole attractive term with that of the Mie potential. Can be reported in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`. If None, the value taken from 'lower_bound' will be used.
    lower_bound : str, Optional, default='rmin'
        Lower bound of distance array. Can be one of:

        - rmin: the position of the potential well
        - sigma: the size parameter

    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj

    Returns
    -------
    Cintegral : float
        Value of the definite Mie integral from sigma0 to infinity

    Raises
    ------
    ValueError
        If neither 'sigma0' nor 'lower_bound' is given, or if 'shape_factor_scale' is set without an "Sk" entry.
    """
    if lower_bound is None and sigma0 is None:
        raise ValueError("Either a lower bound for integration must be provided with the keyword 'sigma0', or specified with 'lower_bound'")

    if sigma0 is None:
        if lower_bound == "rmin":
            sigma0 = mie_potential_minimum(beadAB)
        elif lower_bound == "sigma":
            sigma0 = beadAB["sigma"]

    # Scale a local copy of epsilon instead of mutating the caller's dictionary.
    # Previously beadAB["epsilon"] was overwritten in place, so repeated calls with
    # shape_factor_scale=True (e.g. inside a root-finding loop) compounded Sk**2.
    epsilon = beadAB["epsilon"]
    if shape_factor_scale:
        if "Sk" in beadAB:
            epsilon = epsilon * beadAB["Sk"]**2
        else:
            raise ValueError("Shape factor was not provided in bead dictionary")

    # Analytic integral of 4*pi*r^2 times the attractive Mie term from sigma0 to infinity
    integral = -4 * np.pi * epsilon * prefactor(
        beadAB["lambdar"],
        beadAB["lambdaa"]) * beadAB["sigma"]**beadAB["lambdaa"] / sigma0**(beadAB["lambdaa"] - 3) / (beadAB["lambdaa"] - 3)

    return integral
def fit_multipole_cross_interaction_parameter(beadA,
                                              beadB,
                                              distance_opts=None,
                                              distance_array=None,
                                              shape_factor_scale=False,
                                              nondimensional=False,
                                              temperature=None):
    r"""
    Calculation of cross-interaction parameters for the Mie potential by fitting to the multipole potential.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    beadA : dict
        Dictionary of Mie and multipole parameters for bead_A: epsilon, sigma, lambdar, lambdaa, charge, dipole, quadrupole, ionization_energy, polarizability. Dimensional ([K], [Å], [e], [Debye], …) or nondimensionalized consistently with ``nondimensional``.
    beadB : dict
        Dictionary of Mie and multipole parameters for bead_B, same keys and units as ``beadA``.
    distance_opts : dict, Optional, default=None
        Keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.calc_distance_array`; an empty dict is used when None.
    distance_array : numpy.ndarray, Optional, default=None
        Array (or float) of distances between the two beads, in [Å] or nondimensionalized as :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`, whatever is consistent with the bead dictionaries. If None, 'distance_opts' is used to generate the array.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead parameters have been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`
    temperature : float, Optional, default=None
        Temperature in [K] for adding and removing dimensions; unused if the parameters are nondimensionalized.

    Returns
    -------
    output_dict : dict
        Dictionary of:

        - epsilon (float) Fit energy parameter, scaled by :math:`k_{B}` in [K] or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`. Calculated from fit lambdaa and van der Waals attraction parameter.
        - kij (float) Binary interaction parameter for the fit energy parameter, where :math:`\epsilon_{fit}=(1-k_{ij})\sqrt{\epsilon_i\epsilon_j}`
        - sigma (float) Size parameter from combining rules, in [Å] or nondimensionalized
        - lambdar (float) Fit repulsive exponent, calculated from K/epsilon_fit
        - lambdaa (float) Fit attractive exponent
        - lambdaa_variance (float) Variance in the attractive exponent from the fit
        - epsilon_saft (float) Energy parameter from the SAFT geometric-mean scaling
        - kij_saft (float) Binary interaction parameter for the SAFT energy parameter
        - K (float) Equal to :math:`C_{Mie}\epsilon_{fit}`, used in the fitting process
        - K_variance (float) Variance in the fit of K

    Raises
    ------
    ValueError
        If no suitable repulsive exponent can be derived from the fitted prefactor.
    """
    # None sentinel instead of a shared mutable default argument
    if distance_opts is None:
        distance_opts = {}

    if not nondimensional:
        if temperature is None:
            logger.error("Temperature should be included with 'nondimensional' is False")
        bead1 = dict_dimensions(beadA.copy(), temperature, dimensions=False)
        bead2 = dict_dimensions(beadB.copy(), temperature, dimensions=False)
    else:
        bead1 = beadA.copy()
        bead2 = beadB.copy()

    # Set-up Mie parameters
    beadAB = mie_combining_rules(bead1, bead2)
    Cmie = prefactor(beadAB["lambdar"], beadAB["lambdaa"])

    if shape_factor_scale:
        # Default missing shape factors on the working copies. Previously the defaults
        # were written to the callers' dictionaries AFTER the copies were made, so
        # bead1/bead2 could still lack "Sk" and raise a KeyError downstream; it also
        # mutated the callers' input dictionaries.
        if "Sk" not in bead1:
            bead1["Sk"] = 1.0
        if "Sk" not in bead2:
            bead2["Sk"] = 1.0

    multipole_terms = calc_cross_multipole_terms(bead1, bead2, nondimensional=True)

    # From curve fit
    if distance_array is None:
        r = calc_distance_array(beadAB, **distance_opts)
    else:
        r = distance_array
    w_multipole, potential_terms = calc_cross_multipole_potential(r, multipole_terms, total_only=False, nondimensional=True)

    # ___________ VDW parameter combining _______________
    # Fit K = C_mie * epsilon and lambdaa by linearizing the attractive Mie term
    params, var_matrix = spo.curve_fit(lambda x, K, lambdaa: log_mie_attractive(
        r, bead1, bead2, lambda_a=lambdaa, Kprefactor=K, shape_factor_scale=shape_factor_scale),
                                       r,
                                       np.log(-w_multipole),
                                       p0=[beadAB["epsilon"] * Cmie, beadAB["lambdaa"]],
                                       bounds=(0.0, np.inf))
    K = params[0]
    lambdaa_fit = params[1]
    eps_fit = calc_epsilonij_from_lambda_aij(lambdaa_fit, bead1, bead2)

    if K / eps_fit < 1.01:
        raise ValueError(
            "A suitable repulsive exponent cannot be calculated using the following cross interaction parameters:\n    epsilon: {}, lambdaa: {}, Cmie: {} < 1.0\n    Check self-interaction parameters above. A common cause could be poorly fit polarizability because a partial charge was assigned to an bead where it's Mie potential is fit to expect dipole to be the highest order."
            .format(float_dimensions(eps_fit, "epsilon", temperature), lambdaa_fit, K / eps_fit))
    else:
        try:
            # Recover lambdar from K = eps_fit * C(lambdar, lambdaa)
            lambdar_fit = spo.brentq(lambda x: K / eps_fit - prefactor(x, lambdaa_fit), lambdaa_fit * 1.01, 1e+4, xtol=1e-12)
        except ValueError as err:
            # brentq raises ValueError when the root is not bracketed (was a bare except)
            raise ValueError("This shouldn't happen, check given parameters.") from err

    # Save output
    if not nondimensional:
        tmp = beadAB["epsilon"] * np.sqrt(bead1["sigma"]**3 * bead2["sigma"]**3) / beadAB["sigma"]**3
        beadAB["epsilon_saft"] = float_dimensions(tmp, "epsilon", temperature, dimensions=True)
        beadAB["epsilon"] = float_dimensions(eps_fit, "epsilon", temperature, dimensions=True)
        beadAB["K"] = float_dimensions(K, "epsilon", temperature, dimensions=True)
        beadAB["K_variance"] = float_dimensions(var_matrix[1][1], "epsilon", temperature, dimensions=True)
        beadAB["sigma"] = float_dimensions(beadAB["sigma"], "sigma", temperature, dimensions=True)
    else:
        beadAB["epsilon_saft"] = beadAB["epsilon"] * np.sqrt(
            bead1["sigma"]**3 * bead2["sigma"]**3) / beadAB["sigma"]**3
        beadAB["epsilon"] = eps_fit
        beadAB["K"] = K
        beadAB["K_variance"] = var_matrix[1][1]

    beadAB["lambdaa"] = lambdaa_fit
    beadAB["lambdaa_variance"] = var_matrix[0][0]
    beadAB["lambdar"] = lambdar_fit
    beadAB["kij_saft"] = 1 - beadAB["epsilon_saft"] / np.sqrt(bead1["epsilon"] * bead2["epsilon"])
    beadAB["kij"] = 1 - beadAB["epsilon"] / np.sqrt(bead1["epsilon"] * bead2["epsilon"])

    return beadAB
def log_mie_attractive(r, bead1, bead2, lambda_a=None, Kprefactor=None, epsilon=None, shape_factor_scale=False):
    r"""
    Calculate the log of the attractive term of the Mie potential. This linearizes the curve for the fitting process.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    r : numpy.ndarray
        Array (or float) of nondimensionalized distance between two beads. :math:`r'=r (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
    bead1 : dict
        Nondimensionalized Mie parameters for bead_A (epsilon, sigma, lambdar, lambdaa, Sk).
    bead2 : dict
        Nondimensionalized Mie parameters for bead_B (epsilon, sigma, lambdar, lambdaa, Sk); the mixed parameters are derived from both beads.
    lambda_a : float, Optional, default=None
        The cross-interaction attractive exponent; if not specified the combining rule from `Lafitte 2013 <https://doi.org/10.1063/1.4819786>`_ is used.
    Kprefactor : float, Optional, default=None
        Total prefactor of the Mie potential, equal to the energy parameter times the Mie prefactor, C. If not specified, the value derived from the combining rules is used.
    epsilon : float, Optional, default=None
        The energy parameter for the Mie potential; if not specified the combining rule from `Lafitte 2013 <https://doi.org/10.1063/1.4819786>`_ is used.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj

    Returns
    -------
    log_potential : numpy.ndarray
        The log of the attractive potential for the given parameters.

    Raises
    ------
    ValueError
        If 'epsilon' and 'Kprefactor' are both given (redundant), or if none of 'epsilon', 'lambda_a', and 'Kprefactor' are given.
    """
    beadAB = mie_combining_rules(bead1, bead2)
    sigma = beadAB["sigma"]
    lambda_r = beadAB["lambdar"]

    if epsilon is not None and lambda_a is not None:
        # Assume lambdar follows normal combining rules
        Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
    elif epsilon is not None and Kprefactor is not None:
        raise ValueError("Specifying 'epsilon' and 'Kprefactor' is redundant.")
    elif epsilon is not None:
        # Assume both exponents follow normal combining rules
        lambda_a = beadAB["lambdaa"]
        Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
    elif lambda_a is not None and Kprefactor is None:
        # Assume lambdar follows normal combining rules, epsilon can be derived from 1 fluid combining rule
        epsilon = calc_epsilonij_from_lambda_aij(lambda_a, bead1, bead2)
        Kprefactor = epsilon * prefactor(lambda_r, lambda_a)
    elif lambda_a is None and Kprefactor is not None:
        # Assume lambdaa follows normal combining rules
        lambda_a = beadAB["lambdaa"]
    # Remaining case: lambda_a and Kprefactor both given (epsilon None) -- use them as-is.

    if Kprefactor is None:
        # Reached only when epsilon, lambda_a, and Kprefactor are all None; previously
        # this fell through to np.log(None) and raised an opaque TypeError.
        raise ValueError("Specify at least one of 'epsilon', 'lambda_a', or 'Kprefactor'.")

    if shape_factor_scale:
        Kprefactor = Kprefactor * bead1["Sk"] * bead2["Sk"]

    return np.log(Kprefactor) + lambda_a * np.log(sigma / r)
def calc_self_mie_from_multipole(bead_dict,
                                 mie_vdw=None,
                                 temperature=298,
                                 lambda_r=12,
                                 distance_opts=None,
                                 distance_array=None,
                                 polarizability_opts=None,
                                 shape_factor_scale=False,
                                 nondimensional=False):
    r"""
    Calculation of self-interaction parameters for the Mie potential from multipole moments.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    bead_dict : dict
        Dictionary of Mie and multipole parameters for the bead: epsilon, sigma, lambdar, lambdaa, charge, dipole, quadrupole, ionization_energy, polarizability. Dimensional ([K], [Å], [e], [Debye], …) or nondimensionalized consistently with ``nondimensional``. Not modified by this function.
    mie_vdw : float, Optional, default=None
        Nondimensionalized attractive parameter for the Mie potential, related to the Mie exponents and to the triple and critical temperatures of a substance. When given, it is used to derive the repulsive exponent.
    temperature : float, Optional, default=298
        Temperature in [K] for adding and removing dimensions; unused if the parameters are nondimensionalized.
    lambda_r : float, Optional, default=12
        Assumed repulsive exponent. NOTE(review): this argument is not referenced in the body; the repulsive exponent comes from ``bead_dict`` or 'mie_vdw' -- confirm intent.
    distance_opts : dict, Optional, default=None
        Optional keywords for creating the r array used for calculation or fitting; an empty dict is used when None.
    distance_array : numpy.ndarray, Optional, default=None
        Array (or float) of distances between two beads, in [Å] or nondimensionalized consistently with 'bead_dict'. If None, 'distance_opts' is used to generate the array.
    polarizability_opts : dict, Optional, default=None
        Keyword arguments for :func:`~mapsci.multipole_mie_combining_rules.fit_polarizability` or :func:`~mapsci.multipole_mie_combining_rules.solve_polarizability_integral`; an empty dict is used when None. Not modified by this function.
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    nondimensional : bool, Optional, default=False
        Indicates whether the given bead parameters have been nondimensionalized by :func:`~mapsci.multipole_mie_combining_rules.dict_dimensions`

    Returns
    -------
    cross_dict : dict
        Dictionary with the energy parameter and exponents for the Mie self-interaction of the given bead:

        - epsilon (float) Fit energy parameter, scaled by :math:`k_{B}` in [K] or nondimensionalized as :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - lambdar (float) Repulsive exponent, derived from 'mie_vdw' when provided, otherwise the given value
        - lambdaa (float) Fit attractive exponent
    """
    # None sentinels replace shared mutable default dicts ({} defaults are shared
    # across calls and were previously mutated below).
    if distance_opts is None:
        distance_opts = {}
    if polarizability_opts is None:
        polarizability_opts = {}

    if not nondimensional:
        logger.info("Calculating cross-interaction parameter with temperature of {}.".format(temperature))
        bead_dict_new = dict_dimensions(bead_dict.copy(), temperature, dimensions=False)
    else:
        bead_dict_new = bead_dict.copy()

    if shape_factor_scale:
        if "Sk" not in bead_dict_new:
            bead_dict_new["Sk"] = 1.0

    if "polarizability" not in bead_dict_new:
        logger.debug("Calculating polarizability")
        # Copy before adding the shape-factor flag so the caller's options dict is
        # not mutated (the original called polarizability_opts.update(...) in place).
        polarizability_opts = dict(polarizability_opts, shape_factor_scale=shape_factor_scale)
        bead_dict_new = calc_polarizability(bead_dict_new, distance_opts=distance_opts, calculation_method="fit", polarizability_opts=polarizability_opts, nondimensional=True)

    multipole_terms = calc_cross_multipole_terms(bead_dict_new, bead_dict_new, nondimensional=True)

    if distance_array is None:
        r = calc_distance_array(bead_dict_new, **distance_opts)
    else:
        r = distance_array
    w_multipole, potential_terms = calc_cross_multipole_potential(r, multipole_terms, total_only=False, nondimensional=True)

    # Fit K = C_mie * epsilon and the attractive exponent against the multipole potential
    Cmie = prefactor(bead_dict_new["lambdar"], bead_dict_new["lambdaa"])
    params, var_matrix = spo.curve_fit(lambda x, K, lambdaa: log_mie_attractive(
        r, bead_dict_new, bead_dict_new, lambda_a=lambdaa, Kprefactor=K, shape_factor_scale=shape_factor_scale),
                                       r,
                                       np.log(-w_multipole),
                                       p0=[bead_dict_new["epsilon"] * Cmie, bead_dict_new["lambdaa"]],
                                       bounds=(0.0, np.inf))
    K = params[0]
    bead_dict_new["lambdaa"] = params[1]

    if mie_vdw is not None:
        # Choose lambdar so the Mie vdW-like parameter is preserved
        logger.info("Overwrite given lambdar with Mie potential relationship to vdW like parameter.")
        bead_dict_new["lambdar"] = calc_lambdarij_from_lambda_aij(bead_dict_new["lambdaa"], mie_vdw)

    if shape_factor_scale:
        bead_dict_new["epsilon"] = K / prefactor(bead_dict_new["lambdar"], bead_dict_new["lambdaa"]) / bead_dict_new["Sk"]**2
    else:
        bead_dict_new["epsilon"] = K / prefactor(bead_dict_new["lambdar"], bead_dict_new["lambdaa"])

    if not nondimensional:
        bead_dict_new = dict_dimensions(bead_dict_new, temperature, dimensions=True)

    return bead_dict_new
def extended_combining_rules_fitting(bead_library, temperature, shape_factor_scale=False, distance_opts=None, polarizability_opts=None):
    r"""
    Calculate and output the cross-interaction parameters for the provided dictionary of beads utilizing the Mie potential.

    Parameters
    ----------
    bead_library : dict
        Dictionary of dictionaries with Mie and multipole parameters for each bead in the desired system.

        - epsilon (float) [K] Energy parameter scaled by Boltzmann constant
        - sigma (float) [Å] Size parameter
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - polarizability (float) [:math:`Å^3`] This quantity is used as a free parameter in combining rule
        - charge (float) [-] Charge of bead fragment in elementary charges
        - dipole (float) [Debye] Dipole moment of bead fragment
        - quadrupole (float) [Debye*Å] Quadrupole moment of bead fragment
        - ionization_energy (float) [kcal/mol] Ionization energy of bead fragment

    temperature : float
        The temperature in [K] of the system
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    distance_opts : dict, Optional, default=None
        Optional keywords for creating r array used for calculation or fitting
    polarizability_opts : dict, Optional, default=None
        Dictionary of keyword arguments used in :func:`~mapsci.multipole_mie_combining_rules.calc_polarizability`

    Returns
    -------
    cross_dict : dict
        Dictionary with "epsilon" value for cross interaction for the given beads.
    summary : dict
        Dictionary of bead types and details of their interactions with each of the other bead types. For each pair a dictionary entry is present for:

        - epsilon_saft (float) cross interaction with SAFT combining rules
        - kij_saft (float) binary interaction parameter for the energy parameter with SAFT combining rules
        - epsilon (float) cross interaction from multipole curve fit
        - kij (float) binary interaction parameter from multipole curve fit
        - lambdar (float) repulsive exponent from multipole curve fit
        - lambdaa (float) attractive exponent from multipole curve fit
        - polarizability_* (float) polarizabilities for the two beads
    """
    # None sentinels avoid the shared-mutable-default pitfall; copying
    # polarizability_opts ensures the caller's dictionary is never mutated
    # (the previous implementation wrote shape_factor_scale into it in place).
    if distance_opts is None:
        distance_opts = {}
    polarizability_opts = {} if polarizability_opts is None else dict(polarizability_opts)
    polarizability_opts["shape_factor_scale"] = shape_factor_scale

    bead_library_new = dict_dimensions(bead_library.copy(), temperature, dimensions=False)
    bead_library_new = calc_polarizability(bead_library_new,
                                            distance_opts=distance_opts,
                                            polarizability_opts=polarizability_opts,
                                            nondimensional=True)

    # Calculate cross-interaction parameters for every unique bead pair
    dict_cross = {}
    dict_summary = {}
    beads = list(bead_library_new.keys())
    for i, bead1 in enumerate(beads):
        if len(beads[i + 1:]) > 0:
            dict_cross[bead1] = {}
            dict_summary[bead1] = {}

        for bead2 in beads[i + 1:]:
            cross_out = fit_multipole_cross_interaction_parameter(bead_library_new[bead1],
                                                                   bead_library_new[bead2],
                                                                   distance_opts=distance_opts,
                                                                   shape_factor_scale=shape_factor_scale,
                                                                   nondimensional=True,
                                                                   temperature=temperature)

            # Redimensionalize the fitted quantities for the summary output
            pol_i = float_dimensions(bead_library_new[bead1]["polarizability"], "polarizability", temperature)
            pol_j = float_dimensions(bead_library_new[bead2]["polarizability"], "polarizability", temperature)
            epsilon_saft = float_dimensions(cross_out["epsilon_saft"], "epsilon", temperature)
            epsilon_fit = float_dimensions(cross_out["epsilon"], "epsilon", temperature)

            dict_cross[bead1][bead2] = {
                "epsilon": cross_out["epsilon"],
                "lambdar": cross_out["lambdar"],
                "lambdaa": cross_out["lambdaa"]
            }
            dict_summary[bead1][bead2] = {
                "epsilon_saft": epsilon_saft,
                "kij_saft": cross_out["kij_saft"],
                "epsilon": epsilon_fit,
                "kij": cross_out["kij"],
                "lambdar": cross_out["lambdar"],
                "lambdaa": cross_out["lambdaa"],
                "polarizability_" + bead1: pol_i,
                "polarizability_" + bead2: pol_j,
            }

    dict_cross = dict_dimensions(dict_cross.copy(), temperature)

    return dict_cross, dict_summary
def extended_combining_rules_analytical(bead_library, temperature, shape_factor_scale=False, distance_opts=None, polarizability_opts=None):
    r"""
    Calculate and output the cross-interaction energy parameter for the provided dictionary of beads utilizing the Mie potential, using the Analytical (i.e. integral) method

    Parameters
    ----------
    bead_library : dict
        Dictionary of dictionaries with Mie and multipole parameters for each bead in the desired system.

        - epsilon (float) [K] Energy parameter scaled by Boltzmann constant
        - sigma (float) [Å] Size parameter
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - polarizability (float) [:math:`Å^3`] This quantity is used as a free parameter in combining rule
        - charge (float) [-] Charge of bead fragment in elementary charges
        - dipole (float) [Debye] Dipole moment of bead fragment
        - quadrupole (float) [Debye*Å] Quadrupole moment of bead fragment
        - ionization_energy (float) [kcal/mol] Ionization energy of bead fragment

    temperature : float
        The temperature in [K] of the system
    shape_factor_scale : bool, Optional, default=False
        Scale energy parameter based on shape factor epsilon*Si*Sj
    distance_opts : dict, Optional, default=None
        Optional keywords for creating r array used for calculation or fitting
    polarizability_opts : dict, Optional, default=None
        Dictionary of keyword arguments used in :func:`~mapsci.multipole_mie_combining_rules.calc_polarizability`

    Returns
    -------
    cross_dict : dict
        Dictionary with `epsilon` value for cross interaction between given beads.
    summary : dict
        Dictionary of bead types and details of their interactions with each of the other bead types. For each pair a dictionary entry is present for:

        - epsilon_saft (float) cross interaction with SAFT combining rules
        - kij_saft (float) binary interaction parameter for the energy parameter with SAFT combining rules
        - epsilon (float) cross interaction from multipole analytical solution
        - kij (float) binary interaction parameter from multipole analytical solution
        - lambdar (float) repulsive exponent from SAFT combining rules
        - lambdaa (float) attractive exponent from SAFT combining rules
        - polarizability_* (float) polarizabilities for the two beads
    """
    # `is None` (not `== None`) is the correct identity test for None.
    if temperature is None or np.isnan(temperature):
        raise ValueError("Temperature must be a real number, given {}.".format(temperature))

    # None sentinels avoid the shared-mutable-default pitfall; copying
    # polarizability_opts ensures the caller's dictionary is never mutated.
    if distance_opts is None:
        distance_opts = {}
    polarizability_opts = {} if polarizability_opts is None else dict(polarizability_opts)
    polarizability_opts["shape_factor_scale"] = shape_factor_scale

    bead_library_new = dict_dimensions(bead_library.copy(), temperature, dimensions=False)
    bead_library_new = calc_polarizability(bead_library_new,
                                            distance_opts=distance_opts,
                                            polarizability_opts=polarizability_opts,
                                            calculation_method="analytical",
                                            nondimensional=True)

    # Calculate cross-interaction parameters for every unique bead pair
    dict_cross = {}
    dict_summary = {}
    beads = list(bead_library_new.keys())
    for i, bead1 in enumerate(beads):
        beadA = bead_library_new[bead1]
        if len(beads[i + 1:]) > 0:
            dict_cross[bead1] = {}
            dict_summary[bead1] = {}

        for bead2 in beads[i + 1:]:
            beadB = bead_library_new[bead2]
            beadAB = mie_combining_rules(beadA, beadB)
            r = calc_distance_array(beadAB, **distance_opts)
            epsilon_tmp, _terms = solve_multipole_cross_interaction_integral(r[0],
                                                                               beadA,
                                                                               beadB,
                                                                               nondimensional=True,
                                                                               shape_factor_scale=shape_factor_scale)

            pol_i = float_dimensions(beadA["polarizability"], "polarizability", temperature)
            pol_j = float_dimensions(beadB["polarizability"], "polarizability", temperature)
            # SAFT cross energy parameter corrected for size asymmetry
            eps_saft_tmp = beadAB["epsilon"] * np.sqrt(beadA["sigma"]**3 * beadB["sigma"]**3) / beadAB["sigma"]**3
            epsilon_saft = float_dimensions(eps_saft_tmp, "epsilon", temperature)
            epsilon_analytical = float_dimensions(epsilon_tmp, "epsilon", temperature)
            # Binary interaction parameters relative to the geometric-mean value
            kij_saft = 1 - eps_saft_tmp / beadAB["epsilon"]
            kij_analytical = 1 - epsilon_tmp / beadAB["epsilon"]

            dict_cross[bead1][bead2] = {"epsilon": epsilon_tmp}
            dict_summary[bead1][bead2] = {
                "epsilon_saft": epsilon_saft,
                "kij_saft": kij_saft,
                "epsilon": epsilon_analytical,
                "kij": kij_analytical,
                "lambdar": beadAB["lambdar"],
                "lambdaa": beadAB["lambdaa"],
                "polarizability_{}".format(bead1): pol_i,
                "polarizability_{}".format(bead2): pol_j,
            }

    dict_cross = dict_dimensions(dict_cross.copy(), temperature)

    return dict_cross, dict_summary
def dict_dimensions(parameters, temperature, dimensions=True, conv_custom={}):
    r"""
    Obtain instructions for systems used in calculation.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    parameters : dict
        This dictionary of bead types contains a dictionary of parameters for each.

        - epsilon (float) Nondimensionalize energy parameter in [K], :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - sigma (float) Nondimensionalize size parameter in [Å], :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - polarizability (float) Nondimensionalize polarizability of bead in [:math:`Å^3`]. :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
        - charge (float) Nondimensionalize charge of bead in [e]. :math:`q'=q/e`
        - dipole (float) Nondimensionalize dipole of bead in [Debye]. :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
        - quadrupole (float) Nondimensionalize quadrupole of bead in [Debye*Å]. :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
        - ionization_energy (float) Nondimensionalize ionization_energy of bead in [kcal/mol]. :math:`I'=I/(3k_{B}T)`

    temperature : float
        The temperature of the system
    dimensions : bool, Optional, default=True
        If true, will add SI-units to multipole parameters, if False, will nondimensionalize.
    conv_custom : dict, Optional, default={}
        This dictionary may have the same parameter names used for the beads and overwrite default values.

    Returns
    -------
    new_parameters : dict
        This dictionary of bead types contains a dictionary of parameters for each.

    Raises
    ------
    ValueError
        If ``temperature`` is None or NaN.
    """
    # `is None` (not `== None`) is the correct identity test for None.
    if temperature is None or np.isnan(temperature):
        raise ValueError("Temperature must be a real number, given {}.".format(temperature))

    # Physical constants / unit conversions
    C_l = 1e+10  # [Ang/m]
    C_D = 3.33564e-20  # [C*Ang/Debye]
    C_e = 6.9477e-21  # [J / kcal/mol]
    C_eV = 1.602176565e-19  # [J/eV]
    e0 = 8.854187817e-12 * C_e / C_l  # [C^2/(J*m)] to [C^2*mol/(kcal*Ang)]
    kb = 1.38064852e-23 / C_e  # [J/K] to [kcal/(mol*K)] Boltzmann constant
    perm = (4 * np.pi * e0)**2  # [C^2*mol/(kcal*Ang)]^2
    K = 3 * kb * temperature  # [kcal/mol]

    # Scale factor applied when nondimensionalizing each supported parameter
    conv = {"epsilon": 1 / (3 * temperature),
            "ionization_energy": 1 / K,
            "sigma": np.sqrt(perm) * K / C_eV**2,
            "dipole": C_D * np.sqrt(perm) * K / C_eV**3,
            "quadrupole": C_D * perm * K**2 / C_eV**5,
            "charge": 1,
            "polarizability": 4 * np.pi * e0 * perm * K**3 / C_eV**6}
    # "polarizability": perm*K**3/C_eV**6} Using the polarizability is in large units
    conv.update(conv_custom)

    def _convert(value, key):
        # Recurse into nested dictionaries (any depth); scale leaf values whose
        # key has a conversion factor, and pass all other values through.
        if isinstance(value, dict):
            return {k: _convert(v, k) for k, v in value.items()}
        if key in conv:
            return value / conv[key] if dimensions else value * conv[key]
        return value

    return {k: _convert(v, k) for k, v in parameters.items()}
def float_dimensions(parameter, parameter_type, temperature, dimensions=True, conv_custom={}):
    r"""
    Obtain instructions for systems used in calculation.

    Nondimensional parameters are scaled using the following physical constants: vacuum permittivity, :math:`\varepsilon_{0}`, Boltzmann constant, :math:`k_{B}`, and elementary charge, :math:`e`.

    Parameters
    ----------
    parameter : float
        Value of parameter to be converted.
    parameter_type : str
        Parameter name, can be:

        - epsilon (float) Nondimensionalize energy parameter in [K], :math:`\epsilon'=\epsilon/(3k_{B}T)`
        - sigma (float) Nondimensionalize size parameter in [Å], :math:`\sigma'=\sigma (4 \pi \varepsilon_{0}) 3k_{B}T e^{-2}`
        - lambdar (float) Repulsive exponent
        - lambdaa (float) Attractive exponent
        - polarizability (float) Nondimensionalize polarizability of bead in [:math:`Å^3`]. :math:`\alpha'=\alpha (4 \pi \varepsilon_{0}) 3k_{B}T e^{-6}`, where the dimensionalized version is the polarizability volume
        - charge (float) Nondimensionalize charge of bead in [e]. :math:`q'=q/e`
        - dipole (float) Nondimensionalize dipole of bead in [Debye]. :math:`\mu'=\mu (4 \pi \varepsilon_{0}) 3k_{B}T e^{-3}`
        - quadrupole (float) Nondimensionalize quadrupole of bead in [Debye*Å]. :math:`Q'=Q (4 \pi \varepsilon_{0})^{2} (3k_{B}T)^{2} e^{-5}`
        - ionization_energy (float) Nondimensionalize ionization_energy of bead in [kcal/mol]. :math:`I'=I/(3k_{B}T)`

    temperature : float
        The temperature of the system
    dimensions : bool, Optional, default=True
        If true, will add SI-units to multipole parameters, if False, will nondimensionalize.
    conv_custom : dict, Optional, default={}
        This dictionary may have the same parameter names used for the beads and overwrite default values.

    Returns
    -------
    new_parameter : float
        Converted parameter

    Raises
    ------
    ValueError
        If ``temperature`` is None or NaN.
    KeyError
        If ``parameter_type`` has no known conversion factor.
    """
    # `is None` (not `== None`) is the correct identity test for None.
    if temperature is None or np.isnan(temperature):
        raise ValueError("Temperature must be a real number, given {}.".format(temperature))

    # Physical constants / unit conversions
    C_l = 1e+10  # [Ang/m]
    C_D = 3.33564e-20  # [C*Ang/Debye]
    C_e = 6.9477e-21  # [J / kcal/mol]
    C_eV = 1.602176565e-19  # [J/eV]
    e0 = 8.854187817e-12 * C_e / C_l  # [C^2/(J*m)] to [C^2*mol/(kcal*Ang)]
    kb = 1.38064852e-23 / C_e  # [J/K] to [kcal/(mol*K)] Boltzmann constant
    perm = (4 * np.pi * e0)**2  # [C^2*mol/(kcal*Ang)]^2
    K = 3 * kb * temperature  # [kcal/mol]

    # Scale factor applied when nondimensionalizing each supported parameter
    conv = {"epsilon": 1 / (3 * temperature),
            "ionization_energy": 1 / K,
            "sigma": np.sqrt(perm) * K / C_eV**2,
            "dipole": C_D * np.sqrt(perm) * K / C_eV**3,
            "quadrupole": C_D * perm * K**2 / C_eV**5,
            "charge": 1,
            "polarizability": 4 * np.pi * e0 * perm * K**3 / C_eV**6}
    conv.update(conv_custom)

    if parameter_type not in conv:
        raise KeyError("Parameter, {}, is not supported. Must be one of: {}".format(parameter_type, list(conv.keys())))

    if dimensions:
        return parameter / conv[parameter_type]
    return parameter * conv[parameter_type]
| [
"logging.getLogger",
"numpy.mean",
"numpy.sqrt",
"scipy.optimize.brentq",
"numpy.size",
"numpy.log",
"numpy.any",
"mapsci.quick_plots.plot_potential",
"numpy.diag",
"numpy.array",
"numpy.linspace",
"numpy.zeros",
"numpy.sum",
"numpy.isnan",
"numpy.finfo",
"numpy.all",
"mapsci.quick_p... | [((186, 213), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (203, 213), False, 'import logging\n'), ((2156, 2199), 'numpy.linspace', 'np.linspace', (['rm', '(max_factor * rm)'], {'num': '(10000)'}), '(rm, max_factor * rm, num=10000)\n', (2167, 2199), True, 'import numpy as np\n'), ((5069, 5113), 'numpy.sqrt', 'np.sqrt', (["(bead1['epsilon'] * bead2['epsilon'])"], {}), "(bead1['epsilon'] * bead2['epsilon'])\n", (5076, 5113), True, 'import numpy as np\n'), ((10694, 10735), 'numpy.mean', 'np.mean', (["[bead1['sigma'], bead2['sigma']]"], {}), "([bead1['sigma'], bead2['sigma']])\n", (10701, 10735), True, 'import numpy as np\n'), ((20909, 20920), 'numpy.all', 'np.all', (['tmp'], {}), '(tmp)\n', (20915, 20920), True, 'import numpy as np\n'), ((37979, 38048), 'numpy.array', 'np.array', (['[t11, t12, t21, t22, t23, t24, t31, t32, t41]'], {'dtype': 'object'}), '([t11, t12, t21, t22, t23, t24, t31, t32, t41], dtype=object)\n', (37987, 38048), True, 'import numpy as np\n'), ((39207, 39218), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (39215, 39218), True, 'import numpy as np\n'), ((39249, 39276), 'numpy.sum', 'np.sum', (['multipole_terms[:1]'], {}), '(multipole_terms[:1])\n', (39255, 39276), True, 'import numpy as np\n'), ((39306, 39334), 'numpy.sum', 'np.sum', (['multipole_terms[2:6]'], {}), '(multipole_terms[2:6])\n', (39312, 39334), True, 'import numpy as np\n'), ((39364, 39392), 'numpy.sum', 'np.sum', (['multipole_terms[6:8]'], {}), '(multipole_terms[6:8])\n', (39370, 39392), True, 'import numpy as np\n'), ((39422, 39448), 'numpy.sum', 'np.sum', (['multipole_terms[8]'], {}), '(multipole_terms[8])\n', (39428, 39448), True, 'import numpy as np\n'), ((42226, 42257), 'numpy.sum', 'np.sum', (['potential_terms'], {'axis': '(0)'}), '(potential_terms, axis=0)\n', (42232, 42257), True, 'import numpy as np\n'), ((64610, 64638), 'numpy.sum', 'np.sum', (['multipole_int_terms0'], {}), '(multipole_int_terms0)\n', (64616, 
64638), True, 'import numpy as np\n'), ((69638, 69798), 'scipy.optimize.brentq', 'spo.brentq', (['_obj_energy_parameter_from_integral', 'eps_min', 'eps_max'], {'args': '(bead1, bead2, beadAB_new, Cmultipole, sigma0, shape_factor_scale)', 'xtol': '(1e-12)'}), '(_obj_energy_parameter_from_integral, eps_min, eps_max, args=(\n bead1, bead2, beadAB_new, Cmultipole, sigma0, shape_factor_scale), xtol\n =1e-12)\n', (69648, 69798), True, 'import scipy.optimize as spo\n'), ((4903, 4959), 'numpy.sqrt', 'np.sqrt', (["((bead1['lambdar'] - 3) * (bead2['lambdar'] - 3))"], {}), "((bead1['lambdar'] - 3) * (bead2['lambdar'] - 3))\n", (4910, 4959), True, 'import numpy as np\n'), ((4988, 5044), 'numpy.sqrt', 'np.sqrt', (["((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))"], {}), "((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))\n", (4995, 5044), True, 'import numpy as np\n'), ((10816, 10860), 'numpy.sqrt', 'np.sqrt', (["(bead1['epsilon'] * bead2['epsilon'])"], {}), "(bead1['epsilon'] * bead2['epsilon'])\n", (10823, 10860), True, 'import numpy as np\n'), ((12751, 12801), 'numpy.sqrt', 'np.sqrt', (["(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)"], {}), "(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)\n", (12758, 12801), True, 'import numpy as np\n'), ((12879, 12935), 'numpy.sqrt', 'np.sqrt', (["((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))"], {}), "((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))\n", (12886, 12935), True, 'import numpy as np\n'), ((21004, 21015), 'numpy.any', 'np.any', (['tmp'], {}), '(tmp)\n', (21010, 21015), True, 'import numpy as np\n'), ((21970, 21987), 'numpy.isnan', 'np.isnan', (['pol_tmp'], {}), '(pol_tmp)\n', (21978, 21987), True, 'import numpy as np\n'), ((29322, 29379), 'mapsci.quick_plots.plot_potential', 'plot_potential', (['r', 'w_mie'], {'plot_opts': 'plot_opts', 'show': '(False)'}), '(r, w_mie, plot_opts=plot_opts, show=False)\n', (29336, 29379), False, 'from mapsci.quick_plots import plot_potential, plot_multipole_potential\n'), ((29708, 29769), 
'mapsci.quick_plots.plot_potential', 'plot_potential', (['r', 'w_mie_fit'], {'plot_opts': 'plot_opts', 'show': '(False)'}), '(r, w_mie_fit, plot_opts=plot_opts, show=False)\n', (29722, 29769), False, 'from mapsci.quick_plots import plot_potential, plot_multipole_potential\n'), ((30363, 30434), 'mapsci.quick_plots.plot_multipole_potential', 'plot_multipole_potential', (['r', 'potential'], {'potential_terms': 'potential_terms'}), '(r, potential, potential_terms=potential_terms)\n', (30387, 30434), False, 'from mapsci.quick_plots import plot_potential, plot_multipole_potential\n'), ((41454, 41478), 'numpy.size', 'np.size', (['multipole_terms'], {}), '(multipole_terms)\n', (41461, 41478), True, 'import numpy as np\n'), ((41511, 41655), 'numpy.array', 'np.array', (['[-multipole_terms[0] / r ** 4.0, -multipole_terms[1] / r ** 6.0, -\n multipole_terms[2] / r ** 8.0, -multipole_terms[3] / r ** 10.0]'], {}), '([-multipole_terms[0] / r ** 4.0, -multipole_terms[1] / r ** 6.0, -\n multipole_terms[2] / r ** 8.0, -multipole_terms[3] / r ** 10.0])\n', (41519, 41655), True, 'import numpy as np\n'), ((63974, 63998), 'numpy.size', 'np.size', (['multipole_terms'], {}), '(multipole_terms)\n', (63981, 63998), True, 'import numpy as np\n'), ((81794, 81814), 'numpy.log', 'np.log', (['(-w_multipole)'], {}), '(-w_multipole)\n', (81800, 81814), True, 'import numpy as np\n'), ((87614, 87632), 'numpy.log', 'np.log', (['Kprefactor'], {}), '(Kprefactor)\n', (87620, 87632), True, 'import numpy as np\n'), ((93182, 93202), 'numpy.log', 'np.log', (['(-w_multipole)'], {}), '(-w_multipole)\n', (93188, 93202), True, 'import numpy as np\n'), ((101479, 101500), 'numpy.isnan', 'np.isnan', (['temperature'], {}), '(temperature)\n', (101487, 101500), True, 'import numpy as np\n'), ((106419, 106440), 'numpy.isnan', 'np.isnan', (['temperature'], {}), '(temperature)\n', (106427, 106440), True, 'import numpy as np\n'), ((110749, 110770), 'numpy.isnan', 'np.isnan', (['temperature'], {}), '(temperature)\n', 
(110757, 110770), True, 'import numpy as np\n'), ((10767, 10817), 'numpy.sqrt', 'np.sqrt', (["(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)"], {}), "(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)\n", (10774, 10817), True, 'import numpy as np\n'), ((10894, 10950), 'numpy.sqrt', 'np.sqrt', (["((bead1['lambdar'] - 3) * (bead2['lambdar'] - 3))"], {}), "((bead1['lambdar'] - 3) * (bead2['lambdar'] - 3))\n", (10901, 10950), True, 'import numpy as np\n'), ((10975, 11031), 'numpy.sqrt', 'np.sqrt', (["((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))"], {}), "((bead1['lambdaa'] - 3) * (bead2['lambdaa'] - 3))\n", (10982, 11031), True, 'import numpy as np\n'), ((12800, 12841), 'numpy.mean', 'np.mean', (["[bead1['sigma'], bead2['sigma']]"], {}), "([bead1['sigma'], bead2['sigma']])\n", (12807, 12841), True, 'import numpy as np\n'), ((12953, 12997), 'numpy.sqrt', 'np.sqrt', (["(bead1['epsilon'] * bead2['epsilon'])"], {}), "(bead1['epsilon'] * bead2['epsilon'])\n", (12960, 12997), True, 'import numpy as np\n'), ((25922, 25941), 'numpy.diag', 'np.diag', (['var_matrix'], {}), '(var_matrix)\n', (25929, 25941), True, 'import numpy as np\n'), ((33012, 33029), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (33020, 33029), True, 'import numpy as np\n'), ((33531, 33555), 'numpy.isnan', 'np.isnan', (['polarizability'], {}), '(polarizability)\n', (33539, 33555), True, 'import numpy as np\n'), ((41682, 41706), 'numpy.size', 'np.size', (['multipole_terms'], {}), '(multipole_terms)\n', (41689, 41706), True, 'import numpy as np\n'), ((41739, 42058), 'numpy.array', 'np.array', (['[-multipole_terms[0] / r ** 4.0, -multipole_terms[1] / r ** 4.0, -\n multipole_terms[2] / r ** 6.0, -multipole_terms[3] / r ** 6.0, -\n multipole_terms[4] / r ** 6.0, -multipole_terms[5] / r ** 6.0, -\n multipole_terms[6] / r ** 8.0, -multipole_terms[7] / r ** 8.0, -\n multipole_terms[8] / r ** 10.0]'], {}), '([-multipole_terms[0] / r ** 4.0, -multipole_terms[1] / r ** 4.0, -\n multipole_terms[2] / r ** 
6.0, -multipole_terms[3] / r ** 6.0, -\n multipole_terms[4] / r ** 6.0, -multipole_terms[5] / r ** 6.0, -\n multipole_terms[6] / r ** 8.0, -multipole_terms[7] / r ** 8.0, -\n multipole_terms[8] / r ** 10.0])\n', (41747, 42058), True, 'import numpy as np\n'), ((64037, 64115), 'numpy.array', 'np.array', (['[sigma0 ** -1, sigma0 ** -3 / 3, sigma0 ** -5 / 5, sigma0 ** -7 / 7]'], {}), '([sigma0 ** -1, sigma0 ** -3 / 3, sigma0 ** -5 / 5, sigma0 ** -7 / 7])\n', (64045, 64115), True, 'import numpy as np\n'), ((64125, 64149), 'numpy.size', 'np.size', (['multipole_terms'], {}), '(multipole_terms)\n', (64132, 64149), True, 'import numpy as np\n'), ((83901, 83945), 'numpy.sqrt', 'np.sqrt', (["(bead1['epsilon'] * bead2['epsilon'])"], {}), "(bead1['epsilon'] * bead2['epsilon'])\n", (83908, 83945), True, 'import numpy as np\n'), ((83988, 84032), 'numpy.sqrt', 'np.sqrt', (["(bead1['epsilon'] * bead2['epsilon'])"], {}), "(bead1['epsilon'] * bead2['epsilon'])\n", (83995, 84032), True, 'import numpy as np\n'), ((87646, 87663), 'numpy.log', 'np.log', (['(sigma / r)'], {}), '(sigma / r)\n', (87652, 87663), True, 'import numpy as np\n'), ((33272, 33289), 'numpy.finfo', 'np.finfo', (['"""float"""'], {}), "('float')\n", (33280, 33289), True, 'import numpy as np\n'), ((64188, 64361), 'numpy.array', 'np.array', (['[sigma0 ** -1, sigma0 ** -1, sigma0 ** -3 / 3, sigma0 ** -3 / 3, sigma0 ** \n -3 / 3, sigma0 ** -3 / 3, sigma0 ** -5 / 5, sigma0 ** -5 / 5, sigma0 **\n -7 / 7]'], {}), '([sigma0 ** -1, sigma0 ** -1, sigma0 ** -3 / 3, sigma0 ** -3 / 3, \n sigma0 ** -3 / 3, sigma0 ** -3 / 3, sigma0 ** -5 / 5, sigma0 ** -5 / 5,\n sigma0 ** -7 / 7])\n', (64196, 64361), True, 'import numpy as np\n'), ((82939, 82989), 'numpy.sqrt', 'np.sqrt', (["(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)"], {}), "(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)\n", (82946, 82989), True, 'import numpy as np\n'), ((83535, 83585), 'numpy.sqrt', 'np.sqrt', (["(bead1['sigma'] ** 3 * bead2['sigma'] ** 3)"], {}), "(bead1['sigma'] 
** 3 * bead2['sigma'] ** 3)\n", (83542, 83585), True, 'import numpy as np\n'), ((107070, 107083), 'numpy.sqrt', 'np.sqrt', (['perm'], {}), '(perm)\n', (107077, 107083), True, 'import numpy as np\n'), ((111400, 111413), 'numpy.sqrt', 'np.sqrt', (['perm'], {}), '(perm)\n', (111407, 111413), True, 'import numpy as np\n'), ((107123, 107136), 'numpy.sqrt', 'np.sqrt', (['perm'], {}), '(perm)\n', (107130, 107136), True, 'import numpy as np\n'), ((111453, 111466), 'numpy.sqrt', 'np.sqrt', (['perm'], {}), '(perm)\n', (111460, 111466), True, 'import numpy as np\n'), ((48532, 48546), 'numpy.sqrt', 'np.sqrt', (['(b - c)'], {}), '(b - c)\n', (48539, 48546), True, 'import numpy as np\n'), ((103366, 103416), 'numpy.sqrt', 'np.sqrt', (["(beadA['sigma'] ** 3 * beadB['sigma'] ** 3)"], {}), "(beadA['sigma'] ** 3 * beadB['sigma'] ** 3)\n", (103373, 103416), True, 'import numpy as np\n'), ((49224, 49238), 'numpy.sqrt', 'np.sqrt', (['(b - c)'], {}), '(b - c)\n', (49231, 49238), True, 'import numpy as np\n'), ((49904, 49918), 'numpy.sqrt', 'np.sqrt', (['(b - c)'], {}), '(b - c)\n', (49911, 49918), True, 'import numpy as np\n'), ((50796, 50810), 'numpy.sqrt', 'np.sqrt', (['(b - c)'], {}), '(b - c)\n', (50803, 50810), True, 'import numpy as np\n')] |
import cv2
import sys
import numpy as np
def videoAnnotate(vin):
    """Combine each frame of *vin* with the matching still from ``imgs/``.

    For frame N, reads ``imgs/image_N.jpg``, pastes it next to the video frame
    on a 2560x1440 canvas, saves the canvas as ``videoImgs/image_N.jpg``, and
    writes the canvas (downscaled to 1280x720) to ``combined_<vin>``.

    Assumes input video frames are 1280x720 -- TODO confirm; larger frames
    will not fit the hard-coded canvas slots.

    Args:
        vin: Path of the input video file.
    """
    print('video in file: {}'.format(vin))
    inFile = cv2.VideoCapture(vin)
    fOutname = '_'.join(['combined', vin])
    print('video out file: {}'.format(fOutname))

    # Bail out early instead of looping over a capture that never opened.
    if not inFile.isOpened():
        print("Error opening video stream on file")
        return

    fps = 20
    # cv2 property ids 3/4 are the frame width/height of the input stream.
    frame_size = (int(inFile.get(3)), int(inFile.get(4)))
    print("frame_size: {}".format(frame_size))
    writer = cv2.VideoWriter(fOutname,
                             cv2.VideoWriter_fourcc(*'MP4V'), fps, frame_size, True)

    imgCnt = 0
    try:
        while inFile.isOpened():
            ret, frame = inFile.read()
            if not ret:
                # No more frames: normal end of the input video.
                print("end of mp4 video file conversion")
                break

            imgCnt += 1
            fname = "_".join(["image", str(imgCnt)])
            imgFname = "/".join(["videoImgs", fname]) + ".jpg"
            imgFname2 = "/".join(["imgs", fname]) + ".jpg"
            print("2nd image file name: {}".format(imgFname2))
            frame2 = cv2.imread(imgFname2)

            # Paste the two 1280x720 images side by side on an empty canvas.
            vis = np.uint8(np.zeros([1440, 2560, 3]))
            vis[:720, :1280, :3] = frame
            vis[:720, 1280:2560, :3] = frame2[:720, :1280, :3]

            cv2.imwrite(imgFname, vis)
            # Downscale the canvas back to the writer's frame size.
            writer.write(cv2.resize(vis, (1280, 720)))
    finally:
        # Release both handles even if reading/writing raises; the original
        # code leaked the capture and only released the writer on clean exit.
        writer.release()
        inFile.release()
# Script entry point: annotate the video passed as the first CLI argument.
# Checking len(sys.argv) fixes the original `sys.argv[1] is not None` test,
# which raised IndexError (instead of printing the usage hint) when no
# argument was supplied.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        vin = sys.argv[1]
        videoAnnotate(vin)
    else:
        print("Please input the path of the input file")
| [
"cv2.imwrite",
"numpy.zeros",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.resize",
"cv2.imread"
] | [((134, 155), 'cv2.VideoCapture', 'cv2.VideoCapture', (['vin'], {}), '(vin)\n', (150, 155), False, 'import cv2\n'), ((716, 747), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (738, 747), False, 'import cv2\n'), ((1810, 1831), 'cv2.imread', 'cv2.imread', (['imgFname2'], {}), '(imgFname2)\n', (1820, 1831), False, 'import cv2\n'), ((2296, 2324), 'cv2.imwrite', 'cv2.imwrite', (['imgFname', 'frame'], {}), '(imgFname, frame)\n', (2307, 2324), False, 'import cv2\n'), ((2404, 2434), 'cv2.resize', 'cv2.resize', (['frame', '(1280, 720)'], {}), '(frame, (1280, 720))\n', (2414, 2434), False, 'import cv2\n'), ((1943, 1968), 'numpy.zeros', 'np.zeros', (['[1440, 2560, 3]'], {}), '([1440, 2560, 3])\n', (1951, 1968), True, 'import numpy as np\n')] |
import tarfile
from datetime import timedelta
from pathlib import Path
from time import perf_counter
import PIL.Image
import h5py
import numpy as np
from tqdm import tqdm
from torchdata.logger import log
from torchdata.utils import download_file, remote_file, md5sum
# Canonical ordering of the 16 MPII joints; index i in a pose array refers to
# the joint named MPII_Joint_Names[i].
MPII_Joint_Names = ['right_ankle', 'right_knee', 'right_hip', 'left_hip',
                    'left_knee', 'left_ankle', 'pelvis', 'spine',
                    'neck', 'head_top', 'right_wrist', 'right_elbow',
                    'right_shoulder', 'left_shoulder', 'left_elbow', 'left_wrist']
# Parent joint index for each joint (presumably the kinematic tree used for
# drawing limbs -- verify against consumers of this constant).
MPII_Joint_Parents = [1, 2, 6, 6, 3, 4, 6, 6, 7, 8, 11, 12, 8, 8, 13, 14]
# Index permutation mapping each joint to its left/right mirror counterpart
# (e.g. 0 'right_ankle' <-> 5 'left_ankle'), for horizontally flipped poses.
MPII_Joint_Horizontal_Flips = [5, 4, 3, 2, 1, 0, 6, 7, 8, 9, 15, 14, 13, 12, 11, 10]

# Per-channel mean and standard deviation values for input images
# Channel order: red, green, blue
MPII_Image_Mean = [0.440442830324173, 0.4440267086029053, 0.4326828420162201]
MPII_Image_Stddev = [0.24576245248317719, 0.24096255004405975, 0.2468130737543106]

# Download URLs and MD5 checksums for the files making up the dataset.
MPII_Files = {
    # Archive file containing the images (12 GiB)
    'mpii_human_pose_v1.tar.gz': {
        'url': 'https://datasets.d2.mpi-inf.mpg.de/andriluka14cvpr/mpii_human_pose_v1.tar.gz',
        'md5': 'b6bc9c6869d3f035a5570b2e68ec84c4',
    },
    # All annotations (23 MiB)
    'annot.h5': {
        'url': 'https://github.com/princeton-vl/pose-hg-train/raw/4637618a1b162d80436bfd0b557833b5824cbb21/data/mpii/annot.h5',
        'md5': 'c0d0ba453709e37d632b4d4059e2799c',
    },
    # Validation set annotations (1 MiB)
    'valid.h5': {
        'url': 'https://github.com/princeton-vl/pose-hg-train/raw/4637618a1b162d80436bfd0b557833b5824cbb21/data/mpii/annot/valid.h5',
        'md5': 'd88b6828485168c1fb4c79a21995fdef',
    },
}
def validate_mpii_data_dir(data_dir, thorough=False):
    """Assert that *data_dir* contains an installed MPII Human Pose dataset.

    Args:
        data_dir: Directory to check (anything accepted by ``Path``).
        thorough (bool): If true, additionally verify the JPEG image count
            and the MD5 checksums of the annotation files.

    Raises:
        AssertionError: If an expected file/directory is missing, or (when
            ``thorough``) a content check fails.
    """
    root = Path(data_dir)
    assert root.is_dir()
    assert (root / 'images').is_dir()
    for annot_name in ('annot.h5', 'valid.h5'):
        assert (root / annot_name).is_file()
    if thorough:
        jpeg_count = len(list((root / 'images').glob('*.jpg')))
        assert jpeg_count == 24984
        for annot_name in ('annot.h5', 'valid.h5'):
            assert md5sum(root / annot_name, quiet=True) == MPII_Files[annot_name]['md5']
def install_mpii_dataset(data_dir, quiet=False, force=False):
    """Download and extract the MPII Human Pose dataset.

    Args:
        data_dir (str): The destination directory for installation.
        quiet (bool): If true, don't show progress bars. Other output may be
            suppressed by configuring the log level on `torchdata.logger.log`.
        force (bool): If true, skip checking whether the dataset is already
            installed.
    """
    if not force:
        try:
            # Exit early if it looks like the dataset has already been
            # downloaded and extracted.
            validate_mpii_data_dir(data_dir, thorough=True)
            return
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are not swallowed; any validation failure means (re)install.
            pass

    start_time = perf_counter()
    data_dir = Path(data_dir).absolute()
    log.info('Installing the MPII Human Pose dataset in {}:'.format(data_dir))
    data_dir.mkdir(parents=True, exist_ok=True)

    log.info('[1/3] Gathering files...')
    val_annots_file = data_dir / 'valid.h5'
    download_file(dest_path=val_annots_file, quiet=quiet, **MPII_Files['valid.h5'])
    all_annots_file = data_dir / 'annot.h5'
    download_file(dest_path=all_annots_file, quiet=quiet, **MPII_Files['annot.h5'])

    with remote_file(**MPII_Files['mpii_human_pose_v1.tar.gz'], quiet=quiet) as img_archive:
        with tarfile.open(img_archive, 'r:gz') as tar:
            log.info('[2/3] Loading archive metadata...')
            # NOTE(review): extractall does not sanitise member paths; limiting
            # extraction to './images/' members reduces but does not eliminate
            # path-traversal risk from a hostile archive.
            subdir_members = [member for member in tar.getmembers()
                              if member.name.startswith('./images/')]
            log.info('[3/3] Extracting images...')
            progress_bar = None
            if not quiet:
                progress_bar = tqdm(iterable=subdir_members, ascii=True, leave=False)
                subdir_members = progress_bar
            try:
                tar.extractall(path=str(data_dir), members=subdir_members)
            finally:
                # Close the progress bar even if extraction raises.
                if progress_bar is not None:
                    progress_bar.close()

    duration_seconds = round(perf_counter() - start_time)
    log.info('Installation finished in {}.'.format(str(timedelta(seconds=duration_seconds))))
def transform_keypoints(keypoints, matrix):
    """Transform 2D keypoints using the given 3x3 transformation matrix."""
    # Append a homogeneous coordinate of 1 along the last axis.
    pad_spec = [(0, 0)] * (np.ndim(keypoints) - 1) + [(0, 1)]
    homogeneous = np.pad(keypoints, pad_spec, 'constant', constant_values=1)
    # Apply the matrix, then drop the homogeneous component again.
    return np.matmul(homogeneous, matrix.T)[..., :2]
def normalised_coordinate_transform(size):
    """Return the 3x3 matrix mapping pixel coordinates in [0, size) to [-1, 1]."""
    scale = 2 / size
    offset = 1 / size - 1  # shifts pixel centres so the range is symmetric
    return np.array([
        [scale, 0, offset],
        [0, scale, offset],
        [0, 0, 1],
    ])
class MpiiData:
    """A helper class for working with MPII Human Pose data.

    Args:
        data_dir: The directory containing installed MPII data.
    """

    def __init__(self, data_dir):
        self.data_dir = Path(data_dir)
        validate_mpii_data_dir(self.data_dir)

        # `dataset[()]` reads a whole HDF5 dataset into memory; it replaces
        # the `.value` attribute, which was removed in h5py 3.0.
        with h5py.File(self.data_dir / 'annot.h5', 'r') as f:
            self.image_indices = f['/index'][()].astype(np.uint32)
            self.person_indices = f['/person'][()].astype(np.uint32)
            # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
            # exact type it aliased, so behaviour is unchanged.
            self.is_train = f['/istrain'][()].astype(bool)
            self.image_names = [imgname.tobytes().decode('ascii').split('\0')[0]
                                for imgname in f['/imgname'][()].astype(np.uint8)]
            self.subject_centres = f['/center'][()].astype(np.int32)
            self.subject_scales = f['/scale'][()].astype(np.float64)
            self.keypoints = f['/part'][()].astype(np.float64)
            # A keypoint counts as annotated only when neither coordinate is 0.
            tmp = self.keypoints[..., 0] * self.keypoints[..., 1]
            self.keypoint_masks = np.not_equal(tmp, 0, out=np.ndarray(tmp.shape, dtype=np.uint8))
            self.keypoint_visibilities = f['/visible'][()].astype(np.uint8)
            self.head_lengths = f['/normalize'][()].astype(np.float64)
        with h5py.File(self.data_dir / 'valid.h5', 'r') as f:
            val_image_indices = f['/index'][()].astype(np.uint32)
            val_person_indices = f['/person'][()].astype(np.uint32)

        self.train_indices = []
        self.val_indices = []
        self.test_indices = []
        # Separate the example indices into train, validation, and test sets.
        # valid.h5 lists its (image, person) pairs in annot.h5 order, so a
        # single forward-moving cursor (val_pos) is enough to match them up.
        for i in range(len(self.image_indices)):
            if self.is_train[i]:
                val_pos = len(self.val_indices)
                if (
                    val_pos < len(val_image_indices)
                    and self.image_indices[i] == val_image_indices[val_pos]
                    and self.person_indices[i] == val_person_indices[val_pos]
                ):
                    self.val_indices.append(i)
                else:
                    self.train_indices.append(i)
            else:
                self.test_indices.append(i)

    def __len__(self):
        return len(self.image_indices)

    def subset_indices(self, subset):
        """Return example indices for 'train', 'val', 'trainval', or 'test'."""
        if subset == 'train':
            return self.train_indices
        if subset == 'val':
            return self.val_indices
        if subset == 'trainval':
            return self.train_indices + self.val_indices
        if subset == 'test':
            return self.test_indices
        raise Exception('unrecognised subset: {}'.format(subset))

    def load_image(self, index):
        """Load the full original image."""
        image_name = self.image_names[index]
        image = PIL.Image.open(self.data_dir / 'images' / image_name, 'r')
        return image

    def get_bounding_box(self, index):
        """Return a bounding box for the subject in image space.

        Based on the cropping scheme of A. Newell et al.

        Args:
            index (int): Example index.

        Returns:
            Bounding box coordinates as a (left, upper, right, lower) tuple.
        """
        scale = self.subject_scales[index]
        cx = self.subject_centres[index, 0]
        cy = self.subject_centres[index, 1] + scale * 15
        half_size = (scale * 125)  # = (scale * 1.25 * 200) / 2
        return (cx - half_size, cy - half_size, cx + half_size, cy + half_size)

    def load_cropped_image(self, index, size=384, margin=0):
        """Load a cropped version of the image centred on the subject."""
        # +---------------+
        # |               |
        # |margin         |
        # |     +---+     |
        # |<--->|BB |     |
        # |     +---+     |
        # |    >|   |<    |
        # |     size      |
        # +---------------+
        bb = self.get_bounding_box(index)
        pad = margin * (bb[2] - bb[0]) / size
        crop_box = [bb[0] - pad, bb[1] - pad, bb[2] + pad, bb[3] + pad]
        out_size = size + 2 * margin
        image = self.load_image(index)
        image = image.crop(crop_box)
        image.thumbnail((out_size, out_size), PIL.Image.BILINEAR)
        if image.width != out_size:
            image = image.resize((out_size, out_size), PIL.Image.BILINEAR)
        return image

    def get_crop_transform(self, index, size=384, margin=0):
        """Build the matrix which transforms points from original to cropped image space."""
        bb = self.get_bounding_box(index)
        # Bug fix: the padding must be derived from the box *width*
        # (bb[2] - bb[0]), matching load_cropped_image. The previous
        # bb[2] - bb[1] subtracted a y coordinate from an x coordinate and
        # was only correct when the subject centre lay on the x == y diagonal.
        pad = margin * (bb[2] - bb[0]) / size
        crop_box = [bb[0] - pad, bb[1] - pad, bb[2] + pad, bb[3] + pad]
        out_size = size + 2 * margin
        k = out_size / (crop_box[2] - crop_box[0])
        m = np.eye(3, 3)
        # Translate so the crop origin sits at (0, 0)...
        m = np.matmul([
            [1, 0, -crop_box[0]],
            [0, 1, -crop_box[1]],
            [0, 0, 1],
        ], m)
        # ...then scale into output pixels (pixel centres at half-integers).
        m = np.matmul([
            [k, 0, k / 2 - 0.5],
            [0, k, k / 2 - 0.5],
            [0, 0, 1],
        ], m)
        return m

    def get_bb_transform(self, index):
        """Get the matrix for image space to normalised bounding box space transformation.

        In normalised space, (-1, -1) is the top-left corner of the bounding box, and (1, 1) is the
        bottom-right.
        """
        bb = self.get_bounding_box(index)
        m = np.eye(3, 3)
        m = np.matmul([
            [1, 0, -bb[0]],
            [0, 1, -bb[1]],
            [0, 0, 1],
        ], m)
        return np.matmul(normalised_coordinate_transform(bb[2] - bb[0]), m)
| [
"numpy.eye",
"tarfile.open",
"pathlib.Path",
"tqdm.tqdm",
"time.perf_counter",
"numpy.ndim",
"h5py.File",
"torchdata.logger.log.info",
"numpy.array",
"datetime.timedelta",
"numpy.matmul",
"numpy.ndarray",
"torchdata.utils.remote_file",
"numpy.pad",
"torchdata.utils.download_file",
"tor... | [((1798, 1812), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (1802, 1812), False, 'from pathlib import Path\n'), ((2959, 2973), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (2971, 2973), False, 'from time import perf_counter\n'), ((3146, 3182), 'torchdata.logger.log.info', 'log.info', (['"""[1/3] Gathering files..."""'], {}), "('[1/3] Gathering files...')\n", (3154, 3182), False, 'from torchdata.logger import log\n'), ((3231, 3310), 'torchdata.utils.download_file', 'download_file', ([], {'dest_path': 'val_annots_file', 'quiet': 'quiet'}), "(dest_path=val_annots_file, quiet=quiet, **MPII_Files['valid.h5'])\n", (3244, 3310), False, 'from torchdata.utils import download_file, remote_file, md5sum\n'), ((3359, 3438), 'torchdata.utils.download_file', 'download_file', ([], {'dest_path': 'all_annots_file', 'quiet': 'quiet'}), "(dest_path=all_annots_file, quiet=quiet, **MPII_Files['annot.h5'])\n", (3372, 3438), False, 'from torchdata.utils import download_file, remote_file, md5sum\n'), ((4532, 4591), 'numpy.pad', 'np.pad', (['keypoints', 'pad_width', '"""constant"""'], {'constant_values': '(1)'}), "(keypoints, pad_width, 'constant', constant_values=1)\n", (4538, 4591), True, 'import numpy as np\n'), ((4749, 4828), 'numpy.array', 'np.array', (['[[2 / size, 0, 1 / size - 1], [0, 2 / size, 1 / size - 1], [0, 0, 1]]'], {}), '([[2 / size, 0, 1 / size - 1], [0, 2 / size, 1 / size - 1], [0, 0, 1]])\n', (4757, 4828), True, 'import numpy as np\n'), ((3448, 3515), 'torchdata.utils.remote_file', 'remote_file', ([], {'quiet': 'quiet'}), "(**MPII_Files['mpii_human_pose_v1.tar.gz'], quiet=quiet)\n", (3459, 3515), False, 'from torchdata.utils import download_file, remote_file, md5sum\n'), ((4620, 4650), 'numpy.matmul', 'np.matmul', (['keypoints', 'matrix.T'], {}), '(keypoints, matrix.T)\n', (4629, 4650), True, 'import numpy as np\n'), ((5081, 5095), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (5085, 5095), False, 'from pathlib import 
Path\n'), ((9591, 9603), 'numpy.eye', 'np.eye', (['(3)', '(3)'], {}), '(3, 3)\n', (9597, 9603), True, 'import numpy as np\n'), ((9616, 9685), 'numpy.matmul', 'np.matmul', (['[[1, 0, -crop_box[0]], [0, 1, -crop_box[1]], [0, 0, 1]]', 'm'], {}), '([[1, 0, -crop_box[0]], [0, 1, -crop_box[1]], [0, 0, 1]], m)\n', (9625, 9685), True, 'import numpy as np\n'), ((9745, 9812), 'numpy.matmul', 'np.matmul', (['[[k, 0, k / 2 - 0.5], [0, k, k / 2 - 0.5], [0, 0, 1]]', 'm'], {}), '([[k, 0, k / 2 - 0.5], [0, k, k / 2 - 0.5], [0, 0, 1]], m)\n', (9754, 9812), True, 'import numpy as np\n'), ((10198, 10210), 'numpy.eye', 'np.eye', (['(3)', '(3)'], {}), '(3, 3)\n', (10204, 10210), True, 'import numpy as np\n'), ((10223, 10280), 'numpy.matmul', 'np.matmul', (['[[1, 0, -bb[0]], [0, 1, -bb[1]], [0, 0, 1]]', 'm'], {}), '([[1, 0, -bb[0]], [0, 1, -bb[1]], [0, 0, 1]], m)\n', (10232, 10280), True, 'import numpy as np\n'), ((2077, 2118), 'torchdata.utils.md5sum', 'md5sum', (["(data_dir / 'annot.h5')"], {'quiet': '(True)'}), "(data_dir / 'annot.h5', quiet=True)\n", (2083, 2118), False, 'from torchdata.utils import download_file, remote_file, md5sum\n'), ((2167, 2208), 'torchdata.utils.md5sum', 'md5sum', (["(data_dir / 'valid.h5')"], {'quiet': '(True)'}), "(data_dir / 'valid.h5', quiet=True)\n", (2173, 2208), False, 'from torchdata.utils import download_file, remote_file, md5sum\n'), ((2989, 3003), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (2993, 3003), False, 'from pathlib import Path\n'), ((3545, 3578), 'tarfile.open', 'tarfile.open', (['img_archive', '"""r:gz"""'], {}), "(img_archive, 'r:gz')\n", (3557, 3578), False, 'import tarfile\n'), ((3599, 3644), 'torchdata.logger.log.info', 'log.info', (['"""[2/3] Loading archive metadata..."""'], {}), "('[2/3] Loading archive metadata...')\n", (3607, 3644), False, 'from torchdata.logger import log\n'), ((3795, 3833), 'torchdata.logger.log.info', 'log.info', (['"""[3/3] Extracting images..."""'], {}), "('[3/3] Extracting images...')\n", 
(3803, 3833), False, 'from torchdata.logger import log\n'), ((4208, 4222), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (4220, 4222), False, 'from time import perf_counter\n'), ((5156, 5198), 'h5py.File', 'h5py.File', (["(self.data_dir / 'annot.h5')", '"""r"""'], {}), "(self.data_dir / 'annot.h5', 'r')\n", (5165, 5198), False, 'import h5py\n'), ((6112, 6154), 'h5py.File', 'h5py.File', (["(self.data_dir / 'valid.h5')", '"""r"""'], {}), "(self.data_dir / 'valid.h5', 'r')\n", (6121, 6154), False, 'import h5py\n'), ((3941, 3995), 'tqdm.tqdm', 'tqdm', ([], {'iterable': 'subdir_members', 'ascii': '(True)', 'leave': '(False)'}), '(iterable=subdir_members, ascii=True, leave=False)\n', (3945, 3995), False, 'from tqdm import tqdm\n'), ((4292, 4327), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'duration_seconds'}), '(seconds=duration_seconds)\n', (4301, 4327), False, 'from datetime import timedelta\n'), ((4481, 4499), 'numpy.ndim', 'np.ndim', (['keypoints'], {}), '(keypoints)\n', (4488, 4499), True, 'import numpy as np\n'), ((5908, 5945), 'numpy.ndarray', 'np.ndarray', (['tmp.shape'], {'dtype': 'np.uint8'}), '(tmp.shape, dtype=np.uint8)\n', (5918, 5945), True, 'import numpy as np\n')] |
import unittest
import numpy
from oo_trees.dataset import Dataset
from oo_trees.attribute import *
from oo_trees.splitter import *
class TestDataset(unittest.TestCase):
    """Unit tests for oo_trees.dataset.Dataset splitting and bookkeeping."""

    def test_entropy(self):
        features = numpy.array([[0, 1], [0, 0]])
        labels = numpy.array(['H', 'T'])
        ds = Dataset(features, labels)
        first, second = ds.attributes
        # Splitting on the first column separates nothing: entropy stays 1 bit.
        for value in (0, 1):
            self.assertEqual(ds.splitter_entropy(IsEqualSplitter(first, value)), 1)
        # The second column perfectly separates the classes: zero entropy.
        for value in (0, 1):
            self.assertEqual(ds.splitter_entropy(IsEqualSplitter(second, value)), 0)
        chosen = ds.best_single_attribute_splitter()
        self.assertEqual(chosen.attribute.index, 1)
        self.assertEqual(chosen.value, 0)

    def test_split_on(self):
        features = numpy.array([[0, 1], [0, 0], [1, 0]])
        labels = numpy.array(['H', 'T', 'T'])
        ds = Dataset(features, labels)
        _, second = ds.attributes
        branches = ds.split_on(IsEqualSplitter(second, 0))
        numpy.testing.assert_array_equal(branches[0].X, numpy.array([[0, 1]]))
        numpy.testing.assert_array_equal(branches[1].X, numpy.array([[0, 0], [1, 0]]))

    def test_multitype_splitting(self):
        # x1 < 0.5, x2 = 0 => 'Red'
        # x1 < 0.5, x2 = 1 => 'Yellow'
        # x1 >= .5         => 'Green'
        features = numpy.array([[0.25, 0], [0.33, 0], [0.31, 1], [0.12, 1], [0.45, 0],
                               [0.52, 0], [0.81, 0], [0.67, 1], [0.51, 1]])
        labels = numpy.array(['Red', 'Red', 'Yellow', 'Yellow', 'Red',
                             'Green', 'Green', 'Green', 'Green'])
        ds = Dataset(features, labels, [NumericAttribute(0), CategoricalAttribute(1)])
        top_split = ds.best_single_attribute_splitter()
        # The numeric attribute should be chosen, with a threshold in the gap.
        self.assertEqual(top_split.attribute.index, 0)
        self.assertGreaterEqual(top_split.value, 0.45)
        self.assertLess(top_split.value, 0.52)
        low_side, high_side = ds.split_on(top_split).values()
        sub_split = low_side.best_single_attribute_splitter()
        self.assertEqual(sub_split.attribute.index, 1)
        self.assertEqual(sub_split.value, 0)

    def test_more_complicated_splitting(self):
        # x1 < 0.25          => 'a'
        # x1 >= 0.25, x2 = 0 => 'b'
        # x1 < 0.50,  x2 = 1 => 'c'
        # x1 >= 0.50, x2 = 1 => 'd'
        features = numpy.array([[0.2, 0], [0.01, 1], [0.15, 0], [0.232, 1], [0.173, 0],
                               [0.263, 0], [0.671, 0], [0.9, 0], [0.387, 1], [0.482, 1],
                               [0.632, 1], [0.892, 1]])
        labels = numpy.array(['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a'])
        ds = Dataset(features, labels, [NumericAttribute(0), CategoricalAttribute(1)])
        root_split = ds.best_single_attribute_splitter()
        self.assertEqual(root_split.attribute.index, 0)
        self.assertGreaterEqual(root_split.value, 0.23)
        self.assertLess(root_split.value, 0.27)
        low_side, high_side = ds.split_on(root_split).values()
        numpy.testing.assert_array_equal(low_side.y, ['a', 'a', 'a', 'a', 'a'])
        high_split = high_side.best_single_attribute_splitter()
        self.assertEqual(high_split.attribute.index, 1)
        self.assertEqual(high_split.value, 0)
        cat_one, cat_zero = high_side.split_on(high_split).values()
        numpy.testing.assert_array_equal(cat_zero.y, ['b', 'b', 'b'])
        deep_split = cat_one.best_single_attribute_splitter()
        self.assertEqual(deep_split.attribute.index, 0)
        self.assertGreaterEqual(deep_split.value, 0.482)
        self.assertLess(deep_split.value, 0.632)
        leaf_c, leaf_a = cat_one.split_on(deep_split).values()
        numpy.testing.assert_array_equal(leaf_c.y, ['c', 'c'])
        numpy.testing.assert_array_equal(leaf_a.y, ['a', 'a'])

    def test_outcomes(self):
        ds = Dataset(numpy.array([[0, 1], [0, 0], [1, 0]]), numpy.array(['H', 'T', 'T']))
        self.assertEqual(ds.outcome_counter.counter.most_common(), [('T', 2), ('H', 1)])

    def test_bootstrap(self):
        ds = Dataset(numpy.array([[0, 1], [0, 0]]), numpy.array(['H', 'T']))
        resampled = ds.bootstrap(1000)
        self.assertEqual(resampled.X.shape[0], 1000)
        self.assertEqual('H' in resampled.y, True)  # this has a 10e-302ish chance of failing
| [
"oo_trees.dataset.Dataset",
"numpy.array",
"numpy.testing.assert_array_equal"
] | [((210, 239), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (221, 239), False, 'import numpy\n'), ((252, 275), 'numpy.array', 'numpy.array', (["['H', 'T']"], {}), "(['H', 'T'])\n", (263, 275), False, 'import numpy\n'), ((294, 307), 'oo_trees.dataset.Dataset', 'Dataset', (['X', 'y'], {}), '(X, y)\n', (301, 307), False, 'from oo_trees.dataset import Dataset\n'), ((872, 909), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 0], [1, 0]]'], {}), '([[0, 1], [0, 0], [1, 0]])\n', (883, 909), False, 'import numpy\n'), ((922, 950), 'numpy.array', 'numpy.array', (["['H', 'T', 'T']"], {}), "(['H', 'T', 'T'])\n", (933, 950), False, 'import numpy\n'), ((969, 982), 'oo_trees.dataset.Dataset', 'Dataset', (['X', 'y'], {}), '(X, y)\n', (976, 982), False, 'from oo_trees.dataset import Dataset\n'), ((1394, 1511), 'numpy.array', 'numpy.array', (['[[0.25, 0], [0.33, 0], [0.31, 1], [0.12, 1], [0.45, 0], [0.52, 0], [0.81, 0\n ], [0.67, 1], [0.51, 1]]'], {}), '([[0.25, 0], [0.33, 0], [0.31, 1], [0.12, 1], [0.45, 0], [0.52, \n 0], [0.81, 0], [0.67, 1], [0.51, 1]])\n', (1405, 1511), False, 'import numpy\n'), ((1519, 1613), 'numpy.array', 'numpy.array', (["['Red', 'Red', 'Yellow', 'Yellow', 'Red', 'Green', 'Green', 'Green', 'Green']"], {}), "(['Red', 'Red', 'Yellow', 'Yellow', 'Red', 'Green', 'Green',\n 'Green', 'Green'])\n", (1530, 1613), False, 'import numpy\n'), ((2331, 2486), 'numpy.array', 'numpy.array', (['[[0.2, 0], [0.01, 1], [0.15, 0], [0.232, 1], [0.173, 0], [0.263, 0], [0.671,\n 0], [0.9, 0], [0.387, 1], [0.482, 1], [0.632, 1], [0.892, 1]]'], {}), '([[0.2, 0], [0.01, 1], [0.15, 0], [0.232, 1], [0.173, 0], [0.263,\n 0], [0.671, 0], [0.9, 0], [0.387, 1], [0.482, 1], [0.632, 1], [0.892, 1]])\n', (2342, 2486), False, 'import numpy\n'), ((2495, 2568), 'numpy.array', 'numpy.array', (["['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a']"], {}), "(['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'c', 'c', 'a', 'a'])\n", (2506, 2568), False, 'import 
numpy\n'), ((2935, 3005), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['subset1.y', "['a', 'a', 'a', 'a', 'a']"], {}), "(subset1.y, ['a', 'a', 'a', 'a', 'a'])\n", (2967, 3005), False, 'import numpy\n'), ((3242, 3303), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['subset22.y', "['b', 'b', 'b']"], {}), "(subset22.y, ['b', 'b', 'b'])\n", (3274, 3303), False, 'import numpy\n'), ((3608, 3665), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['subset211.y', "['c', 'c']"], {}), "(subset211.y, ['c', 'c'])\n", (3640, 3665), False, 'import numpy\n'), ((3674, 3731), 'numpy.testing.assert_array_equal', 'numpy.testing.assert_array_equal', (['subset212.y', "['a', 'a']"], {}), "(subset212.y, ['a', 'a'])\n", (3706, 3731), False, 'import numpy\n'), ((3774, 3811), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 0], [1, 0]]'], {}), '([[0, 1], [0, 0], [1, 0]])\n', (3785, 3811), False, 'import numpy\n'), ((3824, 3852), 'numpy.array', 'numpy.array', (["['H', 'T', 'T']"], {}), "(['H', 'T', 'T'])\n", (3835, 3852), False, 'import numpy\n'), ((3871, 3884), 'oo_trees.dataset.Dataset', 'Dataset', (['X', 'y'], {}), '(X, y)\n', (3878, 3884), False, 'from oo_trees.dataset import Dataset\n'), ((4050, 4079), 'numpy.array', 'numpy.array', (['[[0, 1], [0, 0]]'], {}), '([[0, 1], [0, 0]])\n', (4061, 4079), False, 'import numpy\n'), ((4092, 4115), 'numpy.array', 'numpy.array', (["['H', 'T']"], {}), "(['H', 'T'])\n", (4103, 4115), False, 'import numpy\n'), ((4134, 4147), 'oo_trees.dataset.Dataset', 'Dataset', (['X', 'y'], {}), '(X, y)\n', (4141, 4147), False, 'from oo_trees.dataset import Dataset\n'), ((1129, 1150), 'numpy.array', 'numpy.array', (['[[0, 1]]'], {}), '([[0, 1]])\n', (1140, 1150), False, 'import numpy\n'), ((1205, 1234), 'numpy.array', 'numpy.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (1216, 1234), False, 'import numpy\n')] |
# -*- coding:utf-8 -*-
import argparse
import torch
import os
import cv2
import pyssim
import codecs
from scipy.ndimage import gaussian_filter
from numpy.lib.stride_tricks import as_strided as ast
from PIL import Image
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
import time, math
import scipy.io as sio
from skimage import measure, io
from functools import partial
import pickle
from model.sk_optimized import Net
# Command-line configuration for the SKNet super-resolution evaluator.
parser = argparse.ArgumentParser(description="SKNet Test")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="sk/finalmodel_epoch_50.pth", type=str, help="model path")
parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
parser.add_argument("--testdir", default='all', type=str, help="")  # e.g. "testdir/Urban100a"
parser.add_argument("--mode", default="evaluate", type=str, help="")
opt = parser.parse_args()
cuda = opt.cuda
# Fail fast when CUDA was requested but no GPU is visible.
if cuda and not torch.cuda.is_available():
    raise Exception("No GPU found, please run without --cuda")
def savelog(path, psnr, ssim):
    """Append one evaluation record to ./log/test_log.txt.

    Args:
        path: The test-set directory that was evaluated.
        psnr: Average PSNR over the set.
        ssim: Average SSIM over the set.
    """
    log_path = './log/'
    if not os.path.exists(log_path):
        os.mkdir(log_path)
    test_time = str(int(time.time()))
    # A context manager guarantees the handle is closed even when a write
    # fails (the original open/close pair leaked the handle on error).
    with codecs.open(log_path + 'test_log' + '.txt', 'a+', 'utf-8') as log:
        log.writelines("=======================================\n")
        log.writelines(test_time + '\n')
        log.writelines(path + '\n')
        log.writelines('PSNR==>%f \n' % psnr)
        log.writelines('SSIM==>%f \n' % ssim)
def eval():
    """Run evaluation on one test set, or on every benchmark when --testdir=all."""
    if opt.testdir != 'all':
        evaluate_by_path(opt.testdir)
        return
    # The standard benchmark suites live under the lapsrn checkout.
    for benchmark in ("testdir/Set5b", "testdir/Set14b", "testdir/bsd100a", "testdir/Urban100a"):
        evaluate_by_path('../lapsrn/' + benchmark)
def data_trans(im, num):
    """Apply the `num`-th of the 8 dihedral transforms to image `im`.

    0-2 are flips, 3-4 rotations, 5-6 rotation+flip; any other value (7 by
    convention) returns the image unchanged.
    """
    ops = {
        0: np.flipud,
        1: np.fliplr,
        2: lambda a: np.flipud(np.fliplr(a)),
        3: np.rot90,                            # 90 degrees counter-clockwise
        4: lambda a: np.rot90(a, -1),           # 90 degrees clockwise
        5: lambda a: np.flipud(np.rot90(a)),
        6: lambda a: np.flipud(np.rot90(a, -1)),
    }
    return ops.get(num, lambda a: a)(im)
def data_trans_inv(im, num):
    """Undo the transform applied by data_trans(im, num)."""
    inverse_ops = {
        0: np.flipud,                           # flips are self-inverse
        1: np.fliplr,
        2: lambda a: np.flipud(np.fliplr(a)),
        3: lambda a: np.rot90(a, -1),           # inverse of the CCW rotation
        4: np.rot90,                            # inverse of the CW rotation
        # rot90 + flipud is a reflection, hence self-inverse (square images).
        5: lambda a: np.flipud(np.rot90(a)),
        6: lambda a: np.flipud(np.rot90(a, -1)),
    }
    return inverse_ops.get(num, lambda a: a)(im)
def evaluate_by_path(path):
    """Evaluate every image under `path` with 8-way self-ensembling.

    Each image is pushed through all 8 dihedral transforms, super-resolved,
    mapped back to the original orientation, and the 8 predictions averaged
    before scoring. Average PSNR/SSIM are printed and appended to the log.
    """
    pimages = os.listdir(path)
    s_psnr = 0
    s_ssim = 0
    s_time = 0  # NOTE(review): never accumulated, so the reported time is always 0
    save = True
    eva = True
    convert = True
    for pimg in pimages:
        img = io.imread(path + '/' + pimg)
        im_list = []
        for i in range(8):
            tmp = data_trans(img, i)
            seim1 = predict(tmp, save, convert, eva, pimg)
            seim2 = data_trans_inv(seim1, i)
            print('i===', i, 'shape==', seim2.shape)
            im_list.append(seim2)
        # Average the aligned predictions. (Fix: the accumulator used to be
        # named `sum`, shadowing the builtin.)
        acc = im_list[0]
        for extra in im_list[1:]:
            acc = acc + extra
        avg = acc / len(im_list)
        psnr, ssim = eva_se(avg, img, pimg)
        s_psnr += psnr
        s_ssim += ssim
    avg_psnr = s_psnr / len(pimages)
    avg_ssim = s_ssim / len(pimages)
    avg_time = s_time / len(pimages)
    print_summary(avg_psnr, avg_ssim, avg_time)
    savelog(path, avg_psnr, avg_ssim)
def predict(img_read, save, convert, eva, name):
    # Run one image through the SKNet checkpoint and return the
    # super-resolved luma (Y) channel as a float32 array in [0, 255].
    # `save` and `eva` are forwarded by the caller but scoring/saving happens
    # in eva_se; `convert` selects RGB->Y preprocessing here.
    if convert:
        if eva:
            # h, w, _ = img_read.shape
            # Evaluation path: treat img_read as ground truth and derive the
            # low-resolution network input by downscaling its luma channel.
            im_gt_y = convert_rgb_to_y(img_read)
            # gt_yuv = convert_rgb_to_ycbcr(img_read)
            im_gt_y = im_gt_y.astype("float32")
            sc = 1.0 / opt.scale
            img_y = resize_image_by_pil(im_gt_y, sc)
            img_y = img_y[:, :, 0]
            # im_gt_y = im_gt_y[:, :, 0]
        else:
            # Inference path: img_read is already low resolution.
            sc = opt.scale
            tmp = resize_image_by_pil(img_read, sc)
            # gt_yuv = convert_rgb_to_ycbcr(tmp)
            img_y = convert_rgb_to_y(img_read)
            img_y = img_y.astype("float32")
    else:
        # Caller supplies a pre-built (ground truth, input) pair.
        im_gt_y, img_y = img_read
        # im_gt_y = im_gt_y.astype("float32")
    # Normalise to [0, 1] and shape to NCHW for the network.
    im_input = img_y / 255.
    im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])
    img_y = np.uint8(img_y)
    UseCPU = True
    # model = Net()
    model = Net(blocks=8, rate=opt.scale)
    # model = torch.load(opt.model, map_location='cpu')['modelPth']
    # torch.save(model.state_dict(), '1.pth')
    # The checkpoint stores a DataParallel-wrapped model under 'modelPth'.
    weights = torch.load(opt.model, map_location=torch.device('cpu'))
    saved_state = weights['modelPth'].state_dict()
    if UseCPU:
        from collections import OrderedDict
        new_state_dict = OrderedDict()
        for k, v in saved_state.items():
            # Strip the 'module.' prefix that nn.DataParallel adds to keys.
            namekey = k[7:]  # remove `module.`
            new_state_dict[namekey] = v
        # load params
        model.load_state_dict(new_state_dict)
    if cuda:
        model = model.cuda()
        im_input = im_input.cuda()
    else:
        model = model.cpu()
    start_time = time.time()
    # model=nn.DataParallel(model,device_ids=[0,1], output_device=1)
    if opt.scale == 2:
        HR_2x = model(im_input)
    elif opt.scale == 4:
        HR_4x = model(im_input)
    else:
        HR_8x = model(im_input)
    # elapsed_time = time.time() - start_time
    if opt.scale == 2:
        HR_2x = HR_2x.cpu()
        im_h_y = HR_2x.data[0].numpy().astype(np.float32)
    elif opt.scale == 4:
        HR_4x = HR_4x.cpu()
        im_h_y = HR_4x.data[0].numpy().astype(np.float32)
    else:
        HR_8x = HR_8x.cpu()
        im_h_y = HR_8x.data[0].numpy().astype(np.float32)
    # Rescale the network output back to 8-bit range and clip.
    im_h_y = im_h_y * 255.
    im_h_y[im_h_y < 0] = 0
    im_h_y[im_h_y > 255.] = 255.
    # Drop the channel axis: the result is a single-channel (H, W) array.
    im_h_y = im_h_y[0, :, :]
    return im_h_y
def eva_se(im_h_y, img_read, name):
    # Score one super-resolved Y channel (im_h_y) against the ground-truth
    # image `img_read`, save the reconstructed RGB result, and return the
    # (psnr, ssim) pair. `name` is the output filename stem.
    convert = True
    eva = True
    save = True
    if convert:
        if eva:
            # h, w, _ = img_read.shape
            im_gt_y = convert_rgb_to_y(img_read)
            if len(img_read.shape) == 2:
                # Grayscale source: fabricate a YCbCr volume with zero chroma
                # so the colour-reconstruction path below still works.
                gt_yuv = np.zeros([img_read.shape[0], img_read.shape[1], 3])
                gt_yuv[:, :, 0] = img_read
            else:
                gt_yuv = convert_rgb_to_ycbcr(img_read)
            im_gt_y = im_gt_y.astype("float32")
            sc = 1.0 / opt.scale
            img_y = resize_image_by_pil(im_gt_y, sc)
            img_y = img_y[:, :, 0]
            if len(img_read.shape) == 2:
                # Grayscale luma is already 2-D; no channel axis to strip.
                im_gt_y = im_gt_y
            else:
                im_gt_y = im_gt_y[:, :, 0]
        else:
            sc = opt.scale
            tmp = resize_image_by_pil(img_read, sc)
            gt_yuv = convert_rgb_to_ycbcr(tmp)
            img_y = convert_rgb_to_y(img_read)
            img_y = img_y.astype("float32")
    else:
        im_gt_y, img_y = img_read
        im_gt_y = im_gt_y.astype("float32")
    if save:
        # recon= im_h_y
        # Recombine the predicted luma with the ground truth's chroma planes.
        recon = convert_y_and_cbcr_to_rgb(im_h_y, gt_yuv[:, :, 1:3])
        save_figure(recon, name)
    if eva:
        # PSNR and SSIM (border of `scale` pixels is excluded from PSNR).
        psnr_predicted = PSNR(im_gt_y, im_h_y, shave_border=opt.scale)
        ssim_predicted = pyssim.compute_ssim(im_gt_y, im_h_y)
        print("test psnr/ssim=%f/%f" % (psnr_predicted, ssim_predicted))
        return psnr_predicted, ssim_predicted
def print_summary(psnr, ssim, time):
    """Print the scale factor and the averaged PSNR/SSIM/runtime of a test run."""
    for label, value in (("Scale=", opt.scale), ("PSNR=", psnr),
                         ("SSIM=", ssim), ("time=", time)):
        print(label, value)
def modcrop(im, scale):
    """Crop `im` so its height and width are exact multiples of `scale`."""
    height, width = im.shape[0], im.shape[1]
    height = int(height - height % scale)
    width = int(width - width % scale)
    # Trailing ellipsis keeps any channel axes intact.
    return im[:height, :width, ...]
def save_figure(img, name):
    """Write an RGB image array to ./save_img/<name>.png (OpenCV expects BGR)."""
    out_path = './save_img/'
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    print('saved ' + name)
    # Swap R and B into a fresh float64 buffer, since cv2 writes BGR order.
    bgr = np.zeros([img.shape[0], img.shape[1], img.shape[2]])
    for dst_channel, src_channel in enumerate((2, 1, 0)):
        bgr[:, :, dst_channel] = img[:, :, src_channel]
    cv2.imwrite(out_path + name[:-4] + '.png', bgr)
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio (dB) between `pred` and `gt`.

    `shave_border` pixels are excluded on every side; identical images
    return the sentinel value 100.
    """
    h, w = pred.shape[:2]
    region = (slice(shave_border, h - shave_border),
              slice(shave_border, w - shave_border))
    diff = pred[region] - gt[region]
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
def convert_rgb_to_y(image, jpeg_mode=False, max_value=255.0):
    """Extract the luma (Y) channel from an RGB image using BT.601 weights."""
    # A single-channel image is already a luma plane.
    if len(image.shape) <= 2 or image.shape[2] == 1:
        return image
    if jpeg_mode:
        # Full-range JPEG luma: no offset.
        weights = np.array([[0.299, 0.587, 0.114]])
        return image.dot(weights.T)
    # Studio-swing (16-235) luma with a black-level offset.
    weights = np.array([[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0]])
    return image.dot(weights.T) + 16.0 * max_value / 256.0
def convert_rgb_to_ycbcr(image, jpeg_mode=False, max_value=255):
    """Convert an RGB image to YCbCr (ITU-R BT.601 coefficients)."""
    # Single-channel input has no colour to convert.
    if len(image.shape) < 2 or image.shape[2] == 1:
        return image
    if jpeg_mode:
        # Full-range JPEG conversion: only the chroma planes are offset.
        xform = np.array([[0.299, 0.587, 0.114],
                          [-0.169, -0.331, 0.500],
                          [0.500, -0.419, -0.081]])
        ycbcr = image.dot(xform.T)
        ycbcr[:, :, [1, 2]] += max_value / 2
        return ycbcr
    # Studio-swing conversion with luma and chroma offsets.
    xform = np.array(
        [[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0],
         [-37.945 / 256.0, -74.494 / 256.0, 112.439 / 256.0],
         [112.439 / 256.0, -94.154 / 256.0, -18.285 / 256.0]])
    ycbcr = image.dot(xform.T)
    ycbcr[:, :, 0] += 16.0 * max_value / 256.0
    ycbcr[:, :, [1, 2]] += 128.0 * max_value / 256.0
    return ycbcr
def convert_y_and_cbcr_to_rgb(y_image, cbcr_image, jpeg_mode=False, max_value=255.0):
    """Recombine a Y plane with a CbCr pair and convert the result to RGB."""
    # Accept a 3-channel luma image by keeping only its first channel.
    if len(y_image.shape) == 3 and y_image.shape[2] == 3:
        y_image = y_image[:, :, 0:1]
    ycbcr = np.zeros([y_image.shape[0], y_image.shape[1], 3])
    ycbcr[:, :, 0] = y_image
    ycbcr[:, :, 1:3] = cbcr_image[:, :, 0:2]
    return convert_ycbcr_to_rgb(ycbcr)
def convert_ycbcr_to_rgb1(ycbcr_image, jpeg_mode=False, max_value=255.0):
rgb_image = np.zeros([ycbcr_image.shape[0], ycbcr_image.shape[1], 3]) # type: np.ndarray
if jpeg_mode:
rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
xform = np.array([[1, 0, 1.402], [1, - 0.344, - 0.714], [1, 1.772, 0]])
rgb_image = rgb_image.dot(xform.T)
else:
rgb_image[:, :, 0] = ycbcr_image[:, :, 0] - (16.0 * max_value / 256.0)
rgb_image[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - (128.0 * max_value / 256.0)
xform = np.array(
[[max_value / 219.0, 0, max_value * 0.701 / 112.0],
[max_value / 219, - max_value * 0.886 * 0.114 / (112 * 0.587), - max_value * 0.701 * 0.299 / (112 * 0.587)],
[max_value / 219.0, max_value * 0.886 / 112.0, 0]])
rgb_image = rgb_image.dot(xform.T)
return rgb_image
def convert_ycbcr_to_rgb(ycbcr_image):
    """Convert a studio-swing (16-235 / 16-240) YCbCr image to RGB (BT.601)."""
    shifted = np.zeros([ycbcr_image.shape[0], ycbcr_image.shape[1], 3])  # type: np.ndarray
    shifted[:, :, 0] = ycbcr_image[:, :, 0] - 16.0          # luma offset
    shifted[:, :, [1, 2]] = ycbcr_image[:, :, [1, 2]] - 128.0  # chroma offsets
    xform = np.array(
        [[298.082 / 256.0, 0, 408.583 / 256.0],
         [298.082 / 256.0, -100.291 / 256.0, -208.120 / 256.0],
         [298.082 / 256.0, 516.412 / 256.0, 0]])
    return shifted.dot(xform.T)
def resize_image_by_pil(image, scale, resampling_method="bicubic"):
    """Resize an image ndarray by a multiplicative `scale` factor using PIL.

    Args:
        image: uint8 ndarray; grayscale (2-D or H x W x 1), RGB, or RGBA.
        scale: Size multiplier (e.g. 0.25 downscales by 4x).
        resampling_method: "bicubic", "bilinear", or "nearest"; any other
            value selects Lanczos resampling.

    Returns:
        The resized ndarray. Single-channel input keeps an explicit
        trailing channel axis of size 1.
    """
    width, height = image.shape[1], image.shape[0]
    new_width = int(width * scale)
    new_height = int(height * scale)
    methods = {
        "bicubic": Image.BICUBIC,
        "bilinear": Image.BILINEAR,
        "nearest": Image.NEAREST,
    }
    method = methods.get(resampling_method, Image.LANCZOS)
    if len(image.shape) == 3 and image.shape[2] == 3:
        image = Image.fromarray(image, "RGB")
        image = image.resize([new_width, new_height], resample=method)
        image = np.asarray(image)
    elif len(image.shape) == 3 and image.shape[2] == 4:
        # RGBA
        # Bug fix: a 4-channel array must be wrapped with the "RGBA" mode;
        # passing "RGB" makes PIL misread the buffer / raise ValueError.
        image = Image.fromarray(image, "RGBA")
        image = image.resize([new_width, new_height], resample=method)
        image = np.asarray(image)
    else:
        image = Image.fromarray(image.reshape(height, width))
        image = image.resize([new_width, new_height], resample=method)
        image = np.asarray(image)
        image = image.reshape(new_height, new_width, 1)
    return image
def main():
    """Entry point: run the evaluation pass when --mode is "evaluate"."""
    if opt.mode != "evaluate":
        return
    eval()


if __name__ == '__main__':
    main()
| [
"numpy.uint8",
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.rot90",
"math.log10",
"os.path.exists",
"numpy.mean",
"os.listdir",
"pyssim.compute_ssim",
"argparse.ArgumentParser",
"numpy.asarray",
"os.mkdir",
"collections.OrderedDict",
"numpy.flipud",
"model.sk_op... | [((457, 506), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""SKNet Test"""'}), "(description='SKNet Test')\n", (480, 506), False, 'import argparse\n'), ((1281, 1292), 'time.time', 'time.time', ([], {}), '()\n', (1290, 1292), False, 'import time, math\n'), ((1335, 1393), 'codecs.open', 'codecs.open', (["(log_path + 'test_log' + '.txt')", '"""a+"""', '"""utf-8"""'], {}), "(log_path + 'test_log' + '.txt', 'a+', 'utf-8')\n", (1346, 1393), False, 'import codecs\n'), ((3781, 3797), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (3791, 3797), False, 'import os\n'), ((5558, 5573), 'numpy.uint8', 'np.uint8', (['img_y'], {}), '(img_y)\n', (5566, 5573), True, 'import numpy as np\n'), ((5625, 5654), 'model.sk_optimized.Net', 'Net', ([], {'blocks': '(8)', 'rate': 'opt.scale'}), '(blocks=8, rate=opt.scale)\n', (5628, 5654), False, 'from model.sk_optimized import Net\n'), ((6329, 6340), 'time.time', 'time.time', ([], {}), '()\n', (6338, 6340), False, 'import time, math\n'), ((9036, 9088), 'numpy.zeros', 'np.zeros', (['[img.shape[0], img.shape[1], img.shape[2]]'], {}), '([img.shape[0], img.shape[1], img.shape[2]])\n', (9044, 9088), True, 'import numpy as np\n'), ((9175, 9222), 'cv2.imwrite', 'cv2.imwrite', (["(out_path + name[:-4] + '.png')", 'tmp'], {}), "(out_path + name[:-4] + '.png', tmp)\n", (9186, 9222), False, 'import cv2\n'), ((10990, 11039), 'numpy.zeros', 'np.zeros', (['[y_image.shape[0], y_image.shape[1], 3]'], {}), '([y_image.shape[0], y_image.shape[1], 3])\n', (10998, 11039), True, 'import numpy as np\n'), ((11264, 11321), 'numpy.zeros', 'np.zeros', (['[ycbcr_image.shape[0], ycbcr_image.shape[1], 3]'], {}), '([ycbcr_image.shape[0], ycbcr_image.shape[1], 3])\n', (11272, 11321), True, 'import numpy as np\n'), ((12151, 12208), 'numpy.zeros', 'np.zeros', (['[ycbcr_image.shape[0], ycbcr_image.shape[1], 3]'], {}), '([ycbcr_image.shape[0], ycbcr_image.shape[1], 3])\n', (12159, 12208), True, 'import numpy as np\n'), 
((12359, 12505), 'numpy.array', 'np.array', (['[[298.082 / 256.0, 0, 408.583 / 256.0], [298.082 / 256.0, -100.291 / 256.0,\n -208.12 / 256.0], [298.082 / 256.0, 516.412 / 256.0, 0]]'], {}), '([[298.082 / 256.0, 0, 408.583 / 256.0], [298.082 / 256.0, -100.291 /\n 256.0, -208.12 / 256.0], [298.082 / 256.0, 516.412 / 256.0, 0]])\n', (12367, 12505), True, 'import numpy as np\n'), ((1061, 1086), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1084, 1086), False, 'import torch\n'), ((1214, 1238), 'os.path.exists', 'os.path.exists', (['log_path'], {}), '(log_path)\n', (1228, 1238), False, 'import os\n'), ((1248, 1266), 'os.mkdir', 'os.mkdir', (['log_path'], {}), '(log_path)\n', (1256, 1266), False, 'import os\n'), ((1986, 2006), 'numpy.flipud', 'np.flipud', (['org_image'], {}), '(org_image)\n', (1995, 2006), True, 'import numpy as np\n'), ((2907, 2927), 'numpy.flipud', 'np.flipud', (['org_image'], {}), '(org_image)\n', (2916, 2927), True, 'import numpy as np\n'), ((3920, 3948), 'skimage.io.imread', 'io.imread', (["(path + '/' + pimg)"], {}), "(path + '/' + pimg)\n", (3929, 3948), False, 'from skimage import measure, io\n'), ((5974, 5987), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5985, 5987), False, 'from collections import OrderedDict\n'), ((8414, 8450), 'pyssim.compute_ssim', 'pyssim.compute_ssim', (['im_gt_y', 'im_h_y'], {}), '(im_gt_y, im_h_y)\n', (8433, 8450), False, 'import pyssim\n'), ((8931, 8955), 'os.path.exists', 'os.path.exists', (['out_path'], {}), '(out_path)\n', (8945, 8955), False, 'import os\n'), ((8965, 8983), 'os.mkdir', 'os.mkdir', (['out_path'], {}), '(out_path)\n', (8973, 8983), False, 'import os\n'), ((9503, 9522), 'numpy.mean', 'np.mean', (['(imdff ** 2)'], {}), '(imdff ** 2)\n', (9510, 9522), True, 'import numpy as np\n'), ((9577, 9601), 'math.log10', 'math.log10', (['(255.0 / rmse)'], {}), '(255.0 / rmse)\n', (9587, 9601), False, 'import time, math\n'), ((9775, 9808), 'numpy.array', 'np.array', 
(['[[0.299, 0.587, 0.114]]'], {}), '([[0.299, 0.587, 0.114]])\n', (9783, 9808), True, 'import numpy as np\n'), ((9872, 9933), 'numpy.array', 'np.array', (['[[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0]]'], {}), '([[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0]])\n', (9880, 9933), True, 'import numpy as np\n'), ((10195, 10274), 'numpy.array', 'np.array', (['[[0.299, 0.587, 0.114], [-0.169, -0.331, 0.5], [0.5, -0.419, -0.081]]'], {}), '([[0.299, 0.587, 0.114], [-0.169, -0.331, 0.5], [0.5, -0.419, -0.081]])\n', (10203, 10274), True, 'import numpy as np\n'), ((10400, 10577), 'numpy.array', 'np.array', (['[[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0], [-37.945 / 256.0, -\n 74.494 / 256.0, 112.439 / 256.0], [112.439 / 256.0, -94.154 / 256.0, -\n 18.285 / 256.0]]'], {}), '([[65.738 / 256.0, 129.057 / 256.0, 25.064 / 256.0], [-37.945 / \n 256.0, -74.494 / 256.0, 112.439 / 256.0], [112.439 / 256.0, -94.154 / \n 256.0, -18.285 / 256.0]])\n', (10408, 10577), True, 'import numpy as np\n'), ((11467, 11528), 'numpy.array', 'np.array', (['[[1, 0, 1.402], [1, -0.344, -0.714], [1, 1.772, 0]]'], {}), '([[1, 0, 1.402], [1, -0.344, -0.714], [1, 1.772, 0]])\n', (11475, 11528), True, 'import numpy as np\n'), ((11769, 11998), 'numpy.array', 'np.array', (['[[max_value / 219.0, 0, max_value * 0.701 / 112.0], [max_value / 219, -\n max_value * 0.886 * 0.114 / (112 * 0.587), -max_value * 0.701 * 0.299 /\n (112 * 0.587)], [max_value / 219.0, max_value * 0.886 / 112.0, 0]]'], {}), '([[max_value / 219.0, 0, max_value * 0.701 / 112.0], [max_value / \n 219, -max_value * 0.886 * 0.114 / (112 * 0.587), -max_value * 0.701 * \n 0.299 / (112 * 0.587)], [max_value / 219.0, max_value * 0.886 / 112.0, 0]])\n', (11777, 11998), True, 'import numpy as np\n'), ((13112, 13141), 'PIL.Image.fromarray', 'Image.fromarray', (['image', '"""RGB"""'], {}), "(image, 'RGB')\n", (13127, 13141), False, 'from PIL import Image\n'), ((13229, 13246), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', 
(13239, 13246), True, 'import numpy as np\n'), ((2078, 2098), 'numpy.fliplr', 'np.fliplr', (['org_image'], {}), '(org_image)\n', (2087, 2098), True, 'import numpy as np\n'), ((2999, 3019), 'numpy.fliplr', 'np.fliplr', (['org_image'], {}), '(org_image)\n', (3008, 3019), True, 'import numpy as np\n'), ((5818, 5837), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (5830, 5837), False, 'import torch\n'), ((13334, 13363), 'PIL.Image.fromarray', 'Image.fromarray', (['image', '"""RGB"""'], {}), "(image, 'RGB')\n", (13349, 13363), False, 'from PIL import Image\n'), ((13451, 13468), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (13461, 13468), True, 'import numpy as np\n'), ((13628, 13645), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (13638, 13645), True, 'import numpy as np\n'), ((2164, 2184), 'numpy.fliplr', 'np.fliplr', (['org_image'], {}), '(org_image)\n', (2173, 2184), True, 'import numpy as np\n'), ((2206, 2225), 'numpy.flipud', 'np.flipud', (['lr_image'], {}), '(lr_image)\n', (2215, 2225), True, 'import numpy as np\n'), ((3085, 3105), 'numpy.fliplr', 'np.fliplr', (['org_image'], {}), '(org_image)\n', (3094, 3105), True, 'import numpy as np\n'), ((3127, 3146), 'numpy.flipud', 'np.flipud', (['lr_image'], {}), '(lr_image)\n', (3136, 3146), True, 'import numpy as np\n'), ((7340, 7391), 'numpy.zeros', 'np.zeros', (['[img_read.shape[0], img_read.shape[1], 3]'], {}), '([img_read.shape[0], img_read.shape[1], 3])\n', (7348, 7391), True, 'import numpy as np\n'), ((2307, 2326), 'numpy.rot90', 'np.rot90', (['org_image'], {}), '(org_image)\n', (2315, 2326), True, 'import numpy as np\n'), ((3228, 3251), 'numpy.rot90', 'np.rot90', (['org_image', '(-1)'], {}), '(org_image, -1)\n', (3236, 3251), True, 'import numpy as np\n'), ((2413, 2436), 'numpy.rot90', 'np.rot90', (['org_image', '(-1)'], {}), '(org_image, -1)\n', (2421, 2436), True, 'import numpy as np\n'), ((3337, 3356), 'numpy.rot90', 'np.rot90', (['org_image'], {}), 
'(org_image)\n', (3345, 3356), True, 'import numpy as np\n'), ((5460, 5486), 'torch.from_numpy', 'torch.from_numpy', (['im_input'], {}), '(im_input)\n', (5476, 5486), False, 'import torch\n'), ((2515, 2534), 'numpy.rot90', 'np.rot90', (['org_image'], {}), '(org_image)\n', (2523, 2534), True, 'import numpy as np\n'), ((2556, 2581), 'numpy.flipud', 'np.flipud', (['rotated_image1'], {}), '(rotated_image1)\n', (2565, 2581), True, 'import numpy as np\n'), ((3435, 3454), 'numpy.rot90', 'np.rot90', (['org_image'], {}), '(org_image)\n', (3443, 3454), True, 'import numpy as np\n'), ((3476, 3501), 'numpy.flipud', 'np.flipud', (['rotated_image1'], {}), '(rotated_image1)\n', (3485, 3501), True, 'import numpy as np\n'), ((2662, 2685), 'numpy.rot90', 'np.rot90', (['org_image', '(-1)'], {}), '(org_image, -1)\n', (2670, 2685), True, 'import numpy as np\n'), ((2706, 2731), 'numpy.flipud', 'np.flipud', (['rotated_image2'], {}), '(rotated_image2)\n', (2715, 2731), True, 'import numpy as np\n'), ((3582, 3605), 'numpy.rot90', 'np.rot90', (['org_image', '(-1)'], {}), '(org_image, -1)\n', (3590, 3605), True, 'import numpy as np\n'), ((3626, 3651), 'numpy.flipud', 'np.flipud', (['rotated_image2'], {}), '(rotated_image2)\n', (3635, 3651), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Inverse Cloze Task dataset."""
import functools
from language.orqa.utils import bert_utils
import numpy as np
import tensorflow.compat.v1 as tf
def get_retrieval_examples(serialized_example, mask_rate, bert_hub_module_path,
                           query_seq_len, block_seq_len):
  """Make retrieval examples.

  Parses one serialized tf.Example holding a text block (title ids, token ids,
  sentence start offsets) and builds an Inverse Cloze Task pair: a randomly
  chosen sentence acts as the "query" and the remaining block as the "evidence".

  Args:
    serialized_example: Scalar string tensor, one serialized tf.Example.
    mask_rate: Probability of removing the query sentence from its block.
    bert_hub_module_path: Path to the BERT TF-Hub module (provides the vocab).
    query_seq_len: Padded/truncated length for the query token ids.
    block_seq_len: Padded/truncated length for the block token ids.

  Returns:
    A dict of tensors: keep_example, mask_query, query_ids, query_mask,
    block_ids, block_mask, block_segment_ids.
  """
  feature_spec = dict(
      title_ids=tf.FixedLenSequenceFeature([], tf.int64, True),
      token_ids=tf.FixedLenSequenceFeature([], tf.int64, True),
      sentence_starts=tf.FixedLenSequenceFeature([], tf.int64, True))
  features = tf.parse_single_example(serialized_example, feature_spec)
  # int32 is sufficient for token/vocab ids and cheaper downstream.
  features = {k: tf.cast(v, tf.int32) for k, v in features.items()}
  title_ids = features["title_ids"]
  token_ids = features["token_ids"]
  sentence_starts = features["sentence_starts"]
  # Each sentence ends where the next one begins; the last one ends at the
  # end of the block.
  sentence_ends = tf.concat([sentence_starts[1:], [tf.size(token_ids)]], 0)
  tokenizer = bert_utils.get_tokenizer(bert_hub_module_path)
  cls_id, sep_id = tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]"])
  # Randomly choose a sentence and pretend that it is a query.
  query_index = tf.random.uniform(
      shape=[],
      minval=0,
      maxval=tf.size(sentence_starts),
      dtype=tf.int32)
  query_start = sentence_starts[query_index]
  query_end = sentence_ends[query_index]
  query_ids = token_ids[query_start:query_end]
  # With probability `mask_rate` the query sentence is removed from the block
  # so the pair cannot be matched by trivial string overlap.
  mask_query = tf.less(tf.random.uniform([]), mask_rate)
  def _apply_mask():
    # Splice the query sentence out of the block.
    return tf.concat([token_ids[:query_start], token_ids[query_end:]], 0)
  block_ids = tf.cond(
      pred=mask_query,
      true_fn=_apply_mask,
      false_fn=lambda: token_ids)
  query_ids, query_mask = bert_utils.pad_or_truncate(
      token_ids=query_ids,
      sequence_length=query_seq_len,
      cls_id=cls_id,
      sep_id=sep_id)
  block_ids, block_mask, block_segment_ids = bert_utils.pad_or_truncate_pair(
      token_ids_a=title_ids,
      token_ids_b=block_ids,
      sequence_length=block_seq_len,
      cls_id=cls_id,
      sep_id=sep_id)
  # Masked examples for single-sentence blocks don't make any sense.
  keep_example = tf.logical_or(
      tf.logical_not(mask_query),
      tf.greater(tf.size(sentence_starts), 1))
  return dict(
      keep_example=keep_example,
      mask_query=mask_query,
      query_ids=query_ids,
      query_mask=query_mask,
      block_ids=block_ids,
      block_mask=block_mask,
      block_segment_ids=block_segment_ids)
def perturbed_chunks(max_val, num_chunks):
  """Return `num_chunks` starting offsets in [0, max_val), one per equal chunk.

  The offsets are the evenly spaced chunk boundaries, each shifted forward by
  a random amount strictly inside its own chunk, so concurrent readers start
  at randomized but non-overlapping-by-construction positions.
  """
  starts, step = np.linspace(
      0, max_val, num_chunks, endpoint=False, retstep=True, dtype=np.int64)
  offsets = np.random.randint(step, size=starts.shape)
  return starts + offsets
def get_dataset(examples_path, mask_rate, bert_hub_module_path, query_seq_len,
                block_seq_len, num_block_records, num_input_threads):
  """An input function satisfying the tf.estimator API.

  Args:
    examples_path: Path to a single (unsharded) TFRecord file of examples.
    mask_rate: Probability of masking the query sentence out of its block.
    bert_hub_module_path: Path to the BERT TF-Hub module (for the vocab).
    query_seq_len: Padded/truncated length of query token ids.
    block_seq_len: Padded/truncated length of block token ids.
    num_block_records: Total number of records in the input file.
    num_input_threads: Number of concurrent readers.

  Returns:
    A tf.data.Dataset yielding the dicts built by get_retrieval_examples.
  """
  # The input file is not sharded. We can still get the randomization and
  # efficiency benefits of sharded inputs by doing multiple reads concurrently
  # but starting at different points.
  skips = perturbed_chunks(num_block_records, num_input_threads)
  tf.logging.info("Concurrent reads of %d records: %s",
                  num_block_records, skips)
  dataset = tf.data.Dataset.from_tensor_slices(tf.constant(skips, tf.int64))
  def _skipped_dataset(skip):
    """Get skipped dataset."""
    dataset = tf.data.TFRecordDataset(
        examples_path, buffer_size=16 * 1024 * 1024)
    # repeat() before skip() so each reader cycles the whole file from its
    # own randomized starting offset.
    dataset = dataset.repeat()
    dataset = dataset.skip(skip)
    dataset = dataset.map(
        functools.partial(
            get_retrieval_examples,
            mask_rate=mask_rate,
            bert_hub_module_path=bert_hub_module_path,
            query_seq_len=query_seq_len,
            block_seq_len=block_seq_len))
    # Drop examples flagged as useless (masked single-sentence blocks).
    dataset = dataset.filter(lambda d: d.pop("keep_example"))
    return dataset
  dataset = dataset.apply(tf.data.experimental.parallel_interleave(
      _skipped_dataset,
      sloppy=True,
      cycle_length=num_input_threads))
  return dataset
| [
"tensorflow.compat.v1.FixedLenSequenceFeature",
"language.orqa.utils.bert_utils.pad_or_truncate",
"language.orqa.utils.bert_utils.get_tokenizer",
"tensorflow.compat.v1.cast",
"tensorflow.compat.v1.parse_single_example",
"tensorflow.compat.v1.cond",
"tensorflow.compat.v1.random.uniform",
"numpy.linspac... | [((1188, 1245), 'tensorflow.compat.v1.parse_single_example', 'tf.parse_single_example', (['serialized_example', 'feature_spec'], {}), '(serialized_example, feature_spec)\n', (1211, 1245), True, 'import tensorflow.compat.v1 as tf\n'), ((1526, 1572), 'language.orqa.utils.bert_utils.get_tokenizer', 'bert_utils.get_tokenizer', (['bert_hub_module_path'], {}), '(bert_hub_module_path)\n', (1550, 1572), False, 'from language.orqa.utils import bert_utils\n'), ((2139, 2213), 'tensorflow.compat.v1.cond', 'tf.cond', ([], {'pred': 'mask_query', 'true_fn': '_apply_mask', 'false_fn': '(lambda : token_ids)'}), '(pred=mask_query, true_fn=_apply_mask, false_fn=lambda : token_ids)\n', (2146, 2213), True, 'import tensorflow.compat.v1 as tf\n'), ((2259, 2372), 'language.orqa.utils.bert_utils.pad_or_truncate', 'bert_utils.pad_or_truncate', ([], {'token_ids': 'query_ids', 'sequence_length': 'query_seq_len', 'cls_id': 'cls_id', 'sep_id': 'sep_id'}), '(token_ids=query_ids, sequence_length=\n query_seq_len, cls_id=cls_id, sep_id=sep_id)\n', (2285, 2372), False, 'from language.orqa.utils import bert_utils\n'), ((2438, 2581), 'language.orqa.utils.bert_utils.pad_or_truncate_pair', 'bert_utils.pad_or_truncate_pair', ([], {'token_ids_a': 'title_ids', 'token_ids_b': 'block_ids', 'sequence_length': 'block_seq_len', 'cls_id': 'cls_id', 'sep_id': 'sep_id'}), '(token_ids_a=title_ids, token_ids_b=\n block_ids, sequence_length=block_seq_len, cls_id=cls_id, sep_id=sep_id)\n', (2469, 2581), False, 'from language.orqa.utils import bert_utils\n'), ((3119, 3220), 'numpy.linspace', 'np.linspace', ([], {'start': '(0)', 'stop': 'max_val', 'num': 'num_chunks', 'endpoint': '(False)', 'retstep': '(True)', 'dtype': 'np.int64'}), '(start=0, stop=max_val, num=num_chunks, endpoint=False, retstep=\n True, dtype=np.int64)\n', (3130, 3220), True, 'import numpy as np\n'), ((3270, 3319), 'numpy.random.randint', 'np.random.randint', (['chunk_size'], {'size': 'indices.shape'}), '(chunk_size, 
size=indices.shape)\n', (3287, 3319), True, 'import numpy as np\n'), ((3820, 3899), 'tensorflow.compat.v1.logging.info', 'tf.logging.info', (['"""Concurrent reads of %d records: %s"""', 'num_block_records', 'skips'], {}), "('Concurrent reads of %d records: %s', num_block_records, skips)\n", (3835, 3899), True, 'import tensorflow.compat.v1 as tf\n'), ((1263, 1283), 'tensorflow.compat.v1.cast', 'tf.cast', (['v', 'tf.int32'], {}), '(v, tf.int32)\n', (1270, 1283), True, 'import tensorflow.compat.v1 as tf\n'), ((1994, 2015), 'tensorflow.compat.v1.random.uniform', 'tf.random.uniform', (['[]'], {}), '([])\n', (2011, 2015), True, 'import tensorflow.compat.v1 as tf\n'), ((2061, 2123), 'tensorflow.compat.v1.concat', 'tf.concat', (['[token_ids[:query_start], token_ids[query_end:]]', '(0)'], {}), '([token_ids[:query_start], token_ids[query_end:]], 0)\n', (2070, 2123), True, 'import tensorflow.compat.v1 as tf\n'), ((2716, 2742), 'tensorflow.compat.v1.logical_not', 'tf.logical_not', (['mask_query'], {}), '(mask_query)\n', (2730, 2742), True, 'import tensorflow.compat.v1 as tf\n'), ((3965, 3993), 'tensorflow.compat.v1.constant', 'tf.constant', (['skips', 'tf.int64'], {}), '(skips, tf.int64)\n', (3976, 3993), True, 'import tensorflow.compat.v1 as tf\n'), ((4071, 4139), 'tensorflow.compat.v1.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['examples_path'], {'buffer_size': '(16 * 1024 * 1024)'}), '(examples_path, buffer_size=16 * 1024 * 1024)\n', (4094, 4139), True, 'import tensorflow.compat.v1 as tf\n'), ((4582, 4689), 'tensorflow.compat.v1.data.experimental.parallel_interleave', 'tf.data.experimental.parallel_interleave', (['_skipped_dataset'], {'sloppy': '(True)', 'cycle_length': 'num_input_threads'}), '(_skipped_dataset, sloppy=True,\n cycle_length=num_input_threads)\n', (4622, 4689), True, 'import tensorflow.compat.v1 as tf\n'), ((993, 1039), 'tensorflow.compat.v1.FixedLenSequenceFeature', 'tf.FixedLenSequenceFeature', (['[]', 'tf.int64', '(True)'], {}), '([], tf.int64, 
True)\n', (1019, 1039), True, 'import tensorflow.compat.v1 as tf\n'), ((1057, 1103), 'tensorflow.compat.v1.FixedLenSequenceFeature', 'tf.FixedLenSequenceFeature', (['[]', 'tf.int64', '(True)'], {}), '([], tf.int64, True)\n', (1083, 1103), True, 'import tensorflow.compat.v1 as tf\n'), ((1127, 1173), 'tensorflow.compat.v1.FixedLenSequenceFeature', 'tf.FixedLenSequenceFeature', (['[]', 'tf.int64', '(True)'], {}), '([], tf.int64, True)\n', (1153, 1173), True, 'import tensorflow.compat.v1 as tf\n'), ((1788, 1812), 'tensorflow.compat.v1.size', 'tf.size', (['sentence_starts'], {}), '(sentence_starts)\n', (1795, 1812), True, 'import tensorflow.compat.v1 as tf\n'), ((2761, 2785), 'tensorflow.compat.v1.size', 'tf.size', (['sentence_starts'], {}), '(sentence_starts)\n', (2768, 2785), True, 'import tensorflow.compat.v1 as tf\n'), ((4248, 4419), 'functools.partial', 'functools.partial', (['get_retrieval_examples'], {'mask_rate': 'mask_rate', 'bert_hub_module_path': 'bert_hub_module_path', 'query_seq_len': 'query_seq_len', 'block_seq_len': 'block_seq_len'}), '(get_retrieval_examples, mask_rate=mask_rate,\n bert_hub_module_path=bert_hub_module_path, query_seq_len=query_seq_len,\n block_seq_len=block_seq_len)\n', (4265, 4419), False, 'import functools\n'), ((1486, 1504), 'tensorflow.compat.v1.size', 'tf.size', (['token_ids'], {}), '(token_ids)\n', (1493, 1504), True, 'import tensorflow.compat.v1 as tf\n')] |
import numpy as np
from numpy.random import default_rng
def random_crop(data, size, padding, rng=None):
    """Random `size` x `size` crop of zero-padded images, one offset per image.

    Args:
        data: array of shape (..., H, W, C); one crop offset is drawn for each
            leading (batch) index.
        size: side length of the square crop.
        padding: zero padding added on each side of H and W before cropping
            (must be > 0, since offsets are drawn from [0, 2*padding)).
        rng: optional numpy Generator. Fix: the original default
            `rng=default_rng()` was evaluated once at import time (the classic
            call-in-default pitfall); now a fresh generator is created per call
            when None is passed.

    Returns:
        A new array of shape (..., size, size, C).
    """
    if rng is None:
        rng = default_rng()
    # One (row, col) offset per leading index, broadcastable over H/W/C.
    x = rng.integers(2 * padding, size=data.shape[:-3] + (1, 1, 1))
    y = rng.integers(2 * padding, size=data.shape[:-3] + (1, 1, 1))
    arange = np.arange(size)
    rows = x + arange.reshape((size, 1, 1))
    cols = y + arange.reshape((1, size, 1))
    pad = (padding, padding)
    # Zero-pad only the H and W axes.
    data = np.pad(data, ((0, 0),) * (data.ndim - 3) + (pad, pad, (0, 0)))
    data = np.take_along_axis(data, rows, axis=-3)
    data = np.take_along_axis(data, cols, axis=-2)
    return data
def random_horizontal_flip(data, rng=None):
    """Flip each image left-right (axis -2) with probability 0.5, in place.

    Args:
        data: array of shape (..., H, W, C); one coin flip per leading index.
        rng: optional numpy Generator. Fix: the original default
            `rng=default_rng()` was evaluated once at import time
            (call-in-default pitfall); now created per call when None.

    Returns:
        `data` (mutated in place).
    """
    if rng is None:
        rng = default_rng()
    to_flip = rng.random(size=data.shape[:-3]) < 0.5
    data[to_flip] = np.flip(data[to_flip], axis=-2)
    return data
def normalize(data, mean, std):
    """Standardize `data` in place: subtract `mean`, then divide by `std`.

    Returns the same (mutated) array for call chaining.
    """
    np.subtract(data, mean, out=data)
    np.divide(data, std, out=data)
    return data
def _blend(img1, img2, ratio):
ratio = ratio.reshape((-1,) + (1,) * (img1.ndim - 1))
img1 *= ratio
img1 += (1. - ratio) * img2
img1 = np.clip(img1, 0., 1., out=img1)
return img1
def rgb_to_grayscale(data):
    """Luma-weighted grayscale (ITU-R 601 coefficients) over the channel axis.

    Returns an array with the same leading shape and a trailing size-1 channel.
    """
    weights = np.array([0.2989, 0.587, 0.114], dtype=data.dtype)
    return (data @ weights)[..., np.newaxis]
def adjust_brightness(data, factor):
    """Scale brightness in place: blend each image toward all-black by `factor`."""
    black = np.zeros_like(data)
    return _blend(data, black, factor)
def adjust_saturation(data, factor):
    """Adjust saturation in place: blend each image toward its grayscale version."""
    return _blend(data, rgb_to_grayscale(data), factor)
def adjust_contrast(data, factor):
    """Adjust contrast in place: blend each image toward its mean gray level."""
    gray = rgb_to_grayscale(data)
    mean_gray = np.mean(gray, axis=(-3, -2, -1), keepdims=True)
    return _blend(data, mean_gray, factor)
def color_jitter(data, brightness, contrast, saturation, rng=None):
    """Apply brightness/contrast/saturation jitter in a random order, in place.

    For each leading (batch) index a factor is drawn uniformly from
    [1 - x, 1 + x] for each of the three transforms, and the three transforms
    are applied in an independently shuffled order per image.

    Args:
        data: array of shape (..., H, W, C), values in [0, 1].
        brightness, contrast, saturation: jitter half-widths.
        rng: optional numpy Generator. Fix: the original default
            `rng=default_rng()` was evaluated once at import time
            (call-in-default pitfall); now created per call when None.

    Returns:
        `data` (mutated in place).
    """
    if rng is None:
        rng = default_rng()
    # order[k] tells, at step k, which transform (0/1/2) each image gets.
    order = np.argsort(rng.random(size=(3,) + data.shape[:-3]), axis=0)
    brightness = rng.uniform(1. - brightness, 1. + brightness, size=data.shape[:-3])
    contrast = rng.uniform(1. - contrast, 1. + contrast, size=data.shape[:-3])
    saturation = rng.uniform(1. - saturation, 1. + saturation, size=data.shape[:-3])
    for transform in order:
        data[transform == 0] = adjust_brightness(data[transform == 0], brightness[transform == 0])
        data[transform == 1] = adjust_contrast(data[transform == 1], contrast[transform == 1])
        data[transform == 2] = adjust_saturation(data[transform == 2], saturation[transform == 2])
    return data
| [
"numpy.clip",
"numpy.flip",
"numpy.random.default_rng",
"numpy.array",
"numpy.pad",
"numpy.zeros_like",
"numpy.arange",
"numpy.take_along_axis"
] | [((100, 113), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (111, 113), False, 'from numpy.random import default_rng\n'), ((265, 280), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (274, 280), True, 'import numpy as np\n'), ((414, 484), 'numpy.pad', 'np.pad', (['data', '(((0, 0),) * (data.ndim - 3) + (padding, padding, (0, 0)))'], {}), '(data, ((0, 0),) * (data.ndim - 3) + (padding, padding, (0, 0)))\n', (420, 484), True, 'import numpy as np\n'), ((496, 535), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'rows'], {'axis': '(-3)'}), '(data, rows, axis=-3)\n', (514, 535), True, 'import numpy as np\n'), ((547, 586), 'numpy.take_along_axis', 'np.take_along_axis', (['data', 'cols'], {'axis': '(-2)'}), '(data, cols, axis=-2)\n', (565, 586), True, 'import numpy as np\n'), ((642, 655), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (653, 655), False, 'from numpy.random import default_rng\n'), ((733, 764), 'numpy.flip', 'np.flip', (['data[to_flip]'], {'axis': '(-2)'}), '(data[to_flip], axis=-2)\n', (740, 764), True, 'import numpy as np\n'), ((1016, 1049), 'numpy.clip', 'np.clip', (['img1', '(0.0)', '(1.0)'], {'out': 'img1'}), '(img1, 0.0, 1.0, out=img1)\n', (1023, 1049), True, 'import numpy as np\n'), ((1104, 1154), 'numpy.array', 'np.array', (['[0.2989, 0.587, 0.114]'], {'dtype': 'data.dtype'}), '([0.2989, 0.587, 0.114], dtype=data.dtype)\n', (1112, 1154), True, 'import numpy as np\n'), ((1633, 1646), 'numpy.random.default_rng', 'default_rng', ([], {}), '()\n', (1644, 1646), False, 'from numpy.random import default_rng\n'), ((1268, 1287), 'numpy.zeros_like', 'np.zeros_like', (['data'], {}), '(data)\n', (1281, 1287), True, 'import numpy as np\n')] |
# Copyright (c) 2015, <NAME> (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from GPy.inference.latent_function_inference.var_dtc import VarDTC
from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides,\
dpotrs, dpotri, symmetrify, mdot
from GPy.core.parameterization.variational import VariationalPosterior
from GPy.util import diag
from GPy.inference.latent_function_inference.posterior import Posterior
# Precomputed constant used by the Gaussian log-likelihood terms.
log_2_pi = np.log(2*np.pi)
import logging, itertools
logger = logging.getLogger('vardtc')
class VarDTCFixedCov(VarDTC):
    """
    An object for inference when the likelihood is Gaussian, but we want to do sparse inference.
    The function self.inference returns a Posterior object, which summarizes
    the posterior.
    For efficiency, we sometimes work with the cholesky of Y*Y.T. To save repeatedly recomputing this, we cache it.
    save_per_dim:
        save the log likelihood per output dimension, this is for testing the differential gene expression analysis using BGPLVM and MRD
    """
    # Jitter added to the diagonal of Kmm before the Cholesky for stability.
    const_jitter = 1e-6
    def __init__(self, limit=1, save_per_dim=False):
        # limit: number of results each Cacher keeps (memoization depth).
        #self._YYTfactor_cache = caching.cache()
        from paramz.caching import Cacher
        self.limit = limit
        self.get_trYYT = Cacher(self._get_trYYT, limit)
        self.get_YYTfactor = Cacher(self._get_YYTfactor, limit)
        self.save_per_dim = save_per_dim
    def set_limit(self, limit):
        # Adjust the cache depth of both cached computations.
        self.get_trYYT.limit = limit
        self.get_YYTfactor.limit = limit
    def _get_trYYT(self, Y):
        # Trace of Y Y^T, i.e. the squared Frobenius norm of Y.
        return np.einsum("ij,ij->", Y, Y)
        # faster than, but same as:
        # return np.sum(np.square(Y))
    def __getstate__(self):
        # has to be overridden, as Cacher objects cannot be pickled.
        return self.limit
    def __setstate__(self, state):
        # has to be overridden, as Cacher objects cannot be pickled.
        self.limit = state
        from paramz.caching import Cacher
        self.get_trYYT = Cacher(self._get_trYYT, self.limit)
        self.get_YYTfactor = Cacher(self._get_YYTfactor, self.limit)
    def _get_YYTfactor(self, Y):
        """
        find a matrix L which satisfies LLT = YYT.
        Note that L may have fewer columns than Y.
        """
        N, D = Y.shape
        if (N>=D):
            # Tall Y already satisfies Y Y^T = Y Y^T; return it as a plain array.
            return Y.view(np.ndarray)
        else:
            # Wide Y: use the Cholesky of the (N x N) Gram matrix instead.
            return jitchol(tdot(Y))
    def compute_lik_per_dim(self, psi0, A, LB, _LBi_Lmi_psi1, beta, Y):
        # Per-output-dimension decomposition of the collapsed log likelihood
        # (same four terms as _compute_log_marginal_likelihood, kept per column of Y).
        lik_1 = (-0.5 * Y.shape[0] * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * np.einsum('ij,ij->j',Y,Y))
        lik_2 = -0.5 * (np.sum(beta * psi0) - np.trace(A)) * np.ones(Y.shape[1])
        lik_3 = -(np.sum(np.log(np.diag(LB))))
        lik_4 = .5* beta**2 * ((_LBi_Lmi_psi1.dot(Y).T)**2).sum(1)
        return lik_1 + lik_2 + lik_3 + lik_4
    def get_VVTfactor(self, Y, prec):
        return Y * prec # TODO chache this, and make it effective
    def inference(self, kern, X, Z, likelihood, Y, Y_metadata=None, Lm=None, dL_dKmm=None, fixed_covs_kerns=None, **kw):
        """Variational DTC inference with optional fixed-covariate kernels.

        Returns (Posterior, log_marginal, grad_dict). The interface mirrors
        VarDTC.inference; fixed_covs_kerns maps names to [covariate, kernel]
        pairs whose kernel matrices are added to Y Y^T.
        """
        _, output_dim = Y.shape
        # Uncertain inputs (BGPLVM-style) use the psi-statistics branch below.
        uncertain_inputs = isinstance(X, VariationalPosterior)
        #see whether we've got a different noise variance for each datum
        beta = 1./np.fmax(likelihood.gaussian_variance(Y_metadata), 1e-6)
        # VVT_factor is a matrix such that tdot(VVT_factor) = VVT...this is for efficiency!
        #self.YYTfactor = self.get_YYTfactor(Y)
        #VVT_factor = self.get_VVTfactor(self.YYTfactor, beta)
        het_noise = beta.size > 1
        if het_noise:
            raise(NotImplementedError("Heteroscedastic noise not implemented, should be possible though, feel free to try implementing it :)"))
        if beta.ndim == 1:
            beta = beta[:, None]
        # do the inference:
        num_inducing = Z.shape[0]
        num_data = Y.shape[0]
        # kernel computations, using BGPLVM notation
        Kmm = kern.K(Z).copy()
        diag.add(Kmm, self.const_jitter)
        if Lm is None:
            Lm = jitchol(Kmm)
        # The rather complex computations of A, and the psi stats
        if uncertain_inputs:
            psi0 = kern.psi0(Z, X)
            psi1 = kern.psi1(Z, X)
            if het_noise:
                psi2_beta = np.sum([kern.psi2(Z,X[i:i+1,:]) * beta_i for i,beta_i in enumerate(beta)],0)
            else:
                psi2_beta = kern.psi2(Z,X) * beta
            LmInv = dtrtri(Lm)
            # A = Lm^{-1} (beta * psi2) Lm^{-T}
            A = LmInv.dot(psi2_beta.dot(LmInv.T))
        else:
            psi0 = kern.Kdiag(X)
            psi1 = kern.K(X, Z)
            if het_noise:
                tmp = psi1 * (np.sqrt(beta))
            else:
                tmp = psi1 * (np.sqrt(beta))
            tmp, _ = dtrtrs(Lm, tmp.T, lower=1)
            A = tdot(tmp)
        # factor B
        B = np.eye(num_inducing) + A
        LB = jitchol(B)
        # back substutue C into psi1Vf
        #tmp, _ = dtrtrs(Lm, psi1.T.dot(VVT_factor), lower=1, trans=0)
        #_LBi_Lmi_psi1Vf, _ = dtrtrs(LB, tmp, lower=1, trans=0)
        #tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1Vf, lower=1, trans=1)
        #Cpsi1Vf, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
        # data fit and derivative of L w.r.t. Kmm
        #delit = tdot(_LBi_Lmi_psi1Vf)
        # Expose YYT to get additional covariates in (YYT + Kgg):
        tmp, _ = dtrtrs(Lm, psi1.T, lower=1, trans=0)
        _LBi_Lmi_psi1, _ = dtrtrs(LB, tmp, lower=1, trans=0)
        tmp, _ = dtrtrs(LB, _LBi_Lmi_psi1, lower=1, trans=1)
        Cpsi1, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
        # TODO: cache this:
        # Compute fixed covariates covariance:
        if fixed_covs_kerns is not None:
            K_fixed = 0
            # NOTE(review): dict.iteritems() is Python 2 only — under Python 3
            # this raises AttributeError unless fixed_covs_kerns is a custom
            # mapping providing iteritems(); consider .items().
            for name, [cov, k] in fixed_covs_kerns.iteritems():
                K_fixed += k.K(cov)
            #trYYT = self.get_trYYT(Y)
            YYT_covs = (tdot(Y) + K_fixed)
            data_term = beta**2 * YYT_covs
            trYYT_covs = np.trace(YYT_covs)
        else:
            data_term = beta**2 * tdot(Y)
            trYYT_covs = self.get_trYYT(Y)
        #trYYT = self.get_trYYT(Y)
        delit = mdot(_LBi_Lmi_psi1, data_term, _LBi_Lmi_psi1.T)
        data_fit = np.trace(delit)
        DBi_plus_BiPBi = backsub_both_sides(LB, output_dim * np.eye(num_inducing) + delit)
        if dL_dKmm is None:
            delit = -0.5 * DBi_plus_BiPBi
            delit += -0.5 * B * output_dim
            delit += output_dim * np.eye(num_inducing)
            # Compute dL_dKmm
            dL_dKmm = backsub_both_sides(Lm, delit)
        # derivatives of L w.r.t. psi
        dL_dpsi0, dL_dpsi1, dL_dpsi2 = _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm,
                                                        data_term, Cpsi1, DBi_plus_BiPBi,
                                                        psi1, het_noise, uncertain_inputs)
        # log marginal likelihood
        log_marginal = _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise,
                                                    psi0, A, LB, trYYT_covs, data_fit, Y)
        if self.save_per_dim:
            self.saved_vals = [psi0, A, LB, _LBi_Lmi_psi1, beta]
        # No heteroscedastics, so no _LBi_Lmi_psi1Vf:
        # For the interested reader, try implementing the heteroscedastic version, it should be possible
        _LBi_Lmi_psi1Vf = None # Is just here for documentation, so you can see, what it was.
        #noise derivatives
        dL_dR = _compute_dL_dR(likelihood,
                               het_noise, uncertain_inputs, LB,
                               _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A,
                               psi0, psi1, beta,
                               data_fit, num_data, output_dim, trYYT_covs, Y, None)
        dL_dthetaL = likelihood.exact_inference_gradients(dL_dR,Y_metadata)
        #put the gradients in the right places
        if uncertain_inputs:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dpsi0':dL_dpsi0,
                         'dL_dpsi1':dL_dpsi1,
                         'dL_dpsi2':dL_dpsi2,
                         'dL_dthetaL':dL_dthetaL}
        else:
            grad_dict = {'dL_dKmm': dL_dKmm,
                         'dL_dKdiag':dL_dpsi0,
                         'dL_dKnm':dL_dpsi1,
                         'dL_dthetaL':dL_dthetaL}
        if fixed_covs_kerns is not None:
            # For now, we do not take the gradients, we can compute them,
            # but the maximum likelihood solution is to switch off the additional covariates....
            dL_dcovs = beta * np.eye(K_fixed.shape[0]) - beta**2*tdot(_LBi_Lmi_psi1.T)
            grad_dict['dL_dcovs'] = -.5 * dL_dcovs
        #get sufficient things for posterior prediction
        #TODO: do we really want to do this in the loop?
        if 1:
            woodbury_vector = (beta*Cpsi1).dot(Y)
        else:
            # NOTE(review): dead debugging branch (`if 1:` above) — never executed.
            import ipdb; ipdb.set_trace()
            psi1V = np.dot(Y.T*beta, psi1).T
            tmp, _ = dtrtrs(Lm, psi1V, lower=1, trans=0)
            tmp, _ = dpotrs(LB, tmp, lower=1)
            woodbury_vector, _ = dtrtrs(Lm, tmp, lower=1, trans=1)
        # NOTE(review): the first dpotri/symmetrify result is discarded by the
        # reassignment two lines below; only the second dpotri call matters.
        Bi, _ = dpotri(LB, lower=1)
        symmetrify(Bi)
        Bi = -dpotri(LB, lower=1)[0]
        diag.add(Bi, 1)
        woodbury_inv = backsub_both_sides(Lm, Bi)
        #construct a posterior object
        post = Posterior(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm, mean=None, cov=None, K_chol=Lm)
        return post, log_marginal, grad_dict
def _compute_dL_dpsi(num_inducing, num_data, output_dim, beta, Lm, data_term, Cpsi1, DBi_plus_BiPBi, psi1, het_noise, uncertain_inputs):
    # Gradients of the collapsed bound w.r.t. the psi statistics.
    # For certain inputs (not uncertain_inputs) psi2's gradient is folded back
    # into psi1 (== Kmn) and dL_dpsi2 is returned as None.
    dL_dpsi0 = -0.5 * output_dim * (beta* np.ones([num_data, 1])).flatten()
    dL_dpsi1 = np.dot(data_term, Cpsi1.T)
    dL_dpsi2_beta = 0.5 * backsub_both_sides(Lm, output_dim * np.eye(num_inducing) - DBi_plus_BiPBi)
    if het_noise:
        if uncertain_inputs:
            # One dL_dpsi2 slice per datum, scaled by its own precision.
            dL_dpsi2 = beta[:, None] * dL_dpsi2_beta[None, :, :]
        else:
            dL_dpsi1 += 2.*np.dot(dL_dpsi2_beta, (psi1 * beta).T).T
            dL_dpsi2 = None
    else:
        dL_dpsi2 = beta * dL_dpsi2_beta
        if not uncertain_inputs:
            # subsume back into psi1 (==Kmn)
            dL_dpsi1 += 2.*np.dot(psi1, dL_dpsi2)
            dL_dpsi2 = None
    return dL_dpsi0, dL_dpsi1, dL_dpsi2
def _compute_dL_dR(likelihood, het_noise, uncertain_inputs, LB, _LBi_Lmi_psi1Vf, DBi_plus_BiPBi, Lm, A, psi0, psi1, beta, data_fit, num_data, output_dim, trYYT, Y, VVT_factr=None):
# the partial derivative vector for the likelihood
if likelihood.size == 0:
# save computation here.
dL_dR = None
elif het_noise:
if uncertain_inputs:
raise(NotImplementedError, "heteroscedatic derivates with uncertain inputs not implemented")
else:
#from ...util.linalg import chol_inv
#LBi = chol_inv(LB)
LBi, _ = dtrtrs(LB,np.eye(LB.shape[0]))
Lmi_psi1, nil = dtrtrs(Lm, psi1.T, lower=1, trans=0)
_LBi_Lmi_psi1, _ = dtrtrs(LB, Lmi_psi1, lower=1, trans=0)
dL_dR = -0.5 * beta + 0.5 * VVT_factr**2
dL_dR += 0.5 * output_dim * (psi0 - np.sum(Lmi_psi1**2,0))[:,None] * beta**2
dL_dR += 0.5*np.sum(mdot(LBi.T,LBi,Lmi_psi1)*Lmi_psi1,0)[:,None]*beta**2
dL_dR += -np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T * Y * beta**2
dL_dR += 0.5*np.dot(_LBi_Lmi_psi1Vf.T,_LBi_Lmi_psi1).T**2 * beta**2
else:
# likelihood is not heteroscedatic
dL_dR = -0.5 * num_data * output_dim * beta + 0.5 * trYYT * beta ** 2
dL_dR += 0.5 * output_dim * (psi0.sum() * beta ** 2 - np.trace(A) * beta)
dL_dR += beta * (0.5 * np.sum(A * DBi_plus_BiPBi) - data_fit)
return dL_dR
def _compute_log_marginal_likelihood(likelihood, num_data, output_dim, beta, het_noise, psi0, A, LB, trYYT_covs, data_fit, Y):
    # Collapsed variational lower bound on the log marginal likelihood:
    # lik_1 Gaussian normalization + data term, lik_2 trace correction,
    # lik_3 log-determinant of B via its Cholesky LB, lik_4 data fit.
    #compute log marginal likelihood
    if het_noise:
        lik_1 = -0.5 * num_data * output_dim * np.log(2. * np.pi) + 0.5 * output_dim * np.sum(np.log(beta)) - 0.5 * np.sum(beta.ravel() * np.square(Y).sum(axis=-1))
        lik_2 = -0.5 * output_dim * (np.sum(beta.flatten() * psi0) - np.trace(A))
    else:
        lik_1 = -0.5 * num_data * output_dim * (np.log(2. * np.pi) - np.log(beta)) - 0.5 * beta * trYYT_covs
        lik_2 = -0.5 * output_dim * (np.sum(beta * psi0) - np.trace(A))
    lik_3 = -output_dim * (np.sum(np.log(np.diag(LB))))
    lik_4 = 0.5 * data_fit
    log_marginal = lik_1 + lik_2 + lik_3 + lik_4
return log_marginal | [
"logging.getLogger",
"numpy.trace",
"numpy.sqrt",
"numpy.log",
"paramz.caching.Cacher",
"numpy.einsum",
"GPy.util.linalg.jitchol",
"numpy.dot",
"numpy.eye",
"numpy.ones",
"ipdb.set_trace",
"numpy.square",
"GPy.util.linalg.symmetrify",
"GPy.util.linalg.mdot",
"GPy.util.linalg.dtrtri",
"... | [((492, 509), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (498, 509), True, 'import numpy as np\n'), ((543, 570), 'logging.getLogger', 'logging.getLogger', (['"""vardtc"""'], {}), "('vardtc')\n", (560, 570), False, 'import logging, itertools\n'), ((9570, 9596), 'numpy.dot', 'np.dot', (['data_term', 'Cpsi1.T'], {}), '(data_term, Cpsi1.T)\n', (9576, 9596), True, 'import numpy as np\n'), ((1304, 1334), 'paramz.caching.Cacher', 'Cacher', (['self._get_trYYT', 'limit'], {}), '(self._get_trYYT, limit)\n', (1310, 1334), False, 'from paramz.caching import Cacher\n'), ((1364, 1398), 'paramz.caching.Cacher', 'Cacher', (['self._get_YYTfactor', 'limit'], {}), '(self._get_YYTfactor, limit)\n', (1370, 1398), False, 'from paramz.caching import Cacher\n'), ((1596, 1622), 'numpy.einsum', 'np.einsum', (['"""ij,ij->"""', 'Y', 'Y'], {}), "('ij,ij->', Y, Y)\n", (1605, 1622), True, 'import numpy as np\n'), ((2020, 2055), 'paramz.caching.Cacher', 'Cacher', (['self._get_trYYT', 'self.limit'], {}), '(self._get_trYYT, self.limit)\n', (2026, 2055), False, 'from paramz.caching import Cacher\n'), ((2085, 2124), 'paramz.caching.Cacher', 'Cacher', (['self._get_YYTfactor', 'self.limit'], {}), '(self._get_YYTfactor, self.limit)\n', (2091, 2124), False, 'from paramz.caching import Cacher\n'), ((3967, 3999), 'GPy.util.diag.add', 'diag.add', (['Kmm', 'self.const_jitter'], {}), '(Kmm, self.const_jitter)\n', (3975, 3999), False, 'from GPy.util import diag\n'), ((4856, 4866), 'GPy.util.linalg.jitchol', 'jitchol', (['B'], {}), '(B)\n', (4863, 4866), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((5335, 5371), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'psi1.T'], {'lower': '(1)', 'trans': '(0)'}), '(Lm, psi1.T, lower=1, trans=0)\n', (5341, 5371), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((5399, 5432), 
'GPy.util.linalg.dtrtrs', 'dtrtrs', (['LB', 'tmp'], {'lower': '(1)', 'trans': '(0)'}), '(LB, tmp, lower=1, trans=0)\n', (5405, 5432), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((5450, 5493), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['LB', '_LBi_Lmi_psi1'], {'lower': '(1)', 'trans': '(1)'}), '(LB, _LBi_Lmi_psi1, lower=1, trans=1)\n', (5456, 5493), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((5513, 5546), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'tmp'], {'lower': '(1)', 'trans': '(1)'}), '(Lm, tmp, lower=1, trans=1)\n', (5519, 5546), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((6109, 6156), 'GPy.util.linalg.mdot', 'mdot', (['_LBi_Lmi_psi1', 'data_term', '_LBi_Lmi_psi1.T'], {}), '(_LBi_Lmi_psi1, data_term, _LBi_Lmi_psi1.T)\n', (6113, 6156), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((6176, 6191), 'numpy.trace', 'np.trace', (['delit'], {}), '(delit)\n', (6184, 6191), True, 'import numpy as np\n'), ((8978, 8997), 'GPy.util.linalg.dpotri', 'dpotri', (['LB'], {'lower': '(1)'}), '(LB, lower=1)\n', (8984, 8997), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((9006, 9020), 'GPy.util.linalg.symmetrify', 'symmetrify', (['Bi'], {}), '(Bi)\n', (9016, 9020), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((9066, 9081), 'GPy.util.diag.add', 'diag.add', (['Bi', '(1)'], {}), '(Bi, 1)\n', (9074, 9081), False, 'from GPy.util import diag\n'), ((9106, 9132), 'GPy.util.linalg.backsub_both_sides', 'backsub_both_sides', (['Lm', 'Bi'], {}), '(Lm, Bi)\n', (9124, 9132), False, 'from GPy.util.linalg import jitchol, 
tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((9187, 9299), 'GPy.inference.latent_function_inference.posterior.Posterior', 'Posterior', ([], {'woodbury_inv': 'woodbury_inv', 'woodbury_vector': 'woodbury_vector', 'K': 'Kmm', 'mean': 'None', 'cov': 'None', 'K_chol': 'Lm'}), '(woodbury_inv=woodbury_inv, woodbury_vector=woodbury_vector, K=Kmm,\n mean=None, cov=None, K_chol=Lm)\n', (9196, 9299), False, 'from GPy.inference.latent_function_inference.posterior import Posterior\n'), ((2665, 2684), 'numpy.ones', 'np.ones', (['Y.shape[1]'], {}), '(Y.shape[1])\n', (2672, 2684), True, 'import numpy as np\n'), ((4040, 4052), 'GPy.util.linalg.jitchol', 'jitchol', (['Kmm'], {}), '(Kmm)\n', (4047, 4052), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((4438, 4448), 'GPy.util.linalg.dtrtri', 'dtrtri', (['Lm'], {}), '(Lm)\n', (4444, 4448), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((4733, 4759), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'tmp.T'], {'lower': '(1)'}), '(Lm, tmp.T, lower=1)\n', (4739, 4759), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((4776, 4785), 'GPy.util.linalg.tdot', 'tdot', (['tmp'], {}), '(tmp)\n', (4780, 4785), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((4818, 4838), 'numpy.eye', 'np.eye', (['num_inducing'], {}), '(num_inducing)\n', (4824, 4838), True, 'import numpy as np\n'), ((5939, 5957), 'numpy.trace', 'np.trace', (['YYT_covs'], {}), '(YYT_covs)\n', (5947, 5957), True, 'import numpy as np\n'), ((6504, 6533), 'GPy.util.linalg.backsub_both_sides', 'backsub_both_sides', (['Lm', 'delit'], {}), '(Lm, delit)\n', (6522, 6533), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, 
backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((8730, 8746), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (8744, 8746), False, 'import ipdb\n'), ((8813, 8848), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'psi1V'], {'lower': '(1)', 'trans': '(0)'}), '(Lm, psi1V, lower=1, trans=0)\n', (8819, 8848), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((8870, 8894), 'GPy.util.linalg.dpotrs', 'dpotrs', (['LB', 'tmp'], {'lower': '(1)'}), '(LB, tmp, lower=1)\n', (8876, 8894), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((8928, 8961), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'tmp'], {'lower': '(1)', 'trans': '(1)'}), '(Lm, tmp, lower=1, trans=1)\n', (8934, 8961), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((2407, 2414), 'GPy.util.linalg.tdot', 'tdot', (['Y'], {}), '(Y)\n', (2411, 2414), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((2577, 2604), 'numpy.einsum', 'np.einsum', (['"""ij,ij->j"""', 'Y', 'Y'], {}), "('ij,ij->j', Y, Y)\n", (2586, 2604), True, 'import numpy as np\n'), ((5852, 5859), 'GPy.util.linalg.tdot', 'tdot', (['Y'], {}), '(Y)\n', (5856, 5859), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((6006, 6013), 'GPy.util.linalg.tdot', 'tdot', (['Y'], {}), '(Y)\n', (6010, 6013), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((6431, 6451), 'numpy.eye', 'np.eye', (['num_inducing'], {}), '(num_inducing)\n', (6437, 6451), True, 'import numpy as np\n'), ((8767, 8791), 'numpy.dot', 'np.dot', (['(Y.T * beta)', 'psi1'], {}), '(Y.T * beta, psi1)\n', (8773, 8791), True, 'import numpy as 
np\n'), ((9035, 9054), 'GPy.util.linalg.dpotri', 'dpotri', (['LB'], {'lower': '(1)'}), '(LB, lower=1)\n', (9041, 9054), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((10075, 10097), 'numpy.dot', 'np.dot', (['psi1', 'dL_dpsi2'], {}), '(psi1, dL_dpsi2)\n', (10081, 10097), True, 'import numpy as np\n'), ((10817, 10853), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['Lm', 'psi1.T'], {'lower': '(1)', 'trans': '(0)'}), '(Lm, psi1.T, lower=1, trans=0)\n', (10823, 10853), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((10885, 10923), 'GPy.util.linalg.dtrtrs', 'dtrtrs', (['LB', 'Lmi_psi1'], {'lower': '(1)', 'trans': '(0)'}), '(LB, Lmi_psi1, lower=1, trans=0)\n', (10891, 10923), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((12028, 12039), 'numpy.trace', 'np.trace', (['A'], {}), '(A)\n', (12036, 12039), True, 'import numpy as np\n'), ((12197, 12216), 'numpy.sum', 'np.sum', (['(beta * psi0)'], {}), '(beta * psi0)\n', (12203, 12216), True, 'import numpy as np\n'), ((12219, 12230), 'numpy.trace', 'np.trace', (['A'], {}), '(A)\n', (12227, 12230), True, 'import numpy as np\n'), ((12273, 12284), 'numpy.diag', 'np.diag', (['LB'], {}), '(LB)\n', (12280, 12284), True, 'import numpy as np\n'), ((2527, 2546), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (2533, 2546), True, 'import numpy as np\n'), ((2548, 2560), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (2554, 2560), True, 'import numpy as np\n'), ((2628, 2647), 'numpy.sum', 'np.sum', (['(beta * psi0)'], {}), '(beta * psi0)\n', (2634, 2647), True, 'import numpy as np\n'), ((2650, 2661), 'numpy.trace', 'np.trace', (['A'], {}), '(A)\n', (2658, 2661), True, 'import numpy as np\n'), ((2717, 2728), 'numpy.diag', 'np.diag', (['LB'], {}), '(LB)\n', (2724, 2728), True, 'import numpy as 
np\n'), ((4634, 4647), 'numpy.sqrt', 'np.sqrt', (['beta'], {}), '(beta)\n', (4641, 4647), True, 'import numpy as np\n'), ((4697, 4710), 'numpy.sqrt', 'np.sqrt', (['beta'], {}), '(beta)\n', (4704, 4710), True, 'import numpy as np\n'), ((6254, 6274), 'numpy.eye', 'np.eye', (['num_inducing'], {}), '(num_inducing)\n', (6260, 6274), True, 'import numpy as np\n'), ((8404, 8428), 'numpy.eye', 'np.eye', (['K_fixed.shape[0]'], {}), '(K_fixed.shape[0])\n', (8410, 8428), True, 'import numpy as np\n'), ((8439, 8460), 'GPy.util.linalg.tdot', 'tdot', (['_LBi_Lmi_psi1.T'], {}), '(_LBi_Lmi_psi1.T)\n', (8443, 8460), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n'), ((9521, 9543), 'numpy.ones', 'np.ones', (['[num_data, 1]'], {}), '([num_data, 1])\n', (9528, 9543), True, 'import numpy as np\n'), ((9659, 9679), 'numpy.eye', 'np.eye', (['num_inducing'], {}), '(num_inducing)\n', (9665, 9679), True, 'import numpy as np\n'), ((9851, 9889), 'numpy.dot', 'np.dot', (['dL_dpsi2_beta', '(psi1 * beta).T'], {}), '(dL_dpsi2_beta, (psi1 * beta).T)\n', (9857, 9889), True, 'import numpy as np\n'), ((10767, 10786), 'numpy.eye', 'np.eye', (['LB.shape[0]'], {}), '(LB.shape[0])\n', (10773, 10786), True, 'import numpy as np\n'), ((11841, 11860), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (11847, 11860), True, 'import numpy as np\n'), ((12099, 12118), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (12105, 12118), True, 'import numpy as np\n'), ((12120, 12132), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (12126, 12132), True, 'import numpy as np\n'), ((11504, 11515), 'numpy.trace', 'np.trace', (['A'], {}), '(A)\n', (11512, 11515), True, 'import numpy as np\n'), ((11555, 11581), 'numpy.sum', 'np.sum', (['(A * DBi_plus_BiPBi)'], {}), '(A * DBi_plus_BiPBi)\n', (11561, 11581), True, 'import numpy as np\n'), ((11888, 11900), 'numpy.log', 'np.log', (['beta'], {}), '(beta)\n', (11894, 
11900), True, 'import numpy as np\n'), ((11025, 11049), 'numpy.sum', 'np.sum', (['(Lmi_psi1 ** 2)', '(0)'], {}), '(Lmi_psi1 ** 2, 0)\n', (11031, 11049), True, 'import numpy as np\n'), ((11175, 11215), 'numpy.dot', 'np.dot', (['_LBi_Lmi_psi1Vf.T', '_LBi_Lmi_psi1'], {}), '(_LBi_Lmi_psi1Vf.T, _LBi_Lmi_psi1)\n', (11181, 11215), True, 'import numpy as np\n'), ((11256, 11296), 'numpy.dot', 'np.dot', (['_LBi_Lmi_psi1Vf.T', '_LBi_Lmi_psi1'], {}), '(_LBi_Lmi_psi1Vf.T, _LBi_Lmi_psi1)\n', (11262, 11296), True, 'import numpy as np\n'), ((11932, 11944), 'numpy.square', 'np.square', (['Y'], {}), '(Y)\n', (11941, 11944), True, 'import numpy as np\n'), ((11099, 11125), 'GPy.util.linalg.mdot', 'mdot', (['LBi.T', 'LBi', 'Lmi_psi1'], {}), '(LBi.T, LBi, Lmi_psi1)\n', (11103, 11125), False, 'from GPy.util.linalg import jitchol, tdot, dtrtri, dtrtrs, backsub_both_sides, dpotrs, dpotri, symmetrify, mdot\n')] |
# -*-coding:utf8-*-
"""
author:zhangyu
用线性模型检查文件,在测试中
email:<EMAIL>
"""
from __future__ import division
import numpy as np
from sklearn.externals import joblib
import math
import sys
sys.path.append("../")
import LR.util.get_feature_num as gf
def get_test_data(test_file: str, feature_num_file: str):
    """Load the test set from disk.

    Args:
        test_file: CSV file whose feature columns are followed by the label.
        feature_num_file: file recording how many feature columns exist.
    Returns:
        Tuple of (feature matrix, label vector) as float32 arrays.
    """
    num_features = gf.get_feature_num(feature_num_file)
    # Features occupy the first ``num_features`` columns, label is the last one.
    test_feature = np.genfromtxt(test_file, dtype=np.float32, delimiter=",",
                                 usecols=range(num_features))
    test_label = np.genfromtxt(test_file, dtype=np.float32, delimiter=",",
                               usecols=-1)
    return test_feature, test_label
return test_feature, test_label
def predict_by_lr_model(test_feature, lr_model):
    """Score samples with a fitted sklearn-style logistic-regression model.

    Args:
        test_feature: feature matrix accepted by ``lr_model.predict_proba``.
        lr_model: fitted model exposing ``predict_proba``.
    Returns:
        List with the predicted probability of the positive class (column 1)
        for each sample.
    """
    # Comprehension replaces the original index-based append loop.
    return [prob[1] for prob in lr_model.predict_proba(test_feature)]
def predict_by_lr_coef(test_feature, lr_coef):
    """Score samples directly from the learned coefficient vector.

    Applies the logistic sigmoid element-wise to the linear scores.
    """
    linear_scores = np.dot(test_feature, lr_coef)
    vectorized_sigmoid = np.frompyfunc(sigmoid, 1, 1)
    return vectorized_sigmoid(linear_scores)
def sigmoid(x):
    """Numerically stable logistic sigmoid.

    The naive ``1 / (1 + exp(-x))`` raises OverflowError for large negative
    ``x`` because ``math.exp`` overflows; branching on the sign keeps the
    exponent non-positive in both cases.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
def get_auc(predict_list, test_label):
    """Compute and print AUC using the rank-sum formula.

    auc = (sum(pos_rank) - pos_num*(pos_num + 1)/2) / (pos_num * neg_num)

    Args:
        predict_list: predicted scores.
        test_label: ground-truth labels (0 = negative, anything else = positive).
    Returns:
        The AUC score, or None when it is undefined (only one class present,
        which previously raised ZeroDivisionError).
    """
    # Sort (label, score) pairs by ascending score; rank 1 = lowest score.
    pairs = sorted(zip(test_label, predict_list), key=lambda ele: ele[1])
    pos_num = sum(1 for label, _ in pairs if label != 0)
    neg_num = len(pairs) - pos_num
    if pos_num == 0 or neg_num == 0:
        # AUC needs at least one sample of each class.
        print("auc:undefined (need both positive and negative samples)")
        return None
    total_pos_index = sum(rank for rank, (label, _) in enumerate(pairs, start=1)
                          if label != 0)
    auc_score = (total_pos_index - pos_num * (pos_num + 1) / 2) / (pos_num * neg_num)
    print("auc:%.5f" % (auc_score))
    return auc_score
def get_accuracy(predict_list, test_label):
    """Compute and print accuracy at a fixed 0.5 decision threshold.

    Args:
        predict_list: predicted scores.
        test_label: ground-truth labels (0/1).
    Returns:
        The accuracy, or None for empty input (which previously raised
        ZeroDivisionError).
    """
    score_thr = 0.5
    total_num = len(predict_list)
    if total_num == 0:
        print("accuracy_score:undefined (empty input)")
        return None
    # A sample is correct when thresholding its score reproduces its label.
    right_num = sum(1 for score, label in zip(predict_list, test_label)
                    if (1 if score >= score_thr else 0) == label)
    accuracy_score = right_num / total_num
    print("accuracy_score:%.5f" % (accuracy_score))
    return accuracy_score
def run_check_core(test_feature, test_label, model, score_func):
    """Score the test set and report AUC and accuracy.

    Args:
        test_feature: feature matrix.
        test_label: ground-truth labels.
        model: either a coefficient vector or a fitted model object,
            whichever ``score_func`` expects.
        score_func: scoring callable ``(features, model) -> scores``.
    """
    scores = score_func(test_feature, model)
    get_auc(scores, test_label)
    get_accuracy(scores, test_label)
def run_check(test_file, lr_coef_file, lr_model_file, feature_num_file):
    """Evaluate both model representations against the test set.

    Args:
        test_file: CSV test set.
        lr_coef_file: CSV file holding the learned coefficients (w1, w2, ...).
        lr_model_file: serialized sklearn model file.
        feature_num_file: file recording the feature-column count.
    """
    test_feature, test_label = get_test_data(test_file, feature_num_file)
    coefficients = np.genfromtxt(lr_coef_file, dtype=np.float32, delimiter=",")
    fitted_model = joblib.load(lr_model_file)
    # Check the pickled model and the raw coefficients separately.
    run_check_core(test_feature, test_label, fitted_model, predict_by_lr_model)
    run_check_core(test_feature, test_label, coefficients, predict_by_lr_coef)
if __name__ == "__main__":
    # CLI entry point: expects four positional file arguments.
    if len(sys.argv) < 5:
        print("usage: python xx.py test_file coef_file model_file feature_num_file")
        sys.exit()
    test_file, coef_file, model_file, feature_num_file = sys.argv[1:5]
    run_check(test_file, coef_file, model_file, feature_num_file)
| [
"sys.exit",
"numpy.genfromtxt",
"LR.util.get_feature_num.get_feature_num",
"sklearn.externals.joblib.load",
"numpy.dot",
"numpy.frompyfunc",
"math.exp",
"sys.path.append"
] | [((185, 207), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (200, 207), False, 'import sys\n'), ((437, 473), 'LR.util.get_feature_num.get_feature_num', 'gf.get_feature_num', (['feature_num_file'], {}), '(feature_num_file)\n', (455, 473), True, 'import LR.util.get_feature_num as gf\n'), ((491, 560), 'numpy.genfromtxt', 'np.genfromtxt', (['test_file'], {'dtype': 'np.float32', 'delimiter': '""","""', 'usecols': '(-1)'}), "(test_file, dtype=np.float32, delimiter=',', usecols=-1)\n", (504, 560), True, 'import numpy as np\n'), ((624, 703), 'numpy.genfromtxt', 'np.genfromtxt', (['test_file'], {'dtype': 'np.float32', 'delimiter': '""","""', 'usecols': 'feature_list'}), "(test_file, dtype=np.float32, delimiter=',', usecols=feature_list)\n", (637, 703), True, 'import numpy as np\n'), ((1108, 1136), 'numpy.frompyfunc', 'np.frompyfunc', (['sigmoid', '(1)', '(1)'], {}), '(sigmoid, 1, 1)\n', (1121, 1136), True, 'import numpy as np\n'), ((3333, 3393), 'numpy.genfromtxt', 'np.genfromtxt', (['lr_coef_file'], {'dtype': 'np.float32', 'delimiter': '""","""'}), "(lr_coef_file, dtype=np.float32, delimiter=',')\n", (3346, 3393), True, 'import numpy as np\n'), ((3409, 3435), 'sklearn.externals.joblib.load', 'joblib.load', (['lr_model_file'], {}), '(lr_model_file)\n', (3420, 3435), False, 'from sklearn.externals import joblib\n'), ((1161, 1190), 'numpy.dot', 'np.dot', (['test_feature', 'lr_coef'], {}), '(test_feature, lr_coef)\n', (1167, 1190), True, 'import numpy as np\n'), ((3734, 3744), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3742, 3744), False, 'import sys\n'), ((1265, 1277), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (1273, 1277), False, 'import math\n')] |
from credentials.blob_credentials import facts_sas_token, facts_container
from azure.storage.blob import ContainerClient, BlobClient
import pandas as pd
import os
import json
import colorsys
import random
import numpy as np
import cv2
import imageio
from matplotlib import patches
from matplotlib.patches import Polygon
import matplotlib.pyplot as plt
from datetime import datetime
import zlib
import base64
from mrcnn.model import MaskRCNN
from mrcnn.utils import resize_image
# Shared, module-level client for the "facts" blob container; all download
# helpers below reuse this single connection configuration.
account_url = "https://hecdf.blob.core.windows.net"
facts_blob_service = ContainerClient(account_url=account_url,
                                     container_name=facts_container,
                                     credential=facts_sas_token)
# ===============================
# INLINES
# ===============================
def get_people_local_path(sample_id: str, directory_path: str) -> str:
    """Local path of the people-annotation json for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}_people.json")
def get_poly_local_path(sample_id: str, directory_path: str) -> str:
    """Local path of the polygon-annotation json for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}_poly.json")
def get_image_local_path(sample_id: str, directory_path: str) -> str:
    """Local path of the raw jpg image for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}.jpg")
def get_result_local_path(sample_id: str, directory_path: str) -> str:
    """Local path of the compressed numpy result file for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}.npz")
def get_json_local_path(sample_id: str, directory_path: str) -> str:
    """Local path of the annotation json for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}.json")
def get_inference_result_path(sample_id: str, directory_path: str) -> str:
    """Local path of the per-sample inference-result json for ``sample_id``."""
    return os.path.join(directory_path, f"{sample_id}.json")
# ===============================
# INFERENCE MODE
# ===============================
def filter_resuLt_by_score(result: dict, thres: float = 0.9) -> dict:
    """Filter the inference results by the given confidence.

    Args:
        result: Mask R-CNN detection dict with 'rois', 'masks' (H, W, N),
            'class_ids' and 'scores' numpy arrays.
        thres: minimum confidence to keep a detection.
    Returns:
        A new dict with the same keys, restricted to detections whose score
        is >= ``thres``.

    Note: boolean indexing on the last mask axis replaces the original
    append-then-``np.stack`` loop, which raised ValueError when no detection
    passed the threshold.
    """
    keep = (result['scores'] >= thres)
    return {
        'rois': result['rois'][keep],
        'masks': result['masks'][..., keep],
        'class_ids': result['class_ids'][keep],
        'scores': result['scores'][keep],
    }
def retrieve_inference_to_json(model: MaskRCNN, ds,
                               image_id: str, json_dir: str) -> None:
    """Run Mask R-CNN inference on one dataset image and store the result
    as a per-sample COCO-style json file.

    Args:
        model: MaskRCNN model in inference mode.
        ds: dataset object providing ``load_image`` and ``image_info``.
        image_id: index of the image within ``ds``.
        json_dir: directory receiving the output json file.
    """
    # Load image from dataset
    original_image = ds.load_image(image_id)
    _, window, scale, padding, _ = resize_image(original_image, mode="pad64")
    # No rescaling applied — coordinates below are valid in the original frame.
    assert scale == 1
    # Retrieve predictions and keep only confident (>= 0.9) detections.
    height, width = original_image.shape[:2]
    result = model.detect([original_image], verbose=0)[0]
    filtered_result = filter_resuLt_by_score(result, 0.9)
    # Dict object to dump to json
    dump = {}
    dump["image"] = {
        "file_name": 'img/' + ds.image_info[image_id]['id'] + '.jpg',
        "id": int(image_id),
        "height": int(height),
        "width": int(width),
    }
    dump["annotations"] = []
    # All three per-detection arrays must agree on the detection count.
    assert filtered_result['rois'].shape[0] == \
        filtered_result['masks'].shape[-1] == \
        filtered_result['class_ids'].shape[0]
    # Encoding annotations into json
    for obj_id in range(filtered_result['rois'].shape[0]):
        roi = filtered_result['rois'][obj_id, :]
        mask = filtered_result['masks'][..., obj_id]
        class_id = filtered_result['class_ids'][obj_id]
        y1, x1, y2, x2 = int(roi[0]), int(roi[1]), int(roi[2]), int(roi[3])
        contours, _ = cv2.findContours(mask.astype(np.uint8),
                                       cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
        # NOTE(review): only the first external contour is kept — a mask split
        # into several components loses all but one piece. Confirm intended.
        cnt = contours[0]
        polygon = []
        # 1d flatten list of [x, y] coordinates
        for pt_id in range(cnt.shape[0]):
            polygon.append(int(cnt[pt_id, 0, 0]))
            polygon.append(int(cnt[pt_id, 0, 1]))
        obj = {'id': int(obj_id),
               'segmentation': [polygon],
               'area': float(cv2.contourArea(cnt)),
               # x, y, h, w
               'bbox': [x1, y1, x2 - x1, y2 - y1],
               'image_id': int(image_id),
               'category_id': int(class_id),
               'iscrowd': 0}
        dump["annotations"].append(obj)
    json_path = get_inference_result_path(ds.image_info[image_id]['id'],
                                          json_dir)
    with open(json_path, 'w') as f:
        json.dump(dump, f)
    return
def visualize_inference_sample(sample_id: str,
                               class_names: list,
                               image_dir: str,
                               json_dir: str,
                               render_dir: str) -> None:
    """Load inference result json file and render overlay on raw image.

    Args:
        sample_id: sample identifier (file stem shared by image and json).
        class_names: category-id-indexed list of class labels.
        image_dir: directory holding the raw jpg images.
        json_dir: directory holding the per-sample inference json files.
        render_dir: directory where the rendered overlay jpg is saved.
    """
    image_path = get_image_local_path(sample_id, image_dir)
    json_path = get_inference_result_path(sample_id, json_dir)
    render_path = os.path.join(render_dir, sample_id + '.jpg')
    image = cv2.imread(image_path)
    # OpenCV loads BGR; convert for matplotlib display.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    with open(json_path, 'r') as f:
        data = json.load(f)
    # The json must describe this exact image.
    assert image.shape[0] == data["image"]["height"]
    assert image.shape[1] == data["image"]["width"]
    fig, ax = plt.subplots(1, figsize=(20,20))
    colors = random_colors(len(data["annotations"]))
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(f"Sample_id: {sample_id}, shape {image.shape}")
    masked_image = image.astype(np.uint32).copy()
    for i, instance in enumerate(data["annotations"]):
        # Dashed bounding box in this instance's color.
        x, y, w, h = instance["bbox"]
        p = patches.Rectangle((x, y), w, h, linewidth=2,
                              alpha=0.7, linestyle="dashed",
                              edgecolor=colors[i], facecolor='none')
        ax.add_patch(p)
        # Segmentation outline: flat [x0, y0, x1, y1, ...] -> (N, 2) points.
        polygon = instance["segmentation"][0]
        polygon = np.array(polygon).reshape(-1, 2)
        p = Polygon(polygon, facecolor="none", edgecolor=colors[i],
                    linewidth=None, fill=True)
        p.set_fill(True)
        ax.add_patch(p)
        # Label
        ax.text(x, y + 8, class_names[instance["category_id"]],
                color='w', size=11, backgroundcolor="none")
    ax.imshow(masked_image.astype(np.uint8))
    plt.savefig(render_path)
    plt.close(fig)
    return
def dump_coco_format(sample_ids: list,
                     json_dir: str,
                     coco_json_path: str,
                     class_names: list) -> None:
    """Merge individual per-sample json files into one COCO-format file.

    Args:
        sample_ids: ids of the samples to merge.
        json_dir: directory containing the per-sample inference json files.
        coco_json_path: output path for the merged COCO json.
        class_names: category-id-indexed list of category names.
    """
    dump_dict = {
        "categories": [],
        "info": {
            "description": "Chronsite test set, Group 8.",
            "year": 2020
        },
        "licenses": [],
        "images": [],
        "annotations": []
    }
    for class_id, class_name in enumerate(class_names):
        dump_dict["categories"].append({
            # COCO spec expects the lowercase key "supercategory";
            # the previous capitalized "Supercategory" broke COCO consumers.
            "supercategory": "none",
            "name": class_name,
            "id": class_id
        })
    annotation_counter = 0
    for image_id, sample_id in enumerate(sample_ids):
        json_path = get_inference_result_path(sample_id, json_dir)
        with open(json_path, 'r') as fp:
            data = json.load(fp)
        # Re-key images and annotations with globally unique ids.
        image_meta = data["image"]
        image_meta["id"] = image_id
        dump_dict["images"].append(image_meta)
        for anno in data["annotations"]:
            annotation_counter += 1
            anno["image_id"] = image_id
            anno["id"] = annotation_counter
            dump_dict["annotations"].append(anno)
    with open(coco_json_path, 'w') as fp:
        json.dump(dump_dict, fp)
    return
def verify_coco_json(json_path: str) -> None:
    """Check integrity of the big json file in coco format.

    Prints a summary of each section and asserts that image ids are unique,
    every annotation references an existing image and category, and
    annotation ids are unique.
    """
    with open(json_path, 'r') as fp:
        data = json.load(fp)
    # NOTE(review): "CALSSES" below is a typo in the printed output; left
    # unchanged here as this is a documentation-only pass.
    print(f"CALSSES {len(data['categories'])} classes:")
    class_id_name = {}
    for cat in data["categories"]:
        class_id_name.update({cat['id']: cat['name']})
    print(class_id_name)
    print(" ")
    print(f'INFO: {data["info"]}')
    print(" ")
    print(f'LICENSES: {data["licenses"]}')
    print(" ")
    print("IMAGES")
    shapes = set()
    image_names = []
    image_id = set()
    for image in data["images"]:
        shapes.add((image["height"], image["width"]))
        image_names.append(image["file_name"])
        image_id.add(image["id"])
    # Image ids must be unique (set size equals list size).
    assert len(image_names) == len(image_id)
    print(f"Shapes: {shapes}")
    print(f"Filenames: {image_names}")
    print(f"ID: {image_id}")
    print(" ")
    print("SEGMENTATIONS")
    anno_id = set()
    for obj in data["annotations"]:
        anno_id.add(obj["id"])
        # Every annotation must point at a known image and category.
        assert obj["image_id"] in image_id
        assert obj["category_id"] in list(class_id_name.keys())
    # Annotation ids must be unique across the whole file.
    assert len(anno_id) == len(data["annotations"])
    print(f"number of annotations {len(data['annotations'])}")
    return
# ===============================
# EXPOLORE DATA
# ===============================
def get_sample_role(sample_id: str,
                    train_sample_list: list,
                    val_sample_list: list) -> str:
    """Return which split ('train' or 'val') a sample belongs to.

    Train membership is checked first; returns None when the sample is in
    neither bucket.
    """
    for role, bucket in (("train", train_sample_list),
                         ("val", val_sample_list)):
        if sample_id in bucket:
            return role
    return None
def get_facts_blobs() -> list:
    """Return every blob currently stored in the facts container."""
    return list(facts_blob_service.list_blobs())
def fetch_train_set() -> pd.DataFrame:
    """Query all samples from azure storage.

    Walks every blob in the facts container, derives a sample id and
    metadata from each blob's path (three naming schemes exist, handled by
    the three branches below), and merges image/annotation blobs that share
    a sample id into one record.

    Returns:
        DataFrame with one row per sample that has both an image and at
        least one complete annotation.
    """
    blobs = list(facts_blob_service.list_blobs())
    # Keyed by sample_id so image and annotation blobs merge into one record.
    records = {}
    for blob in blobs:
        # remove irrelevant files
        file_name = blob.name
        if ".DS_Store" in file_name:
            continue
        split_file_name = file_name.split('/')
        # Detection_Train_Set_bis
        if "_Bis" in split_file_name[0]:
            # Sample id is the underscore-separated timestamp in the filename.
            pattern = ".jpg"
            index = split_file_name[2].find(pattern)
            date_time_string = split_file_name[2][:index]
            sample_id = date_time_string
            try:
                date_time = datetime.strptime(date_time_string, "%Y_%m_%d_%H_%M_%S")
            except Exception:
                date_time = None
            if ".json" in file_name:
                # Annotation
                record = {
                    "sample_id": date_time_string,
                    "construction_site": "Analytic",
                    "annotation_blob_id": file_name,
                    "date_time": date_time,
                }
            else:
                # Image
                record = {
                    "sample_id": date_time_string,
                    "construction_site": "Analytic",
                    "image_blob_id": file_name,
                    "date_time": date_time,
                }
        elif split_file_name[0] == "Analytics_Train_Set":
            # Dash-separated timestamp; annotations come as a people/poly pair.
            pattern = ".jpg"
            index = split_file_name[-1].find(pattern)
            date_time_string = split_file_name[-1][:index]
            sample_id = date_time_string
            try:
                date_time = datetime.strptime(date_time_string, "%Y-%m-%d-%H-%M-%S")
            except Exception:
                date_time = None
            if ".json" in file_name:
                # Annotation
                if "people" in file_name:
                    record = {
                        "sample_id": sample_id,
                        "construction_site": "Analytic2",
                        "annotation_people_blob_id": file_name,
                        "date_time": date_time,
                    }
                elif "poly" in file_name:
                    record = {
                        "sample_id": sample_id,
                        "construction_site": "Analytic2",
                        "annotation_poly_blob_id": file_name,
                        "date_time": date_time,
                    }
            else:
                record = {
                    "sample_id": sample_id,
                    "construction_site": "Analytic2",
                    "image_blob_id": file_name,
                    "date_time": date_time,
                }
        else:
            # Detection_Train_Set
            # Site name sits between "Batch2__" and "frame"; no timestamp here.
            pattern1 = "Batch2__"
            pattern2 = "frame"
            pattern3 = ".jpg"
            index1 = split_file_name[2].find(pattern1)
            index2 = split_file_name[2].find(pattern2)
            index3 = split_file_name[2].find(pattern3)
            construction_site_name = split_file_name[2][index1+len(pattern1):index2]
            sample_id = split_file_name[2][:index3]
            if ".json" in file_name:
                # Annotation
                record = {
                    "sample_id": sample_id,
                    "construction_site": construction_site_name,
                    "annotation_blob_id": file_name,
                    "date_time": None,
                }
            else:
                # Image
                record = {
                    "sample_id": sample_id,
                    "construction_site": construction_site_name,
                    "image_blob_id": file_name,
                    "date_time": None,
                }
        if sample_id in records:
            records[sample_id].update(record)
        else:
            records[sample_id] = record
    records = list(records.values())
    # Keep only samples with an image AND a complete annotation set
    # (either a single annotation blob, or both the poly and people blobs).
    records = [l for l in records if (l.get("image_blob_id")) and
               ((l.get("annotation_blob_id")) or ((l.get("annotation_poly_blob_id"))
                                                  and (l.get("annotation_people_blob_id"))))]
    return pd.DataFrame(records)
def download_blob(blob_id: str, local_path: str) -> None:
    """Fetch a single blob from azure storage and write it to ``local_path``."""
    client = BlobClient(account_url=account_url, container_name=facts_container,
                        blob_name=blob_id, snapshot=None,
                        credential=facts_sas_token)
    payload = client.download_blob().readall()
    with open(local_path, "wb") as fh:
        fh.write(payload)
    return
def download_sample(image_blob: str, annotation_blob: str,
                    sample_id: str, directory_path: str,
                    force_refresh: bool = False) -> None:
    """Download the image and annotation json for one sample id.

    Existing local files are reused unless ``force_refresh`` is set.
    """
    targets = (
        (image_blob, get_image_local_path(sample_id, directory_path)),
        (annotation_blob, get_json_local_path(sample_id, directory_path)),
    )
    for blob_id, local_path in targets:
        if force_refresh or not os.path.exists(local_path):
            download_blob(blob_id, local_path)
    return
def download_sample_analytics(image_blob: str,
                              annotation_people_blob: str,
                              annotation_poly_blob: str,
                              sample_id: str,
                              directory_path: str,
                              force_refresh: bool = False) -> None:
    """Download image, people json and poly json for one analytics sample.

    Existing local files are reused unless ``force_refresh`` is set.
    """
    targets = (
        (image_blob, get_image_local_path(sample_id, directory_path)),
        (annotation_people_blob, get_people_local_path(sample_id, directory_path)),
        (annotation_poly_blob, get_poly_local_path(sample_id, directory_path)),
    )
    for blob_id, local_path in targets:
        if force_refresh or not os.path.exists(local_path):
            download_blob(blob_id, local_path)
    return
def visualize_sample(sample_id: str, directory_path: str) -> None:
    """Render the image and annotations (rectangles and polygons) for a
    given sample id and show the figure.

    Args:
        sample_id: sample identifier (file stem of image and json).
        directory_path: directory holding both the jpg and the json.
    """
    image = cv2.imread(get_image_local_path(sample_id, directory_path))
    # OpenCV loads BGR; convert for matplotlib display.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    with open(get_json_local_path(sample_id, directory_path)) as f:
        data = json.load(f)
    _, ax = plt.subplots(1, figsize=(20, 20))
    colors = random_colors(len(data["objects"]))
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(f"Sample_id: {sample_id}, shape {image.shape}")
    masked_image = image.astype(np.uint32).copy()
    for i, instance in enumerate(data["objects"]):
        # NOTE(review): if an object's geometryType is neither "rectangle"
        # nor "polygon", x1/y1 below are stale from the previous iteration
        # (or undefined on the first) — confirm all objects use these types.
        if instance["geometryType"] == "rectangle":
            # Exterior points are [x, y]; two corners define the box.
            y1, x1, y2, x2 = instance["points"]["exterior"][0][1], \
                instance["points"]["exterior"][0][0], \
                instance["points"]["exterior"][1][1], \
                instance["points"]["exterior"][1][0]
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=colors[i], facecolor='none')
            ax.add_patch(p)
        if instance["geometryType"] == "polygon":
            y1, x1 = instance["points"]["exterior"][0][1], \
                instance["points"]["exterior"][0][0]
            p = Polygon(instance["points"]["exterior"],
                        facecolor="none", edgecolor=colors[i],
                        linewidth=None, fill=True)
            p.set_fill(True)
            ax.add_patch(p)
        # Label
        ax.text(x1, y1 + 8, instance["classTitle"],
                color='w', size=11, backgroundcolor="none")
    ax.imshow(masked_image.astype(np.uint8))
    plt.show()
    return
def visualize_sample_analytic(sample_id: str, directory_path: str) -> None:
    """Render the image and annotations for a given sample id in the
    analytic set (people rectangles plus bitmap polygons) and show it.

    Args:
        sample_id: sample identifier (file stem of image and jsons).
        directory_path: directory holding the jpg, people json and poly json.
    """
    image = cv2.imread(get_image_local_path(sample_id, directory_path))
    # OpenCV loads BGR; convert for matplotlib display.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    with open(get_people_local_path(sample_id, directory_path)) as f:
        annotation_people = json.load(f)
    with open(get_poly_local_path(sample_id, directory_path)) as f:
        annotation_poly = json.load(f)
    _, ax = plt.subplots(1, figsize=(20, 20))
    print(f"{len(annotation_people['objects'])} peoples + {len(annotation_poly['objects'])} polygons")
    # One distinct color per object across both annotation files.
    colors = random_colors(len(annotation_people["objects"]) +
                           len(annotation_poly["objects"]))
    height, width = image.shape[:2]
    ax.set_ylim(height + 10, -10)
    ax.set_xlim(-10, width + 10)
    ax.axis('off')
    ax.set_title(f"Sample_id: {sample_id}, shape {image.shape}")
    masked_image = image.astype(np.uint32).copy()
    for i, instance in enumerate(annotation_people["objects"]):
        if instance["geometryType"] == "rectangle":
            # Exterior points are [x, y]; two corners define the box.
            y1, x1, y2, x2 = instance["points"]["exterior"][0][1], \
                instance["points"]["exterior"][0][0], \
                instance["points"]["exterior"][1][1], \
                instance["points"]["exterior"][1][0]
            p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,
                                  alpha=0.7, linestyle="dashed",
                                  edgecolor=colors[i], facecolor='none')
            ax.add_patch(p)
            # Label
            ax.text(x1, y1 + 8, instance["classTitle"],
                    color='w', size=11, backgroundcolor="none")
    for j, instance in enumerate(annotation_poly["objects"]):
        if instance["geometryType"] == "bitmap":
            # base64_2_mask is defined elsewhere in the project — it decodes
            # the base64/zlib bitmap payload into a binary mask.
            mask = base64_2_mask(instance["bitmap"]["data"])
            x1, y1 = instance["bitmap"]["origin"][0], \
                instance["bitmap"]["origin"][1]
            contours, hierarchy = cv2.findContours(mask.astype(np.uint8),
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            # Only the first external contour is drawn.
            cnt = np.squeeze(contours[0], axis=1)
            # Depends on whether the mini-mask was adopted
            # cnt += [x1, y1]
            p = Polygon(cnt, facecolor="none",
                        edgecolor=colors[len(annotation_people["objects"])+j],
                        linewidth=None, fill=True)
            p.set_fill(True)
            ax.add_patch(p)
            # Label
            ax.text(x1, y1 + 8, instance["classTitle"],
                    color='w', size=11, backgroundcolor="none")
    ax.imshow(masked_image.astype(np.uint8))
    plt.show()
    return
def render_heatmap(df: pd.DataFrame, local_directory: str) -> None:
    """Visualize the heatmap of concentration of workers.

    Accumulates "People_model" bounding boxes over all samples in ``df``,
    smooths the result, and blends a rainbow colormap over the last sample's
    image wherever the activity exceeds 1% of the peak.

    Args:
        df: DataFrame whose first column holds sample ids (column
            "sample_id" is also read per row).
        local_directory: directory holding the images and people jsons.

    Note: the deprecated ``np.float`` alias (removed in NumPy >= 1.24,
    where it raises AttributeError) is replaced by ``np.float64`` — the
    exact dtype the alias resolved to.
    """
    image = read_image(get_image_local_path(df.iloc[-1, 0], local_directory))
    heatmap = np.zeros(image.shape[:2], dtype=np.float64)
    for index, row in df.iterrows():
        sample_id = row["sample_id"]
        mask_path = get_people_local_path(sample_id, local_directory)
        with open(mask_path) as f:
            annotations = json.load(f)
        for obj in annotations["objects"]:
            if obj["classTitle"] == "People_model":
                # Hard-coded frame size — assumes all masks are 1024x1280;
                # TODO confirm this matches every construction-site camera.
                mask = np.zeros((1024, 1280), dtype=np.float64)
                mask = draw_rectangle(mask, obj['points']['exterior'])
                heatmap += mask
    # Box-blur the accumulated counts; the scale factor is irrelevant
    # because the render is normalized by its maximum below.
    kernel = np.ones((10, 10), np.float32)/25
    heatmap = cv2.filter2D(heatmap, -1, kernel)
    heatmap_render = (heatmap / np.max(np.max(heatmap))) * 255.0
    heatmap_render = cv2.applyColorMap(heatmap_render.astype(np.uint8),
                                      cv2.COLORMAP_RAINBOW)
    # Blend colormap onto the image only where activity >= 1% of the peak.
    heatmap_mask = heatmap >= np.max(np.max(heatmap))*0.01
    image[heatmap_mask] = image[heatmap_mask] * 0.5 + \
        heatmap_render[heatmap_mask] * 0.5
    _ = plt.figure(figsize=(12, 12))
    plt.axis('off')
    print(image.shape)
    plt.imshow(image)
    return
# ===============================
# EVALUATION FUNCTIONS
# ===============================
def get_precision_recall(df: pd.DataFrame) -> (list, list):
"""Calculate P/R pairs for a range of confidence_levels"""
precisions = [0]
recalls = [1]
for confidence_level in np.arange(0, 1.0, 0.01):
tp = len(df[(df.pred_class == df.gt_class) &
(df.pred_score >= confidence_level)])
fn = len(df[((df.pred_class < 0) |
(df.pred_score < confidence_level)) &
(df.gt_class > -1)])
fp = len(df[((df.pred_class > -1) &
(df.pred_score >= confidence_level)) &
(df.gt_class < 0)])
precisions.append(tp/(tp+fp+1e-5))
recalls.append(tp/(tp+fn+1e-5))
precisions.append(1)
recalls.append(0)
return precisions, recalls
def calculate_ap_from_pr(precisions: list, recalls: list) -> float:
    """Integrate the P-R curve with the trapezoidal rule to get Average
    Precision.

    Assumes ``recalls`` is monotonically decreasing (as produced by
    ``get_precision_recall``).
    """
    area = 0
    for (p0, r0), (p1, r1) in zip(zip(precisions, recalls),
                                  zip(precisions[1:], recalls[1:])):
        area += (r0 - r1) * (p0 + p1) / 2
    return area
def get_object_matches(result: dict,
                       pred_matches: list,
                       gt_matches: list,
                       gt_class_id: list,
                       image_id: int,
                       IoUs: list) -> list:
    """Merge predicted and ground-truth match information into one list.

    Emits one record per prediction (with the class of its matched ground
    truth, or -1 when unmatched) plus one record per unmatched ground
    truth (with pred_class -1).
    """
    best_ious = np.max(IoUs, axis=1)  # best overlap per prediction
    assert len(best_ious) == len(pred_matches)
    records = []
    rows = zip(result["scores"], pred_matches, result["class_ids"], best_ious)
    for score, match_idx, cls_id, iou in rows:
        gt_cls = gt_class_id[int(match_idx)] if match_idx > -1 else -1
        records.append({"image_id": image_id,
                        "pred_class": int(cls_id),
                        "gt_class": int(gt_cls),
                        "pred_score": score,
                        "highest_mIoU": iou})
    for match_idx, gt_cls in zip(gt_matches, gt_class_id):
        if match_idx == -1:
            # Ground-truth object that no prediction covered.
            records.append({"image_id": image_id,
                            "pred_class": -1,
                            "gt_class": int(gt_cls),
                            "pred_score": 0,
                            "highest_mIoU": 0})
    return records
# ===============================
# MISCELLANEOUS FUNCTIONS
# ===============================
def read_imageio(image_path: str):
    """Load an image from *image_path* via the imageio backend."""
    return imageio.imread(image_path)
def read_image(image_path: str):
    """Load an image with OpenCV and convert it from BGR to RGB order."""
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb
def random_colors(N: int, bright=True) -> list:
    """
    Generate N visually distinct RGB colors.

    Hues are spaced evenly around the HSV wheel, converted to RGB and
    returned in random order.
    """
    value = 1.0 if bright else 0.7
    colors = [colorsys.hsv_to_rgb(i / N, 1, value) for i in range(N)]
    random.shuffle(colors)
    return colors
def draw_rectangle(mask, bbox):
    """Fill the axis-aligned rectangle spanned by *bbox* into *mask*.

    *bbox* is a pair of [x, y] corner points; the rectangle is rendered
    as a filled 4-point contour with value 255.
    """
    (x1, y1), (x2, y2) = bbox[0], bbox[1]
    corners = np.array([[x1, y1], [x1, y2], [x2, y2], [x2, y1]], dtype=np.int32)
    cv2.drawContours(mask, [corners], -1, (255), -1)
    return mask
def draw_polygon(mask, contour):
    """Fill the polygon described by *contour* ([x, y] points) into *mask*."""
    points = np.array(contour, dtype=np.int32)
    cv2.drawContours(mask, [points], -1, (255), -1)
    return mask
def convert_annotations(annotations, image_shape):
    """Rasterize rectangle/polygon annotations into per-object binary masks.

    Returns a (H, W, num_objects) mask stack and the parallel list of
    class names.
    """
    masks = []
    class_names = []
    for obj in annotations["objects"]:
        canvas = np.zeros(image_shape, dtype=np.int8)
        geometry = obj["geometryType"]
        if geometry == "rectangle":
            canvas = draw_rectangle(canvas, obj['points']['exterior'])
        elif geometry == "polygon":
            canvas = draw_polygon(canvas, obj['points']['exterior'])
        masks.append(canvas)
        class_names.append(obj["classTitle"])
    masks = np.stack(masks, axis=2)
    return masks, class_names
def split_train_val_dataframe(df_all, split_frac=0.2, verbose=False):
    """Split sample ids into train/val lists, stratified by construction site.

    For each construction site, a fraction *split_frac* of its (shuffled)
    samples goes to validation and the rest to training.
    """
    site_counts = list(df_all.construction_site.value_counts().items())
    train_sample_ids = []
    val_sample_ids = []
    for site, total in site_counts:
        # print(site, total)
        ids = list(df_all[df_all.construction_site == site].sample_id.values)
        assert len(ids) == total
        n_val = int(total * split_frac)
        random.shuffle(ids)
        val_sample_ids += ids[:n_val]
        train_sample_ids += ids[n_val:]
        if verbose:
            print(f'Construction site {site}, total samples {total}')
            print(f'train samples {len(ids[n_val:])} , valid samples {len(ids[:n_val])}')
    return train_sample_ids, val_sample_ids
def base64_2_mask(s):
    """Decode a base64/zlib-compressed bitmap string into a boolean mask.

    The string wraps zlib-compressed image bytes; the mask is read from
    channel 3 (alpha) of the decoded image.

    Args:
        s: base64-encoded, zlib-compressed image bytes.

    Returns:
        2-D boolean numpy array where True marks mask pixels.
    """
    z = zlib.decompress(base64.b64decode(s))
    # np.fromstring is deprecated for binary input; np.frombuffer is the
    # supported replacement and yields the same uint8 values.
    n = np.frombuffer(z, np.uint8)
    mask = cv2.imdecode(n, cv2.IMREAD_UNCHANGED)[:, :, 3].astype(bool)
    return mask
| [
"azure.storage.blob.BlobClient",
"cv2.filter2D",
"colorsys.hsv_to_rgb",
"numpy.array",
"cv2.imdecode",
"numpy.arange",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.stack",
"cv2.contourArea",
"azure.storage.blob.ContainerClient",
"pandas.DataF... | [((552, 656), 'azure.storage.blob.ContainerClient', 'ContainerClient', ([], {'account_url': 'account_url', 'container_name': 'facts_container', 'credential': 'facts_sas_token'}), '(account_url=account_url, container_name=facts_container,\n credential=facts_sas_token)\n', (567, 656), False, 'from azure.storage.blob import ContainerClient, BlobClient\n'), ((891, 947), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '_people.json')"], {}), "(directory_path, sample_id + '_people.json')\n", (903, 947), False, 'import os\n'), ((1030, 1084), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '_poly.json')"], {}), "(directory_path, sample_id + '_poly.json')\n", (1042, 1084), False, 'import os\n'), ((1168, 1216), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '.jpg')"], {}), "(directory_path, sample_id + '.jpg')\n", (1180, 1216), False, 'import os\n'), ((1301, 1349), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '.npz')"], {}), "(directory_path, sample_id + '.npz')\n", (1313, 1349), False, 'import os\n'), ((1432, 1481), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '.json')"], {}), "(directory_path, sample_id + '.json')\n", (1444, 1481), False, 'import os\n'), ((1570, 1619), 'os.path.join', 'os.path.join', (['directory_path', "(sample_id + '.json')"], {}), "(directory_path, sample_id + '.json')\n", (1582, 1619), False, 'import os\n'), ((2150, 2192), 'numpy.stack', 'np.stack', (["fitered_result['masks']"], {'axis': '(-1)'}), "(fitered_result['masks'], axis=-1)\n", (2158, 2192), True, 'import numpy as np\n'), ((2648, 2690), 'mrcnn.utils.resize_image', 'resize_image', (['original_image'], {'mode': '"""pad64"""'}), "(original_image, mode='pad64')\n", (2660, 2690), False, 'from mrcnn.utils import resize_image\n'), ((5128, 5172), 'os.path.join', 'os.path.join', (['render_dir', "(sample_id + '.jpg')"], {}), "(render_dir, sample_id + '.jpg')\n", (5140, 5172), False, 
'import os\n'), ((5186, 5208), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (5196, 5208), False, 'import cv2\n'), ((5221, 5259), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (5233, 5259), False, 'import cv2\n'), ((5446, 5479), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 20)'}), '(1, figsize=(20, 20))\n', (5458, 5479), True, 'import matplotlib.pyplot as plt\n'), ((6530, 6554), 'matplotlib.pyplot.savefig', 'plt.savefig', (['render_path'], {}), '(render_path)\n', (6541, 6554), True, 'import matplotlib.pyplot as plt\n'), ((6559, 6573), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6568, 6573), True, 'import matplotlib.pyplot as plt\n'), ((13860, 13881), 'pandas.DataFrame', 'pd.DataFrame', (['records'], {}), '(records)\n', (13872, 13881), True, 'import pandas as pd\n'), ((14010, 14143), 'azure.storage.blob.BlobClient', 'BlobClient', ([], {'account_url': 'account_url', 'container_name': 'facts_container', 'blob_name': 'blob_id', 'snapshot': 'None', 'credential': 'facts_sas_token'}), '(account_url=account_url, container_name=facts_container,\n blob_name=blob_id, snapshot=None, credential=facts_sas_token)\n', (14020, 14143), False, 'from azure.storage.blob import ContainerClient, BlobClient\n'), ((16155, 16193), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (16167, 16193), False, 'import cv2\n'), ((16302, 16335), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 20)'}), '(1, figsize=(20, 20))\n', (16314, 16335), True, 'import matplotlib.pyplot as plt\n'), ((17817, 17827), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17825, 17827), True, 'import matplotlib.pyplot as plt\n'), ((18083, 18121), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (18095, 18121), False, 'import cv2\n'), ((18352, 18385), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {'figsize': '(20, 20)'}), '(1, figsize=(20, 20))\n', (18364, 18385), True, 'import matplotlib.pyplot as plt\n'), ((20661, 20671), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20669, 20671), True, 'import matplotlib.pyplot as plt\n'), ((20907, 20948), 'numpy.zeros', 'np.zeros', (['image.shape[:2]'], {'dtype': 'np.float'}), '(image.shape[:2], dtype=np.float)\n', (20915, 20948), True, 'import numpy as np\n'), ((21487, 21520), 'cv2.filter2D', 'cv2.filter2D', (['heatmap', '(-1)', 'kernel'], {}), '(heatmap, -1, kernel)\n', (21499, 21520), False, 'import cv2\n'), ((21887, 21915), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (21897, 21915), True, 'import matplotlib.pyplot as plt\n'), ((21920, 21935), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (21928, 21935), True, 'import matplotlib.pyplot as plt\n'), ((21963, 21980), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (21973, 21980), True, 'import matplotlib.pyplot as plt\n'), ((22277, 22300), 'numpy.arange', 'np.arange', (['(0)', '(1.0)', '(0.01)'], {}), '(0, 1.0, 0.01)\n', (22286, 22300), True, 'import numpy as np\n'), ((23439, 23459), 'numpy.max', 'np.max', (['IoUs'], {'axis': '(1)'}), '(IoUs, axis=1)\n', (23445, 23459), True, 'import numpy as np\n'), ((24618, 24644), 'imageio.imread', 'imageio.imread', (['image_path'], {}), '(image_path)\n', (24632, 24644), False, 'import imageio\n'), ((24749, 24771), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (24759, 24771), False, 'import cv2\n'), ((24784, 24822), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (24796, 24822), False, 'import cv2\n'), ((25183, 25205), 'random.shuffle', 'random.shuffle', (['colors'], {}), '(colors)\n', (25197, 25205), False, 'import random\n'), ((25436, 25469), 'numpy.array', 'np.array', (['contour'], 
{'dtype': 'np.int32'}), '(contour, dtype=np.int32)\n', (25444, 25469), True, 'import numpy as np\n'), ((25474, 25520), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(-1)', '(255)', '(-1)'], {}), '(mask, [contour], -1, 255, -1)\n', (25490, 25520), False, 'import cv2\n'), ((25629, 25662), 'numpy.array', 'np.array', (['contour'], {'dtype': 'np.int32'}), '(contour, dtype=np.int32)\n', (25637, 25662), True, 'import numpy as np\n'), ((25667, 25713), 'cv2.drawContours', 'cv2.drawContours', (['mask', '[contour]', '(-1)', '(255)', '(-1)'], {}), '(mask, [contour], -1, 255, -1)\n', (25683, 25713), False, 'import cv2\n'), ((26325, 26348), 'numpy.stack', 'np.stack', (['masks'], {'axis': '(2)'}), '(masks, axis=2)\n', (26333, 26348), True, 'import numpy as np\n'), ((27445, 27471), 'numpy.fromstring', 'np.fromstring', (['z', 'np.uint8'], {}), '(z, np.uint8)\n', (27458, 27471), True, 'import numpy as np\n'), ((4633, 4651), 'json.dump', 'json.dump', (['dump', 'f'], {}), '(dump, f)\n', (4642, 4651), False, 'import json\n'), ((5312, 5324), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5321, 5324), False, 'import json\n'), ((5877, 5995), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x, y)', 'w', 'h'], {'linewidth': '(2)', 'alpha': '(0.7)', 'linestyle': '"""dashed"""', 'edgecolor': 'colors[i]', 'facecolor': '"""none"""'}), "((x, y), w, h, linewidth=2, alpha=0.7, linestyle='dashed',\n edgecolor=colors[i], facecolor='none')\n", (5894, 5995), False, 'from matplotlib import patches\n'), ((6187, 6273), 'matplotlib.patches.Polygon', 'Polygon', (['polygon'], {'facecolor': '"""none"""', 'edgecolor': 'colors[i]', 'linewidth': 'None', 'fill': '(True)'}), "(polygon, facecolor='none', edgecolor=colors[i], linewidth=None,\n fill=True)\n", (6194, 6273), False, 'from matplotlib.patches import Polygon\n'), ((7860, 7884), 'json.dump', 'json.dump', (['dump_dict', 'fp'], {}), '(dump_dict, fp)\n', (7869, 7884), False, 'import json\n'), ((8060, 8073), 'json.load', 
'json.load', (['fp'], {}), '(fp)\n', (8069, 8073), False, 'import json\n'), ((16277, 16289), 'json.load', 'json.load', (['f'], {}), '(f)\n', (16286, 16289), False, 'import json\n'), ((18220, 18232), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18229, 18232), False, 'import json\n'), ((18327, 18339), 'json.load', 'json.load', (['f'], {}), '(f)\n', (18336, 18339), False, 'import json\n'), ((21440, 21469), 'numpy.ones', 'np.ones', (['(10, 10)', 'np.float32'], {}), '((10, 10), np.float32)\n', (21447, 21469), True, 'import numpy as np\n'), ((25980, 26016), 'numpy.zeros', 'np.zeros', (['image_shape'], {'dtype': 'np.int8'}), '(image_shape, dtype=np.int8)\n', (25988, 26016), True, 'import numpy as np\n'), ((26924, 26950), 'random.shuffle', 'random.shuffle', (['sample_ids'], {}), '(sample_ids)\n', (26938, 26950), False, 'import random\n'), ((27416, 27435), 'base64.b64decode', 'base64.b64decode', (['s'], {}), '(s)\n', (27432, 27435), False, 'import base64\n'), ((7465, 7478), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (7474, 7478), False, 'import json\n'), ((14703, 14735), 'os.path.exists', 'os.path.exists', (['image_local_path'], {}), '(image_local_path)\n', (14717, 14735), False, 'import os\n'), ((14823, 14854), 'os.path.exists', 'os.path.exists', (['json_local_path'], {}), '(json_local_path)\n', (14837, 14854), False, 'import os\n'), ((15574, 15606), 'os.path.exists', 'os.path.exists', (['image_local_path'], {}), '(image_local_path)\n', (15588, 15606), False, 'import os\n'), ((15694, 15727), 'os.path.exists', 'os.path.exists', (['people_local_path'], {}), '(people_local_path)\n', (15708, 15727), False, 'import os\n'), ((15828, 15859), 'os.path.exists', 'os.path.exists', (['poly_local_path'], {}), '(poly_local_path)\n', (15842, 15859), False, 'import os\n'), ((17016, 17148), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'linewidth': '(2)', 'alpha': '(0.7)', 'linestyle': '"""dashed"""', 'edgecolor': 'colors[i]', 
'facecolor': '"""none"""'}), "((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7,\n linestyle='dashed', edgecolor=colors[i], facecolor='none')\n", (17033, 17148), False, 'from matplotlib import patches\n'), ((17427, 17537), 'matplotlib.patches.Polygon', 'Polygon', (["instance['points']['exterior']"], {'facecolor': '"""none"""', 'edgecolor': 'colors[i]', 'linewidth': 'None', 'fill': '(True)'}), "(instance['points']['exterior'], facecolor='none', edgecolor=colors[\n i], linewidth=None, fill=True)\n", (17434, 17537), False, 'from matplotlib.patches import Polygon\n'), ((19256, 19388), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(x1, y1)', '(x2 - x1)', '(y2 - y1)'], {'linewidth': '(2)', 'alpha': '(0.7)', 'linestyle': '"""dashed"""', 'edgecolor': 'colors[i]', 'facecolor': '"""none"""'}), "((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=0.7,\n linestyle='dashed', edgecolor=colors[i], facecolor='none')\n", (19273, 19388), False, 'from matplotlib import patches\n'), ((20125, 20156), 'numpy.squeeze', 'np.squeeze', (['contours[0]'], {'axis': '(1)'}), '(contours[0], axis=1)\n', (20135, 20156), True, 'import numpy as np\n'), ((21154, 21166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (21163, 21166), False, 'import json\n'), ((4204, 4224), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (4219, 4224), False, 'import cv2\n'), ((6141, 6158), 'numpy.array', 'np.array', (['polygon'], {}), '(polygon)\n', (6149, 6158), True, 'import numpy as np\n'), ((10354, 10410), 'datetime.datetime.strptime', 'datetime.strptime', (['date_time_string', '"""%Y_%m_%d_%H_%M_%S"""'], {}), "(date_time_string, '%Y_%m_%d_%H_%M_%S')\n", (10371, 10410), False, 'from datetime import datetime\n'), ((21285, 21323), 'numpy.zeros', 'np.zeros', (['(1024, 1280)'], {'dtype': 'np.float'}), '((1024, 1280), dtype=np.float)\n', (21293, 21323), True, 'import numpy as np\n'), ((21561, 21576), 'numpy.max', 'np.max', (['heatmap'], {}), '(heatmap)\n', (21567, 21576), True, 'import numpy as 
np\n'), ((21757, 21772), 'numpy.max', 'np.max', (['heatmap'], {}), '(heatmap)\n', (21763, 21772), True, 'import numpy as np\n'), ((25148, 25171), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*c'], {}), '(*c)\n', (25167, 25171), False, 'import colorsys\n'), ((27483, 27520), 'cv2.imdecode', 'cv2.imdecode', (['n', 'cv2.IMREAD_UNCHANGED'], {}), '(n, cv2.IMREAD_UNCHANGED)\n', (27495, 27520), False, 'import cv2\n'), ((11356, 11412), 'datetime.datetime.strptime', 'datetime.strptime', (['date_time_string', '"""%Y-%m-%d-%H-%M-%S"""'], {}), "(date_time_string, '%Y-%m-%d-%H-%M-%S')\n", (11373, 11412), False, 'from datetime import datetime\n')] |
# -*- coding: UTF-8 -*-
"""
此脚本用于展示使用惩罚项解决模型幻觉的问题
"""
import os
import numpy as np
import statsmodels.api as sm
from sklearn import linear_model
import matplotlib.pyplot as plt
import pandas as pd
def read_data(path):
    """Read a CSV file at *path* into a pandas DataFrame."""
    return pd.read_csv(path)
def generate_random_var():
    """Return 20 reproducible {0, 1} draws (a feature unrelated to the data).

    The global NumPy RNG is re-seeded on every call, so the output is
    deterministic.
    """
    seed = 4873
    np.random.seed(seed)
    return np.random.randint(2, size=20)
def train_model(x, y, alpha):
    """Fit a Lasso regression with penalty weight *alpha*.

    The design matrix already contains a constant column, hence
    fit_intercept=False.
    """
    lasso = linear_model.Lasso(alpha=alpha, fit_intercept=False)
    lasso.fit(x, y)
    return lasso
def visualize_model(X, Y, alphas, coefs):
    """Plot each Lasso coefficient path against the penalty strength.

    *X* and *Y* are accepted for interface compatibility but are not
    used by the plot itself.
    """
    # One figure, one axes.
    fig = plt.figure(figsize=(6, 6), dpi=80)
    ax = fig.add_subplot(1, 1, 1)
    # (coefficient column, line style, legend label); column 0 holds the
    # constant term "c".
    series = [(1, "r:", "a"), (2, "g", "b"), (0, "b-.", "c")]
    for column, fmt, name in series:
        ax.plot(alphas, coefs[:, column], fmt, label=u'%s' % name)
    legend = plt.legend(loc=4, shadow=True)
    legend.get_frame().set_facecolor("#6F93AE")
    ax.set_yticks(np.arange(-1, 1.3, 0.3))
    ax.set_xscale("log")
    ax.set_xlabel("$alpha$")
    plt.show()
def run_model(data):
    """Fit a Lasso path over a log-spaced alpha grid and plot the
    coefficient trajectories.

    A deterministic random column "z" (uncorrelated with the target) is
    appended to the design matrix; the L1 penalty should drive its
    coefficient to zero, which the plot demonstrates.

    Args:
        data: DataFrame with at least the columns "x" and "y".
    """
    features = ["x"]
    labels = ["y"]
    Y = data[labels]
    # Bug fix: copy the slice so adding columns never mutates the caller's
    # DataFrame (and pandas' SettingWithCopyWarning is avoided).
    X = data[features].copy()
    # Add the irrelevant random feature; its coefficient should become 0.
    X["z"] = generate_random_var()
    # Add the constant column "const".
    X = sm.add_constant(X)
    alphas = np.logspace(-4, -0.5, 100)
    coefs = []
    for alpha in alphas:
        model = train_model(X, Y, alpha)
        coefs.append(model.coef_)
    coefs = np.array(coefs)
    # Visualize the effect of the penalty term.
    visualize_model(X, Y, alphas, coefs)
if __name__ == "__main__":
home_path = os.path.dirname(os.path.abspath(__file__))
# Windows下的存储路径与Linux并不相同
if os.name == "nt":
data_path = "%s\\simple_example.csv" % home_path
else:
data_path = "%s/simple_example.csv" % home_path
data = read_data(data_path)
run_model(data) | [
"sklearn.linear_model.Lasso",
"pandas.read_csv",
"numpy.arange",
"numpy.array",
"numpy.random.randint",
"matplotlib.pyplot.figure",
"statsmodels.api.add_constant",
"numpy.random.seed",
"os.path.abspath",
"numpy.logspace",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((267, 284), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (278, 284), True, 'import pandas as pd\n'), ((363, 383), 'numpy.random.seed', 'np.random.seed', (['(4873)'], {}), '(4873)\n', (377, 383), True, 'import numpy as np\n'), ((395, 424), 'numpy.random.randint', 'np.random.randint', (['(2)'], {'size': '(20)'}), '(2, size=20)\n', (412, 424), True, 'import numpy as np\n'), ((534, 586), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {'alpha': 'alpha', 'fit_intercept': '(False)'}), '(alpha=alpha, fit_intercept=False)\n', (552, 586), False, 'from sklearn import linear_model\n'), ((718, 752), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)', 'dpi': '(80)'}), '(figsize=(6, 6), dpi=80)\n', (728, 752), True, 'import matplotlib.pyplot as plt\n'), ((991, 1021), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)', 'shadow': '(True)'}), '(loc=4, shadow=True)\n', (1001, 1021), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1181), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1179, 1181), True, 'import matplotlib.pyplot as plt\n'), ((1400, 1418), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (1415, 1418), True, 'import statsmodels.api as sm\n'), ((1432, 1458), 'numpy.logspace', 'np.logspace', (['(-4)', '(-0.5)', '(100)'], {}), '(-4, -0.5, 100)\n', (1443, 1458), True, 'import numpy as np\n'), ((1586, 1601), 'numpy.array', 'np.array', (['coefs'], {}), '(coefs)\n', (1594, 1601), True, 'import numpy as np\n'), ((1088, 1111), 'numpy.arange', 'np.arange', (['(-1)', '(1.3)', '(0.3)'], {}), '(-1, 1.3, 0.3)\n', (1097, 1111), True, 'import numpy as np\n'), ((1719, 1744), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1734, 1744), False, 'import os\n')] |
import pytz
import sys
import numpy as np
from datetime import datetime
from metrics import utils
QUERY_CONTENT = '*'
# return a list of throughputs computed per call
def get_service_throughput_per_hit(service, computation_timestamp, time_window):
    """Compute one throughput record (MB/s) per request of *service*.

    Queries Elasticsearch for every hit of the given operationID inside
    *time_window*, sums response length and request time per request id
    (grouped by blueprint), then converts each (length, time) pair into a
    throughput value.

    Args:
        service: operationID to filter hits on.
        computation_timestamp: timestamp stamped on every produced record.
        time_window: Elasticsearch-style time range filter string.

    Returns:
        List of dicts, one per request, with blueprint/instance ids and
        the computed throughput in MB/s.
    """
    print(service,file=sys.stderr)  # debug trace of the service being processed
    query_ids = QUERY_CONTENT + f' AND request.operationID:{service} AND @timestamp:{time_window}'
    # First query only to learn the hit count, then re-query for all hits.
    res = utils.es_query(query=query_ids)
    total_hits = res['hits']['total']
    res = utils.es_query(query=query_ids, size=total_hits)
    # throughput_dict[blueprint_id][request_id] -> accumulated totals.
    throughput_dict = {}
    for hit in res['hits']['hits']:
        blueprint_id = utils.get_blueprint_id()
        vdc_instance_id = utils.extract_vdc_id(hit['_index'], '-')
        source = hit['_source']
        request_id = source['request.id']
        operation_id = source['request.operationID']
        if blueprint_id not in throughput_dict.keys():
            throughput_dict[blueprint_id] = {}
        if request_id not in throughput_dict[blueprint_id].keys():
            throughput_dict[blueprint_id][request_id] = {"BluePrint-ID": blueprint_id,
                                                         "VDC-Instance-ID": vdc_instance_id,
                                                         "Operation-ID": operation_id,
                                                         'Request-ID': request_id,
                                                         'response-length': 0,
                                                         'request-time': 0,
                                                         "hit-timestamp": source['@timestamp']
                                                         }
        # Hits may lack either field; accumulate only what is present.
        if 'response.length' in source:
            throughput_dict[blueprint_id][request_id]['response-length'] += source['response.length']
        if 'request.requestTime' in source:
            throughput_dict[blueprint_id][request_id]['request-time'] += source['request.requestTime']
    throughputs = []
    for bp_id in throughput_dict.keys():
        for throughput in throughput_dict[bp_id].values():
            blueprint_id = throughput['BluePrint-ID']
            operation_id = throughput['Operation-ID']
            vdc_instance_id = throughput['VDC-Instance-ID']
            request_id = throughput['Request-ID']
            length = throughput['response-length']
            time = throughput['request-time']
            timestamp = throughput['hit-timestamp']
            # Throughput stays 0 when either total is missing/zero.
            value = 0
            if time > 0 and length > 0:
                # bytes / seconds, then scaled to MB/s. requestTime is
                # presumably in nanoseconds (divided by 1e9) -- confirm.
                value = (length / (time / 1000000000))/ (1024 * 1024)
            metric_per_hit = {"BluePrint-ID": blueprint_id,
                              "VDC-Instance-ID": vdc_instance_id,
                              "Operation-ID": operation_id,
                              "Request-ID": request_id,
                              "metric": "throughput",
                              "unit": "MB/s",
                              "value": float(value),
                              "hit-timestamp": timestamp,
                              "@timestamp": computation_timestamp
                              }
            throughputs.append(metric_per_hit)
    return throughputs
def get_throughput_per_bp_and_method(computation_timestamp, time_window, method=''):
    """Aggregate per-hit throughputs into mean/min/max per (service, blueprint).

    Args:
        computation_timestamp: timestamp stamped on every produced record.
        time_window: Elasticsearch-style time range filter string.
        method: if non-empty, restrict the computation to this one service.

    Returns:
        List of dicts, one per (service, blueprint) pair, carrying the
        mean/min/max throughput, the time span covered (minutes) and the
        number of hits.
    """
    services = utils.get_services()
    aggregate_throughputs = []
    now_ts = datetime.now(pytz.utc)
    for service in services:
        if method == '' or method == service:
            throughputs = get_service_throughput_per_hit(service, computation_timestamp, time_window)
            # Per-blueprint throughput values and bookkeeping (oldest hit,
            # hit count).
            aggregate_throughputs_per_service = {}
            infos_per_service = {}
            for throughput in throughputs:
                bp_id = throughput['BluePrint-ID']
                if bp_id not in aggregate_throughputs_per_service.keys():
                    aggregate_throughputs_per_service[bp_id] = []
                    infos_per_service[bp_id] = {'oldest_ts': now_ts, 'hits': 0}
                aggregate_throughputs_per_service[bp_id].append(throughput['value'])
                # Here take the timestamp of the hit: if ts < oldest_ts then oldest_ts = ts
                ts = utils.parse_timestamp(throughput['hit-timestamp'])
                if ts < infos_per_service[bp_id]['oldest_ts']:
                    infos_per_service[bp_id]['oldest_ts'] = ts
                # Update the number of hit
                infos_per_service[bp_id]['hits'] += 1
            for bp_id in aggregate_throughputs_per_service.keys():
                # Delta is computed from now to the oldest hit found
                delta = (now_ts - infos_per_service[bp_id]['oldest_ts']).total_seconds() / 60
                # NOTE(review): the local name `dict` shadows the builtin.
                dict = {
                    'method': service,
                    'BluePrint-ID': bp_id,
                    'mean': np.array(aggregate_throughputs_per_service[bp_id]).mean(),
                    'min': np.array(aggregate_throughputs_per_service[bp_id]).min(),
                    'max': np.array(aggregate_throughputs_per_service[bp_id]).max(),
                    'metric': 'throughput',
                    'unit': 'MB/s',
                    "@timestamp": computation_timestamp,
                    'delta': delta,
                    'delta_unit': 'minutes',
                    'hits': infos_per_service[bp_id]['hits']
                }
                aggregate_throughputs.append(dict)
    return aggregate_throughputs
def all_throughput_of_minutes(minutes):
    """Compute per-hit throughput records for every known service.

    Args:
        minutes: intended look-back window in minutes (currently overridden
            by a hard-coded window, see below).

    Returns:
        Dict mapping each service name to its list of throughput records.
    """
    timestamp, time_window = utils.get_timestamp_timewindow(minutes)
    # NOTE(review): the computed window is immediately overwritten with a
    # hard-coded one -- looks like leftover test/debug code; confirm before
    # removing.
    timestamp, time_window = '2016-06-20T22:28:46', '[2018-06-20T22:28:46 TO 2020-06-20T22:36:41]'
    # Read list of services, of which to compute the metric
    services = utils.get_services()
    ret_dict = {}
    for service in services:
        ret_dict[service] = get_service_throughput_per_hit(service, timestamp, time_window)
    return ret_dict
def service_throughput_of_minutes(service, minutes):
    """Compute per-hit throughput records for a single service.

    NOTE(review): like all_throughput_of_minutes, the time window is
    currently hard-coded (presumably for testing); the real window derived
    from *minutes* is commented out below.

    Args:
        service: operationID of the service to query.
        minutes: intended look-back window in minutes (currently unused
            because of the hard-coded window).

    Returns:
        Dict mapping the service name to its list of throughput records.
    """
    # timestamp, time_window = get_timestamp_timewindow(minutes)
    timestamp, time_window = '2016-06-20T22:28:46', '[2018-06-20T22:28:46 TO 2020-06-20T22:36:41]'
    # Bug fix: get_service_throughput_per_hit takes exactly three
    # arguments; the extra `minutes` argument raised a TypeError.
    ret_dict = {service: get_service_throughput_per_hit(service, timestamp, time_window)}
    return ret_dict
| [
"metrics.utils.es_query",
"metrics.utils.parse_timestamp",
"metrics.utils.extract_vdc_id",
"datetime.datetime.now",
"numpy.array",
"metrics.utils.get_services",
"metrics.utils.get_blueprint_id",
"metrics.utils.get_timestamp_timewindow"
] | [((395, 426), 'metrics.utils.es_query', 'utils.es_query', ([], {'query': 'query_ids'}), '(query=query_ids)\n', (409, 426), False, 'from metrics import utils\n'), ((475, 523), 'metrics.utils.es_query', 'utils.es_query', ([], {'query': 'query_ids', 'size': 'total_hits'}), '(query=query_ids, size=total_hits)\n', (489, 523), False, 'from metrics import utils\n'), ((3309, 3329), 'metrics.utils.get_services', 'utils.get_services', ([], {}), '()\n', (3327, 3329), False, 'from metrics import utils\n'), ((3375, 3397), 'datetime.datetime.now', 'datetime.now', (['pytz.utc'], {}), '(pytz.utc)\n', (3387, 3397), False, 'from datetime import datetime\n'), ((5495, 5534), 'metrics.utils.get_timestamp_timewindow', 'utils.get_timestamp_timewindow', (['minutes'], {}), '(minutes)\n', (5525, 5534), False, 'from metrics import utils\n'), ((5709, 5729), 'metrics.utils.get_services', 'utils.get_services', ([], {}), '()\n', (5727, 5729), False, 'from metrics import utils\n'), ((609, 633), 'metrics.utils.get_blueprint_id', 'utils.get_blueprint_id', ([], {}), '()\n', (631, 633), False, 'from metrics import utils\n'), ((660, 700), 'metrics.utils.extract_vdc_id', 'utils.extract_vdc_id', (["hit['_index']", '"""-"""'], {}), "(hit['_index'], '-')\n", (680, 700), False, 'from metrics import utils\n'), ((4174, 4224), 'metrics.utils.parse_timestamp', 'utils.parse_timestamp', (["throughput['hit-timestamp']"], {}), "(throughput['hit-timestamp'])\n", (4195, 4224), False, 'from metrics import utils\n'), ((4814, 4864), 'numpy.array', 'np.array', (['aggregate_throughputs_per_service[bp_id]'], {}), '(aggregate_throughputs_per_service[bp_id])\n', (4822, 4864), True, 'import numpy as np\n'), ((4900, 4950), 'numpy.array', 'np.array', (['aggregate_throughputs_per_service[bp_id]'], {}), '(aggregate_throughputs_per_service[bp_id])\n', (4908, 4950), True, 'import numpy as np\n'), ((4985, 5035), 'numpy.array', 'np.array', (['aggregate_throughputs_per_service[bp_id]'], {}), 
'(aggregate_throughputs_per_service[bp_id])\n', (4993, 5035), True, 'import numpy as np\n')] |
import numpy as np
def lonlat2km(lon1,lat1,lon2,lat2):
    """Convert a lon/lat displacement into (east, north) kilometres.

    Uses a cosine-series approximation of metres-per-degree evaluated at
    latitude *lat1*.
    """
    phi = lat1*np.pi/180  # latitude in radians
    meters_per_deg_lat = (111132.92
                          - 559.8*np.cos(2*phi)
                          + 1.175*np.cos(4*phi)
                          - 0.0023*np.cos(6*phi))
    meters_per_deg_lon = (111412.84*np.cos(phi)
                          - 93.5*np.cos(3*phi)
                          + 0.0118*np.cos(5*phi))
    east = (lon2-lon1)*meters_per_deg_lon/1000
    north = (lat2-lat1)*meters_per_deg_lat/1000
    return east,north
def radians(d):
    """Convert an angle from degrees to radians."""
    return d * np.pi / 180
| [
"numpy.cos"
] | [((148, 163), 'numpy.cos', 'np.cos', (['(6 * con)'], {}), '(6 * con)\n', (154, 163), True, 'import numpy as np\n'), ((222, 237), 'numpy.cos', 'np.cos', (['(5 * con)'], {}), '(5 * con)\n', (228, 237), True, 'import numpy as np\n'), ((127, 142), 'numpy.cos', 'np.cos', (['(4 * con)'], {}), '(4 * con)\n', (133, 142), True, 'import numpy as np\n'), ((184, 195), 'numpy.cos', 'np.cos', (['con'], {}), '(con)\n', (190, 195), True, 'import numpy as np\n'), ((201, 216), 'numpy.cos', 'np.cos', (['(3 * con)'], {}), '(3 * con)\n', (207, 216), True, 'import numpy as np\n'), ((107, 122), 'numpy.cos', 'np.cos', (['(2 * con)'], {}), '(2 * con)\n', (113, 122), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
from pycrazyswarm import *
def test_yaml_string_load():
    """Crazyswarm should accept an inline YAML configuration string."""
    crazyflies_yaml = """
    crazyflies:
    - channel: 100
      id: 1
      initialPosition: [1.0, 0.0, 0.0]
    - channel: 100
      id: 10
      initialPosition: [0.0, -1.0, 0.0]
    """
    sim = Crazyswarm(crazyflies_yaml=crazyflies_yaml, args="--sim --vis null")
    timeHelper = sim.timeHelper
    fleet = sim.allcfs.crazyflies
    by_id = sim.allcfs.crazyfliesById
    # Both entries must be parsed, addressable by id, and carry their
    # initial positions.
    assert len(fleet) == 2
    assert np.all(by_id[1].initialPosition == [1.0, 0.0, 0.0])
    assert np.all(by_id[10].initialPosition == [0.0, -1.0, 0.0])
| [
"numpy.all"
] | [((536, 582), 'numpy.all', 'np.all', (['(cf1.initialPosition == [1.0, 0.0, 0.0])'], {}), '(cf1.initialPosition == [1.0, 0.0, 0.0])\n', (542, 582), True, 'import numpy as np\n'), ((615, 663), 'numpy.all', 'np.all', (['(cf10.initialPosition == [0.0, -1.0, 0.0])'], {}), '(cf10.initialPosition == [0.0, -1.0, 0.0])\n', (621, 663), True, 'import numpy as np\n')] |
import numpy as np
import netket as nk
import sys
from shutil import move
import mpi4py.MPI as mpi
import symmetries
# Chain length and the `msr` flag passed to the Hamiltonian (presumably
# the Marshall sign rule -- confirm against nk.custom.J1J2).
L = 150
msr = True
# Only MPI rank 0 writes the result file header.
rank = mpi.COMM_WORLD.Get_rank()
if rank == 0:
    with open("result.txt", "w") as fl:
        fl.write("L, energy (real), energy (imag), energy_error\n")
# 1D periodic chain of L sites.
g = nk.graph.Hypercube(length=L, n_dim=1, pbc=True)
# Spin based Hilbert Space
hi = nk.hilbert.Spin(s=0.5, total_sz=0.0, N=g.n_nodes)
# J1-J2 Hamiltonian with J2 = 0 (i.e. nearest-neighbour coupling only).
ha = nk.custom.J1J2(g, J2=0.0, msr=msr)
# Translation symmetries of the chain for the symmetric Jastrow ansatz.
transl = nk.custom.get_symms_chain(L)
ma = nk.machine.JastrowSymm(hi, transl)
ma.init_random_parameters(seed=1234)
# Optimizer
# NOTE(review): `op` is created but never used below -- this script only
# evaluates a pre-trained machine.
op = nk.optimizer.Sgd(ma, learning_rate=0.02)
# Sampler
sa = nk.sampler.MetropolisExchange(machine=ma,graph=g,d_max=L,n_chains=1)
sa.reset(True)
# Load previously optimized variational parameters from disk.
ma.parameters = np.load("/home/mmm0475/data/vGPS/heisenberg1D/Heisenberg_Jastrow_netket_1D_150_sites/Heisenberg_Jastrow_netket_1D_150_sites_409060/best_parameters.npy")
# Estimate the energy; the 1e6 samples are split evenly across MPI ranks.
est = nk.variational.estimate_expectations(ha, sa, 1000000//mpi.COMM_WORLD.size, n_discard=200)
if mpi.COMM_WORLD.Get_rank() == 0:
    with open("result.txt", "a") as fl:
        fl.write("{} {} {} {}\n".format(L, np.real(est.mean), np.imag(est.mean), est.error_of_mean))
| [
"netket.custom.J1J2",
"netket.graph.Hypercube",
"numpy.imag",
"netket.optimizer.Sgd",
"netket.hilbert.Spin",
"numpy.real",
"netket.custom.get_symms_chain",
"netket.machine.JastrowSymm",
"netket.variational.estimate_expectations",
"numpy.load",
"mpi4py.MPI.COMM_WORLD.Get_rank",
"netket.sampler.... | [((145, 170), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'mpi.COMM_WORLD.Get_rank', ([], {}), '()\n', (168, 170), True, 'import mpi4py.MPI as mpi\n'), ((299, 346), 'netket.graph.Hypercube', 'nk.graph.Hypercube', ([], {'length': 'L', 'n_dim': '(1)', 'pbc': '(True)'}), '(length=L, n_dim=1, pbc=True)\n', (317, 346), True, 'import netket as nk\n'), ((380, 429), 'netket.hilbert.Spin', 'nk.hilbert.Spin', ([], {'s': '(0.5)', 'total_sz': '(0.0)', 'N': 'g.n_nodes'}), '(s=0.5, total_sz=0.0, N=g.n_nodes)\n', (395, 429), True, 'import netket as nk\n'), ((436, 470), 'netket.custom.J1J2', 'nk.custom.J1J2', (['g'], {'J2': '(0.0)', 'msr': 'msr'}), '(g, J2=0.0, msr=msr)\n', (450, 470), True, 'import netket as nk\n'), ((481, 509), 'netket.custom.get_symms_chain', 'nk.custom.get_symms_chain', (['L'], {}), '(L)\n', (506, 509), True, 'import netket as nk\n'), ((516, 550), 'netket.machine.JastrowSymm', 'nk.machine.JastrowSymm', (['hi', 'transl'], {}), '(hi, transl)\n', (538, 550), True, 'import netket as nk\n'), ((606, 646), 'netket.optimizer.Sgd', 'nk.optimizer.Sgd', (['ma'], {'learning_rate': '(0.02)'}), '(ma, learning_rate=0.02)\n', (622, 646), True, 'import netket as nk\n'), ((663, 734), 'netket.sampler.MetropolisExchange', 'nk.sampler.MetropolisExchange', ([], {'machine': 'ma', 'graph': 'g', 'd_max': 'L', 'n_chains': '(1)'}), '(machine=ma, graph=g, d_max=L, n_chains=1)\n', (692, 734), True, 'import netket as nk\n'), ((764, 926), 'numpy.load', 'np.load', (['"""/home/mmm0475/data/vGPS/heisenberg1D/Heisenberg_Jastrow_netket_1D_150_sites/Heisenberg_Jastrow_netket_1D_150_sites_409060/best_parameters.npy"""'], {}), "(\n '/home/mmm0475/data/vGPS/heisenberg1D/Heisenberg_Jastrow_netket_1D_150_sites/Heisenberg_Jastrow_netket_1D_150_sites_409060/best_parameters.npy'\n )\n", (771, 926), True, 'import numpy as np\n'), ((924, 1019), 'netket.variational.estimate_expectations', 'nk.variational.estimate_expectations', (['ha', 'sa', '(1000000 // mpi.COMM_WORLD.size)'], {'n_discard': 
'(200)'}), '(ha, sa, 1000000 // mpi.COMM_WORLD.size,\n n_discard=200)\n', (960, 1019), True, 'import netket as nk\n'), ((1018, 1043), 'mpi4py.MPI.COMM_WORLD.Get_rank', 'mpi.COMM_WORLD.Get_rank', ([], {}), '()\n', (1041, 1043), True, 'import mpi4py.MPI as mpi\n'), ((1136, 1153), 'numpy.real', 'np.real', (['est.mean'], {}), '(est.mean)\n', (1143, 1153), True, 'import numpy as np\n'), ((1155, 1172), 'numpy.imag', 'np.imag', (['est.mean'], {}), '(est.mean)\n', (1162, 1172), True, 'import numpy as np\n')] |
from pyexpat import features
import numpy as np
from src.io import npy_events_tools
from src.io import psee_loader
import tqdm
import os
from numpy.lib import recfunctions as rfn
import torch
import time
import math
import argparse
def generate_agile_event_volume_cuda(events, shape, events_window = 50000, volume_bins=5):
H, W = shape
x, y, t, p = events.unbind(-1)
x, y, p = x.long(), y.long(), p.long()
t_star = (volume_bins * t.float())[:,None,None]
channels = volume_bins
adder = torch.stack([torch.arange(channels),torch.arange(channels)],dim = 1).to(x.device)[None,:,:] + 1 #1, 2, 2
adder = (1 - torch.abs(adder-t_star)) * torch.stack([p,1 - p],dim=1)[:,None,:] #n, 2, 2
adder = torch.where(adder>=0,adder,torch.zeros_like(adder)).view(adder.shape[0], channels * 2) #n, 4
img = torch.zeros((H * W, volume_bins * 2)).float().to(x.device)
img.index_add_(0, x + W * y, adder)
img = img.view(H * W, volume_bins, 2)
img_viewed = img.view((H, W, img.shape[1] * 2)).permute(2, 0, 1).contiguous()
# print(torch.quantile(img_viewed[img_viewed>0],0.95))
img_viewed = img_viewed / 5 * 255
return img_viewed
def denseToSparse(dense_tensor):
"""
Converts a dense tensor to a sparse vector.
:param dense_tensor: BatchSize x SpatialDimension_1 x SpatialDimension_2 x ... x FeatureDimension
:return locations: NumberOfActive x (SumSpatialDimensions + 1). The + 1 includes the batch index
:return features: NumberOfActive x FeatureDimension
"""
non_zero_indices = np.nonzero(dense_tensor)
features = dense_tensor[non_zero_indices[0],non_zero_indices[1],non_zero_indices[2]]
return np.stack(non_zero_indices), features
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='visualize one or several event files along with their boxes')
parser.add_argument('-raw_dir', type=str)
parser.add_argument('-target_dir', type=str)
parser.add_argument('-dataset', type=str, default="gen1")
parser.add_argument('-time_window', type=int, default=50000)
parser.add_argument('-event_volume_bins', type=int, default=5)
args = parser.parse_args()
raw_dir = args.raw_dir
target_dir = args.target_dir
dataset = args.dataset
if dataset == "gen4":
shape = [720,1280]
target_shape = [512, 640]
elif dataset == "kitti":
shape = [375,1242]
target_shape = [192, 640]
else:
shape = [240,304]
target_shape = [256, 320]
# event_volume_bins = [[5, 1, 1], [5, 1]]
# time_windows = [50000, 300000]
# time_steps = [[16, 1, 1], [1, 1]]
# cats = [["ev", "ev", "ts"], ["ev", "ts"]]
# time_step = 10000
rh = target_shape[0] / shape[0]
rw = target_shape[1] / shape[1]
time_window = args.time_window
#time_steps = [[1]]
event_volume_bins = args.event_volume_bins
time_step = 10000
#target_dirs = [os.path.join(target_dir, cat) for cat in ["normal", "long", "short", "ts_short", "ts_long"]]
#target_dirs = [os.path.join(target_dir, cat) for cat in ["normal"]]
#target_dir = os.path.join(target_dir, "normal")
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for mode in ["train","val","test"]:
file_dir = os.path.join(raw_dir, mode)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
root = file_dir
try:
files = os.listdir(file_dir)
except Exception:
continue
files = [time_seq_name[:-7] for time_seq_name in files
if time_seq_name[-3:] == 'dat']
pbar = tqdm.tqdm(total=len(files), unit='File', unit_scale=True)
target_root = os.path.join(target_dir, mode)
if not os.path.exists(target_root):
os.makedirs(target_root)
# Remove duplicates (.npy and .dat)
# files = files[int(2*len(files)/3):]
#files = files[int(len(files)/3):]
for i_file, file_name in enumerate(files):
# if not file_name == "moorea_2019-06-26_test_02_000_1708500000_1768500000":
# continue
event_file = os.path.join(root, file_name + '_td.dat')
bbox_file = os.path.join(root, file_name + '_bbox.npy')
f_bbox = open(bbox_file, "rb")
start, v_type, ev_size, size, dtype = npy_events_tools.parse_header(f_bbox)
dat_bbox = np.fromfile(f_bbox, dtype=v_type, count=-1)
f_bbox.close()
unique_ts, unique_indices = np.unique(dat_bbox['t'], return_index=True)
f_event = psee_loader.PSEELoader(event_file)
for bbox_count,unique_time in enumerate(unique_ts):
volume_save_path = os.path.join(target_root, file_name+"_"+str(unique_time)+".npy")
if os.path.exists(volume_save_path):
continue
end_time = int(unique_time)
end_count = f_event.seek_time(end_time)
if end_count is None:
break
start_time = end_time - time_window
dat_event = f_event
if start_time > 0:
dat_event.seek_time(start_time)
events = dat_event.load_delta_t(end_time - start_time)
else:
dat_event.seek_time(0)
events = dat_event.load_delta_t(end_time)
del dat_event
events = torch.from_numpy(rfn.structured_to_unstructured(events)[:, [1, 2, 0, 3]].astype(float)).cuda()
events[:,2] = (events[:,2] - start_time) / time_window
if target_shape[0] < shape[0]:
events[:,0] = events[:,0] * rw
events[:,1] = events[:,1] * rh
volume = generate_agile_event_volume_cuda(events, target_shape, time_window, event_volume_bins)
else:
volume = generate_agile_event_volume_cuda(events, shape, time_window, event_volume_bins)
volume = torch.nn.functional.interpolate(volume[None,:,:,:], size = target_shape, mode='nearest')[0]
volume = volume.cpu().numpy()
volume = np.where(volume > 255, 255, volume)
volume = volume.astype(np.uint8)
locations, features = denseToSparse(volume)
c, y, x = locations
p = c%2
c = (c/2).astype(int)
volume = x.astype(np.uint32) + np.left_shift(y.astype(np.uint32), 10) + np.left_shift(c.astype(np.uint32), 19) + np.left_shift(p.astype(np.uint32), 22) + np.left_shift(features.astype(np.uint32), 23)
x = np.bitwise_and(volume, 1023).astype(int)
y = np.right_shift(np.bitwise_and(volume, 523264), 10).astype(int)
c = np.right_shift(np.bitwise_and(volume, 3670016), 19).astype(int)
p = np.right_shift(np.bitwise_and(volume, 4194304), 22).astype(int)
features = np.right_shift(np.bitwise_and(volume, 2139095040), 23).astype(int)
events = np.stack([x, y, c, p, features], axis=1)
volume.tofile(volume_save_path)
#features.tofile(volume_save_path_f)
#np.savez(volume_save_path, locations = locations, features = features)
#h5.create_dataset(str(unique_time)+"/locations", data=locations)
#h5.create_dataset(str(unique_time)+"/features", data=features)
torch.cuda.empty_cache()
#h5.close()
pbar.update(1)
pbar.close()
# if mode == "test":
# np.save(os.path.join(root, 'total_volume_time.npy'),np.array(total_volume_time))
# np.save(os.path.join(root, 'total_taf_time.npy'),np.array(total_taf_time))
#h5.close() | [
"numpy.fromfile",
"torch.nn.functional.interpolate",
"torch.arange",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"numpy.where",
"numpy.stack",
"torch.zeros_like",
"pyexpat.features.astype",
"src.io.psee_loader.PSEELoader",
"torch.abs",
"numpy.nonzero",
"torch.cuda.empty_cach... | [((1558, 1582), 'numpy.nonzero', 'np.nonzero', (['dense_tensor'], {}), '(dense_tensor)\n', (1568, 1582), True, 'import numpy as np\n'), ((1763, 1866), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""visualize one or several event files along with their boxes"""'}), "(description=\n 'visualize one or several event files along with their boxes')\n", (1786, 1866), False, 'import argparse\n'), ((1685, 1711), 'numpy.stack', 'np.stack', (['non_zero_indices'], {}), '(non_zero_indices)\n', (1693, 1711), True, 'import numpy as np\n'), ((3175, 3198), 'os.path.exists', 'os.path.exists', (['raw_dir'], {}), '(raw_dir)\n', (3189, 3198), False, 'import os\n'), ((3208, 3228), 'os.makedirs', 'os.makedirs', (['raw_dir'], {}), '(raw_dir)\n', (3219, 3228), False, 'import os\n'), ((3240, 3266), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (3254, 3266), False, 'import os\n'), ((3276, 3299), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (3287, 3299), False, 'import os\n'), ((3360, 3387), 'os.path.join', 'os.path.join', (['raw_dir', 'mode'], {}), '(raw_dir, mode)\n', (3372, 3387), False, 'import os\n'), ((3809, 3839), 'os.path.join', 'os.path.join', (['target_dir', 'mode'], {}), '(target_dir, mode)\n', (3821, 3839), False, 'import os\n'), ((639, 664), 'torch.abs', 'torch.abs', (['(adder - t_star)'], {}), '(adder - t_star)\n', (648, 664), False, 'import torch\n'), ((666, 696), 'torch.stack', 'torch.stack', (['[p, 1 - p]'], {'dim': '(1)'}), '([p, 1 - p], dim=1)\n', (677, 696), False, 'import torch\n'), ((3403, 3427), 'os.path.exists', 'os.path.exists', (['file_dir'], {}), '(file_dir)\n', (3417, 3427), False, 'import os\n'), ((3441, 3462), 'os.makedirs', 'os.makedirs', (['file_dir'], {}), '(file_dir)\n', (3452, 3462), False, 'import os\n'), ((3520, 3540), 'os.listdir', 'os.listdir', (['file_dir'], {}), '(file_dir)\n', (3530, 3540), False, 'import os\n'), ((3860, 3887), 
'os.path.exists', 'os.path.exists', (['target_root'], {}), '(target_root)\n', (3874, 3887), False, 'import os\n'), ((3901, 3925), 'os.makedirs', 'os.makedirs', (['target_root'], {}), '(target_root)\n', (3912, 3925), False, 'import os\n'), ((4274, 4315), 'os.path.join', 'os.path.join', (['root', "(file_name + '_td.dat')"], {}), "(root, file_name + '_td.dat')\n", (4286, 4315), False, 'import os\n'), ((4340, 4383), 'os.path.join', 'os.path.join', (['root', "(file_name + '_bbox.npy')"], {}), "(root, file_name + '_bbox.npy')\n", (4352, 4383), False, 'import os\n'), ((4477, 4514), 'src.io.npy_events_tools.parse_header', 'npy_events_tools.parse_header', (['f_bbox'], {}), '(f_bbox)\n', (4506, 4514), False, 'from src.io import npy_events_tools\n'), ((4538, 4581), 'numpy.fromfile', 'np.fromfile', (['f_bbox'], {'dtype': 'v_type', 'count': '(-1)'}), '(f_bbox, dtype=v_type, count=-1)\n', (4549, 4581), True, 'import numpy as np\n'), ((4650, 4693), 'numpy.unique', 'np.unique', (["dat_bbox['t']"], {'return_index': '(True)'}), "(dat_bbox['t'], return_index=True)\n", (4659, 4693), True, 'import numpy as np\n'), ((4717, 4751), 'src.io.psee_loader.PSEELoader', 'psee_loader.PSEELoader', (['event_file'], {}), '(event_file)\n', (4739, 4751), False, 'from src.io import psee_loader\n'), ((754, 777), 'torch.zeros_like', 'torch.zeros_like', (['adder'], {}), '(adder)\n', (770, 777), False, 'import torch\n'), ((4936, 4968), 'os.path.exists', 'os.path.exists', (['volume_save_path'], {}), '(volume_save_path)\n', (4950, 4968), False, 'import os\n'), ((6370, 6405), 'numpy.where', 'np.where', (['(volume > 255)', '(255)', 'volume'], {}), '(volume > 255, 255, volume)\n', (6378, 6405), True, 'import numpy as np\n'), ((7264, 7304), 'numpy.stack', 'np.stack', (['[x, y, c, p, features]'], {'axis': '(1)'}), '([x, y, c, p, features], axis=1)\n', (7272, 7304), True, 'import numpy as np\n'), ((7673, 7697), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (7695, 7697), False, 'import 
torch\n'), ((831, 868), 'torch.zeros', 'torch.zeros', (['(H * W, volume_bins * 2)'], {}), '((H * W, volume_bins * 2))\n', (842, 868), False, 'import torch\n'), ((6206, 6299), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['volume[None, :, :, :]'], {'size': 'target_shape', 'mode': '"""nearest"""'}), "(volume[None, :, :, :], size=target_shape,\n mode='nearest')\n", (6237, 6299), False, 'import torch\n'), ((6799, 6825), 'pyexpat.features.astype', 'features.astype', (['np.uint32'], {}), '(np.uint32)\n', (6814, 6825), False, 'from pyexpat import features\n'), ((6852, 6880), 'numpy.bitwise_and', 'np.bitwise_and', (['volume', '(1023)'], {}), '(volume, 1023)\n', (6866, 6880), True, 'import numpy as np\n'), ((528, 550), 'torch.arange', 'torch.arange', (['channels'], {}), '(channels)\n', (540, 550), False, 'import torch\n'), ((551, 573), 'torch.arange', 'torch.arange', (['channels'], {}), '(channels)\n', (563, 573), False, 'import torch\n'), ((6928, 6958), 'numpy.bitwise_and', 'np.bitwise_and', (['volume', '(523264)'], {}), '(volume, 523264)\n', (6942, 6958), True, 'import numpy as np\n'), ((7011, 7042), 'numpy.bitwise_and', 'np.bitwise_and', (['volume', '(3670016)'], {}), '(volume, 3670016)\n', (7025, 7042), True, 'import numpy as np\n'), ((7095, 7126), 'numpy.bitwise_and', 'np.bitwise_and', (['volume', '(4194304)'], {}), '(volume, 4194304)\n', (7109, 7126), True, 'import numpy as np\n'), ((7186, 7220), 'numpy.bitwise_and', 'np.bitwise_and', (['volume', '(2139095040)'], {}), '(volume, 2139095040)\n', (7200, 7220), True, 'import numpy as np\n'), ((5630, 5668), 'numpy.lib.recfunctions.structured_to_unstructured', 'rfn.structured_to_unstructured', (['events'], {}), '(events)\n', (5660, 5668), True, 'from numpy.lib import recfunctions as rfn\n')] |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# Data preprocessing
data = pd.read_csv("RealEstate.csv")
# Converting Pandas dataframe to numpy array
X = data.loc[:, ['Bedrooms', 'Bathrooms', 'Size']].values
Y = data.Price.values.reshape(-1, 1)
# Train Test Split
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.33, random_state=0)
# Model Intialization
reg = LinearRegression()
# Data Fitting to LinearRegression Model
reg = reg.fit(X_train, Y_train)
# Printing coefficients
print(f"Coefficients theta0 = {reg.intercept_}, other thetas = {reg.coef_}")
# Predicted Values
Y_pred = reg.predict(X_test)
# Model Evaluation
def rmse(Y, Y_pred):
rmse = np.sqrt(sum((Y - Y_pred) ** 2) / Y.shape[0])
return rmse
def r2_score(Y, Y_pred):
mean_y = np.mean(Y)
ss_tot = sum((Y - mean_y) ** 2)
ss_res = sum((Y - Y_pred) ** 2)
r2 = 1 - (ss_res / ss_tot)
return r2
# Print Scores
print("RMSE = ", rmse(Y_test, Y_pred))
print("R2 Score = ", r2_score(Y_test, Y_pred))
# Features are Bedrooms, Bathrooms and Size respectively
features = np.array([[3, 3, 2371]])
predict = reg.predict(features)
print("Predict = ", int(predict))
| [
"numpy.mean",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.array",
"sklearn.linear_model.LinearRegression"
] | [((203, 232), 'pandas.read_csv', 'pd.read_csv', (['"""RealEstate.csv"""'], {}), "('RealEstate.csv')\n", (214, 232), True, 'import pandas as pd\n'), ((429, 483), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.33)', 'random_state': '(0)'}), '(X, Y, test_size=0.33, random_state=0)\n', (445, 483), False, 'from sklearn.model_selection import train_test_split\n'), ((519, 537), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (535, 537), False, 'from sklearn.linear_model import LinearRegression\n'), ((1218, 1242), 'numpy.array', 'np.array', (['[[3, 3, 2371]]'], {}), '([[3, 3, 2371]])\n', (1226, 1242), True, 'import numpy as np\n'), ((917, 927), 'numpy.mean', 'np.mean', (['Y'], {}), '(Y)\n', (924, 927), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.datasets.folder
from PIL import Image, ImageFile
from torch.utils.data import TensorDataset
from torchvision import transforms
from torchvision.datasets import MNIST, ImageFolder, CIFAR10
from torchvision.transforms.functional import rotate
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
# Let PIL load partially-written/truncated image files instead of raising.
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Names of every dataset class exposed by this module; resolved at runtime
# via get_dataset_class() (a globals() lookup by name).
DATASETS = [
    # Debug
    "Debug28",
    "Debug224",
    # Small images
    "VerticalLine",
    "VHLine",
    "FullColoredMNIST",
    "ColoredMNIST",
    "RotatedMNIST",
    # Big images
    "VLCS",
    "PACS",
    "OfficeHome",
    "TerraIncognita",
    "DomainNet",
    # WILDS datasets
    "SVIRO",
    "WILDSCamelyon",
    "WILDSFMoW",
]
def get_dataset_class(dataset_name):
    """Resolve a dataset class defined in this module by its name.

    Raises:
        NotImplementedError: if no global with that name exists.
    """
    dataset_cls = globals().get(dataset_name)
    if dataset_cls is None:
        raise NotImplementedError("Dataset not found: {}".format(dataset_name))
    return dataset_cls


def num_environments(dataset_name):
    """Return the number of environments declared by the named dataset."""
    return len(get_dataset_class(dataset_name).ENVIRONMENTS)
class MultipleDomainDataset:
    """Base class for datasets made of several per-environment sub-datasets.

    Subclasses fill ``self.datasets`` (one entry per environment) and may
    override the class-level training constants below.
    """

    N_STEPS = 5001           # Default, subclasses may override
    CHECKPOINT_FREQ = 100    # Default, subclasses may override
    N_WORKERS = 4            # Default, subclasses may override
    ENVIRONMENTS = None      # Subclasses should override
    INPUT_SHAPE = None       # Subclasses should override

    def __getitem__(self, index):
        """Return the sub-dataset for environment ``index``."""
        return self.datasets[index]

    def __len__(self):
        """Return the number of environments."""
        return len(self.datasets)
class Debug(MultipleDomainDataset):
    """Synthetic random-tensor dataset for smoke-testing the pipeline.

    Builds three environments of 16 random examples with binary labels.
    """

    def __init__(self, root, test_envs, hparams):
        super().__init__()
        self.input_shape = self.INPUT_SHAPE
        self.num_classes = 2
        self.datasets = []
        for _ in range(3):
            env_x = torch.randn(16, *self.INPUT_SHAPE)
            env_y = torch.randint(0, self.num_classes, (16,))
            self.datasets.append(TensorDataset(env_x, env_y))
class Debug28(Debug):
    # 3x28x28 variant of the Debug smoke-test dataset.
    INPUT_SHAPE = (3, 28, 28)
    ENVIRONMENTS = ['0', '1', '2']
class Debug224(Debug):
    # 3x224x224 variant of the Debug smoke-test dataset.
    INPUT_SHAPE = (3, 224, 224)
    ENVIRONMENTS = ['0', '1', '2']
class MultipleEnvironmentCIFAR10(MultipleDomainDataset):
    """CIFAR-10 turned into multiple environments via a per-env transform.

    The train and test splits are pooled, shuffled once, and the full pool
    is handed to ``dataset_transform`` once per environment.
    """

    def __init__(self, root, environments, dataset_transform, input_shape,
                 num_classes):
        super().__init__()
        if root is None:
            raise ValueError('Data directory not specified!')

        # Download (if needed) and pool both CIFAR-10 splits.
        train_split = CIFAR10(root, train=True, download=True)
        test_split = CIFAR10(root, train=False, download=True)
        pooled_images = np.concatenate((train_split.data, test_split.data))
        pooled_labels = np.concatenate((train_split.targets, test_split.targets))

        # One global shuffle so every environment sees the same ordering.
        perm = torch.randperm(len(pooled_images))
        pooled_images = pooled_images[perm]
        pooled_labels = pooled_labels[perm]

        # Each environment receives the entire pool; the transform decides
        # how the environment differs.
        self.datasets = [
            dataset_transform(pooled_images, pooled_labels, env)
            for env in environments
        ]

        self.input_shape = input_shape
        self.num_classes = num_classes
class MultipleEnvironmentMNIST(MultipleDomainDataset):
    """MNIST partitioned round-robin into environments.

    Example i is assigned to environment ``i % len(environments)``; each
    environment's slice is then mapped through ``dataset_transform``.
    """

    def __init__(self, root, environments, dataset_transform, input_shape,
                 num_classes):
        super().__init__()
        if root is None:
            raise ValueError('Data directory not specified!')

        # Fixed 10-color RGB palette plus a randomly drawn alternative,
        # used by coloring subclasses.
        self.colors = torch.FloatTensor(
            [[0, 100, 0], [188, 143, 143], [255, 0, 0], [255, 215, 0], [0, 255, 0], [65, 105, 225], [0, 225, 225],
             [0, 0, 255], [255, 20, 147], [160, 160, 160]])
        self.random_colors = torch.randint(255, (10, 3)).float()

        train_split = MNIST(root, train=True, download=True)
        test_split = MNIST(root, train=False, download=True)

        pooled_images = torch.cat((train_split.data,
                                   test_split.data))
        pooled_labels = torch.cat((train_split.targets,
                                   test_split.targets))

        perm = torch.randperm(len(pooled_images))
        pooled_images = pooled_images[perm]
        pooled_labels = pooled_labels[perm]

        self.datasets = []
        self.environments = environments
        n_env = len(environments)
        for i, env in enumerate(environments):
            env_images = pooled_images[i::n_env]
            env_labels = pooled_labels[i::n_env]
            self.datasets.append(dataset_transform(env_images, env_labels, env))

        self.input_shape = input_shape
        self.num_classes = num_classes

    def __getitem__(self, index):
        # Identical to the base class; kept for parity with the original.
        return self.datasets[index]

    def __len__(self):
        return len(self.datasets)
class VHLine(MultipleEnvironmentCIFAR10):
    """CIFAR-10 with a hand-crafted spurious 'line' feature.

    Environment 0 draws brightness-jittered vertical lines (column 16 of
    each channel) plus a horizontal stroke (row 16), with a per-class sign
    pattern; environment 1 only rescales images to [0, 1].
    """
    ENVIRONMENT_NAMES = [0, 1]
    N_WORKERS = 0
    N_STEPS = 10001
    def __init__(self, root, test_envs, hparams):
        # The two environment ids double as domain labels.
        self.domain_label = [0, 1]
        # print("MY COMBINE:", MY_COMBINE)
        self.input_shape = (3, 32, 32)
        self.num_classes = 10
        super(VHLine, self).__init__(root, self.domain_label, self.color_dataset, (3, 32, 32,), 10)
    def color_dataset(self, images, labels, environment):
        """Inject the line feature and pack one environment as a TensorDataset."""
        # Add a line to the last channel and vary its brightness during testing.
        images = self.add_vhline(images, labels, b_scale=1, env=environment)
        # NOTE(review): plots five random samples as a side effect of dataset
        # construction; plt.show() may block under an interactive backend.
        for i in range(5):
            rand_indx = np.random.randint(0, images.shape[0])
            self._plot(images[rand_indx])
        # HWC arrays -> NCHW float tensors; integer class labels.
        x = torch.Tensor(images).permute(0, 3, 1, 2)
        y = torch.Tensor(labels).view(-1).long()
        return TensorDataset(x, y)
    def add_vhline(self, images, labels, b_scale, env):
        """Rescale images to [0, 1] and (env 0 only) draw class-coded lines.

        Each stroke's intensity is 0.5 +/- 0.5 * Uniform(-b_scale, b_scale);
        the sign pattern of the four strokes encodes the class.
        """
        images = np.divide(images, 255.0)
        if env == 1:
            # Environment 1 is line-free: rescaled images only.
            return images
        def configurations(images, cond_indx, cls):
            # To create the ten-valued spurious feature, we consider a vertical line passing through the middle of each channel,
            # and also additionally the horizontal line through the first channel.
            # NOTE(review): every horizontal stroke WRITES channel 0 but READS
            # channel 2 (`images[cond_indx, 16:17, :, 2]`) -- possibly intended
            # to read channel 0; confirm before relying on this feature.
            # NOTE(review): classes 8 and 9 use identical sign patterns, so
            # their spurious codes are indistinguishable -- confirm intent.
            if cls == 0:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 1:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 2:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 3:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 4:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 5:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 6:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 7:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 8:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
            elif cls == 9:
                images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, 16:17, :, 0] = np.add(images[cond_indx, 16:17, :, 2], 0.5 - 0.5 * np.random.uniform(-b_scale, b_scale))
            return images
        for indx in range(self.num_classes):
            # Draw class indx's own pattern on ~50% of its samples.
            class_cond_index = (labels == indx)
            p_ii_arr = np.random.choice([True, False], p=[0.5, 0.5], size=class_cond_index.shape[0])
            class_cond_index = np.multiply(class_cond_index, p_ii_arr)
            images = configurations(images, class_cond_index, indx)
            for indx_j in range(self.num_classes):
                if indx_j != indx:
                    # NOTE(review): this passes `indx_j` (not `indx`), so each
                    # outer iteration re-draws class j's OWN pattern on ~5% of
                    # class j's samples rather than injecting cross-class
                    # noise -- verify this is the intended correlation.
                    other_class_cond_index = (labels == indx_j)
                    p_ij_arr = np.random.choice([True, False], p=[0.05, 0.95], size=other_class_cond_index.shape[0])
                    other_class_cond_index = np.multiply(other_class_cond_index, p_ij_arr)
                    images = configurations(images, other_class_cond_index, indx_j)
        return images
    def _plot(self, img):
        """Display one HWC image with matplotlib (debug helper)."""
        plt.imshow(img)
        plt.axis('off')
        plt.tight_layout()
        plt.show()
class VerticalLine(MultipleEnvironmentCIFAR10):
    """CIFAR-10 with a vertical line whose brightness varies per environment.

    Each environment adds a line of brightness ``b`` (from ``self.scale``)
    to column 16 of the third channel, then rescales that channel to [0, 1].
    """

    ENVIRONMENT_NAMES = [0, 1, 2, 3, 4, 5]

    def __init__(self, root, test_envs, hparams):
        # One environment per brightness offset.
        self.scale = [0, -4, -2, 0, 2, 4]
        super(VerticalLine, self).__init__(root, self.scale, self.color_dataset, (3, 32, 32,), 10)
        self.input_shape = (3, 32, 32)
        self.num_classes = 10

    def color_dataset(self, images, labels, environment):
        """Build one environment: inject the line, convert to NCHW tensors."""
        images = self.add_line(images, environment)
        x = torch.Tensor(images).permute(0, 3, 1, 2)
        y = torch.Tensor(labels).view(-1).long()
        return TensorDataset(x, y)

    def add_line(self, images, b):
        """Add a vertical line of brightness ``b`` to channel 2 of HWC images.

        The channel is offset by +4 before adding ``b`` (so b in [-4, 4]
        keeps it non-negative) and divided by 9 to map back into [0, 1].
        """
        images = np.divide(images, 255.0)
        # add 4 to last channel to avoid negative values in this channel.
        images[:, :, :, 2] = np.add(images[:, :, :, 2], 4)
        # Fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin float() is the documented replacement.
        images[:, :, 16:17, 2] = np.add(images[:, :, 16:17, 2], float(b))
        images[:, :, :, 2] = np.divide(images[:, :, :, 2], 9)
        return images

    def _plot(self, img):
        """Display one HWC image with matplotlib (debug helper)."""
        plt.imshow(img)
        plt.axis('off')
        plt.tight_layout()
        plt.show()
class FullColoredMNIST(MultipleEnvironmentMNIST):
    """MNIST where digit color is spuriously correlated with the class.

    Each environment is [seed, use_fixed_palette, correlation_ratio]: with
    probability ``ratio`` a digit gets its class color, otherwise a random
    one of the ten palette colors.
    """
    ENVIRONMENT_NAMES = [0, 1, 2]
    def __init__(self, root, test_envs, hparams):
        # Only hparams['type'] == 0 is implemented.
        self.data_type = hparams['type']
        if self.data_type == 0:
            self.ratio = hparams.get('ratio', 0.9)
            self.env_seed = hparams.get('env_seed', 1)
            MY_COMBINE = [[self.env_seed, True, 0.0], [self.env_seed, True, 1.0], [self.env_seed, True, self.ratio]]
        else:
            raise NotImplementedError
        # print("MY COMBINE:", MY_COMBINE)
        super(FullColoredMNIST, self).__init__(root, MY_COMBINE, self.color_dataset, (3, 28, 28,), 10)
        self.input_shape = (3, 28, 28)
        self.num_classes = 10
    def color_dataset(self, images, labels, environment):
        # set the seed
        # NOTE(review): torch.cuda.initial_seed() initializes the CUDA context;
        # this likely fails on CPU-only machines -- confirm intended target.
        original_seed = torch.cuda.initial_seed()
        torch.manual_seed(environment[0])
        shuffle = torch.randperm(len(self.colors))
        # environment[1] selects the fixed palette vs. the random one.
        self.colors_ = self.colors[shuffle] if environment[1] else self.random_colors[shuffle]
        torch.manual_seed(environment[0])
        # ber[i] == 1 -> sample i is colored with its class color.
        ber = self.torch_bernoulli_(environment[2], len(labels))
        print("ber:", len(ber), sum(ber))
        torch.manual_seed(original_seed)
        images = torch.stack([images, images, images], dim=1)
        # binarize the images
        images = (images > 0).float()
        y = labels.view(-1).long()
        color_label = torch.zeros_like(y).long()
        # Apply the color to the image
        for img_idx in range(len(images)):
            if ber[img_idx] > 0:
                # Correlated case: color index == class index.
                color_label[img_idx] = labels[img_idx]
                for channels in range(3):
                    images[img_idx, channels, :, :] = images[img_idx, channels, :, :] * \
                                                      self.colors_[labels[img_idx].long(), channels]
            else:
                color = torch.randint(10, [1])[0]  # random color, regardless of label
                color_label[img_idx] = color
                for channels in range(3):
                    images[img_idx, channels, :, :] = images[img_idx, channels, :, :] * self.colors_[color, channels]
        x = images.float().div_(255.0)
        # NOTE(review): plots five random samples as a construction side
        # effect; may block under an interactive matplotlib backend.
        for i in range(5):
            rand_indx = np.random.randint(0, x.shape[0])
            self._plot(images[rand_indx])
        # NOTE(review): stock torch.utils.data.TensorDataset accepts tensors
        # only; the leading `True` suggests a project-local TensorDataset
        # variant -- confirm, as this crashes with the stock class.
        return TensorDataset(True, x, y, color_label)
    def torch_bernoulli_(self, p, size):
        # Bernoulli(p) samples of the given size, as a float tensor.
        return (torch.rand(size) < p).float()
    def torch_xor_(self, a, b):
        # Elementwise XOR for {0,1} float tensors.
        return (a - b).abs()
    def _plot(self, img):
        # Display one CHW tensor image (debug helper).
        plt.imshow(torch.permute(img, (1, 2, 0)))
        plt.axis('off')
        plt.tight_layout()
        plt.show()
class ColoredMNIST(MultipleEnvironmentMNIST):
    """Binary MNIST with a color channel spuriously correlated with the label.

    The environment value is the probability that color and (noisy) label
    disagree: 0.1 / 0.2 for training, 0.9 for the flipped test environment.
    """

    ENVIRONMENTS = ['+90%', '+80%', '-90%']

    def __init__(self, root, test_envs, hparams):
        super(ColoredMNIST, self).__init__(root, [0.1, 0.2, 0.9],
                                           self.color_dataset, (2, 28, 28,), 2)
        self.input_shape = (2, 28, 28,)
        self.num_classes = 2

    def color_dataset(self, images, labels, environment):
        """Color one environment's slice of MNIST and return a TensorDataset."""
        # Binary label: digit < 5, then flipped with probability 0.25.
        digit_labels = (labels < 5).float()
        noisy_labels = self.torch_xor_(digit_labels,
                                       self.torch_bernoulli_(0.25, len(digit_labels)))
        # Color agrees with the noisy label except with prob. `environment`.
        colors = self.torch_xor_(noisy_labels,
                                 self.torch_bernoulli_(environment,
                                                       len(noisy_labels)))
        # Duplicate grayscale into two channels, then zero out the channel
        # opposite each sample's color.
        two_channel = torch.stack([images, images], dim=1)
        row_index = torch.tensor(range(len(two_channel)))
        two_channel[row_index, (1 - colors).long(), :, :] *= 0

        x = two_channel.float().div_(255.0)
        y = noisy_labels.view(-1).long()
        return TensorDataset(x, y)

    def torch_bernoulli_(self, p, size):
        """Bernoulli(p) samples of the given size, as a float tensor."""
        return (torch.rand(size) < p).float()

    def torch_xor_(self, a, b):
        """Elementwise XOR for {0,1} float tensors."""
        return (a - b).abs()
class RotatedMNIST(MultipleEnvironmentMNIST):
    """MNIST where each environment rotates the digits by a fixed angle."""

    ENVIRONMENTS = ['0', '15', '30', '45', '60', '75']
    N_WORKERS = 0

    def __init__(self, root, test_envs, hparams):
        super(RotatedMNIST, self).__init__(root, [0, 15, 30, 45, 60, 75],
                                           self.rotate_dataset, (1, 28, 28,), 10)

    def rotate_dataset(self, images, labels, angle):
        """Rotate every image by ``angle`` degrees (bilinear, zero fill)."""
        rotation = transforms.Compose([
            transforms.ToPILImage(),
            transforms.Lambda(lambda x: rotate(x, angle, fill=(0,),
                                               interpolation=torchvision.transforms.InterpolationMode.BILINEAR)),
            transforms.ToTensor()])

        rotated = torch.zeros(len(images), 1, 28, 28)
        for idx in range(len(images)):
            rotated[idx] = rotation(images[idx])

        return TensorDataset(rotated, labels.view(-1))
class MultipleEnvironmentImageFolder(MultipleDomainDataset):
    """One ImageFolder per subdirectory of ``root``; each subdir is one env.

    Non-test environments get the augmentation pipeline when ``augment`` is
    true; test environments always get the deterministic eval transform.
    """

    def __init__(self, root, test_envs, augment, hparams):
        super().__init__()
        environments = sorted(f.name for f in os.scandir(root) if f.is_dir())

        # Deterministic eval transform (ImageNet normalization).
        eval_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ])

        # Stochastic training-time augmentation pipeline.
        augment_transform = transforms.Compose([
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])

        self.datasets = []
        for i, environment in enumerate(environments):
            use_augment = augment and (i not in test_envs)
            env_transform = augment_transform if use_augment else eval_transform
            env_path = os.path.join(root, environment)
            self.datasets.append(
                ImageFolder(env_path, transform=env_transform))

        self.input_shape = (3, 224, 224,)
        self.num_classes = len(self.datasets[-1].classes)
class VLCS(MultipleEnvironmentImageFolder):
    """VLCS benchmark, loaded from `root`/VLCS/ with four environments."""

    CHECKPOINT_FREQ = 300
    ENVIRONMENTS = ["C", "L", "S", "V"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "VLCS/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class PACS(MultipleEnvironmentImageFolder):
    """PACS benchmark, loaded from `root`/PACS/ with four environments."""

    CHECKPOINT_FREQ = 300
    ENVIRONMENTS = ["A", "C", "P", "S"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "PACS/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class DomainNet(MultipleEnvironmentImageFolder):
    """DomainNet benchmark, loaded from `root`/domain_net/ (six environments)."""

    CHECKPOINT_FREQ = 1000
    ENVIRONMENTS = ["clip", "info", "paint", "quick", "real", "sketch"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "domain_net/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class OfficeHome(MultipleEnvironmentImageFolder):
    """OfficeHome benchmark, loaded from `root`/office_home/ (four environments)."""

    CHECKPOINT_FREQ = 300
    ENVIRONMENTS = ["A", "C", "P", "R"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "office_home/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class TerraIncognita(MultipleEnvironmentImageFolder):
    """TerraIncognita benchmark, loaded from `root`/terra_incognita/."""

    CHECKPOINT_FREQ = 300
    ENVIRONMENTS = ["L100", "L38", "L43", "L46"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "terra_incognita/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class SVIRO(MultipleEnvironmentImageFolder):
    """SVIRO benchmark, loaded from `root`/sviro/ (ten vehicle environments)."""

    CHECKPOINT_FREQ = 300
    ENVIRONMENTS = ["aclass", "escape", "hilux", "i3", "lexus", "tesla", "tiguan", "tucson", "x5", "zoe"]

    def __init__(self, root, test_envs, hparams):
        self.dir = os.path.join(root, "sviro/")
        super().__init__(self.dir, test_envs,
                         hparams['data_augmentation'], hparams)
class WILDSEnvironment:
    """View of a WILDS dataset restricted to the examples whose metadata
    field `metadata_name` equals `metadata_value`."""

    def __init__(
            self,
            wilds_dataset,
            metadata_name,
            metadata_value,
            transform=None):
        self.name = metadata_name + "_" + str(metadata_value)

        # Select the rows of the metadata table matching the requested value.
        column = wilds_dataset.metadata_fields.index(metadata_name)
        metadata_array = wilds_dataset.metadata_array
        mask = metadata_array[:, column] == metadata_value
        self.indices = torch.where(mask)[0]

        self.dataset = wilds_dataset
        self.transform = transform

    def __getitem__(self, i):
        """Return the (input, label) pair for the i-th example in this view."""
        idx = self.indices[i]
        x = self.dataset.get_input(idx)
        # get_input may return a raw array rather than a PIL Image; normalize.
        if type(x).__name__ != "Image":
            x = Image.fromarray(x)
        y = self.dataset.y_array[idx]
        if self.transform is not None:
            x = self.transform(x)
        return x, y

    def __len__(self):
        return len(self.indices)
class WILDSDataset(MultipleDomainDataset):
    """Wrap a WILDS dataset, splitting it into one environment per distinct
    value of the chosen metadata field.

    Augmentation is applied only to training environments (indices not in
    `test_envs`) and only when `augment` is true.
    """

    INPUT_SHAPE = (3, 224, 224)

    def __init__(self, dataset, metadata_name, test_envs, augment, hparams):
        super().__init__()
        # Normalization is stateless, so one instance is shared by both pipelines.
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            normalize,
        ])

        augment_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.RandomResizedCrop(224, scale=(0.7, 1.0)),
            transforms.RandomHorizontalFlip(),
            transforms.ColorJitter(0.3, 0.3, 0.3, 0.3),
            transforms.RandomGrayscale(),
            transforms.ToTensor(),
            normalize,
        ])

        self.datasets = []
        for i, metadata_value in enumerate(
                self.metadata_values(dataset, metadata_name)):
            env_transform = augment_transform if (augment and i not in test_envs) else transform
            self.datasets.append(WILDSEnvironment(
                dataset, metadata_name, metadata_value, env_transform))

        self.input_shape = (3, 224, 224,)
        self.num_classes = dataset.n_classes

    def metadata_values(self, wilds_dataset, metadata_name):
        """Return the sorted distinct values of `metadata_name` in the dataset."""
        column = wilds_dataset.metadata_fields.index(metadata_name)
        values = wilds_dataset.metadata_array[:, column]
        return sorted(set(values.view(-1).tolist()))
class WILDSCamelyon(WILDSDataset):
    """Camelyon17 from WILDS; each hospital is one environment."""

    ENVIRONMENTS = ["hospital_0", "hospital_1", "hospital_2", "hospital_3",
                    "hospital_4"]

    def __init__(self, root, test_envs, hparams):
        super().__init__(Camelyon17Dataset(root_dir=root), "hospital",
                         test_envs, hparams['data_augmentation'], hparams)
class WILDSFMoW(WILDSDataset):
    """FMoW from WILDS; each geographic region is one environment."""

    ENVIRONMENTS = ["region_0", "region_1", "region_2", "region_3",
                    "region_4", "region_5"]

    def __init__(self, root, test_envs, hparams):
        super().__init__(FMoWDataset(root_dir=root), "region",
                         test_envs, hparams['data_augmentation'], hparams)
class Spirals(MultipleDomainDataset):
    """Synthetic two-class spirals with environment-specific "signatures".

    Each example is a 2-D spiral coordinate plus a 16-D signature vector
    (input_shape = (18,)). In training environments the signature is
    correlated with the label; in the held-out 'test' environment it is
    pure noise.
    """
    CHECKPOINT_FREQ = 10
    ENVIRONMENTS = [str(i) for i in range(16)]

    def __init__(self, root, test_env, hparams):
        super().__init__()
        self.datasets = []
        # datasets[0] is the held-out test split with uncorrelated signatures;
        # it is followed by the 16 training environments.
        test_dataset = self.make_tensor_dataset(env='test')
        self.datasets.append(test_dataset)
        for env in self.ENVIRONMENTS:
            env_dataset = self.make_tensor_dataset(env=env, seed=int(env))
            self.datasets.append(env_dataset)
        self.input_shape = (18,)
        self.num_classes = 2

    def make_tensor_dataset(self, env, n_examples=1024, n_envs=16, n_revolutions=3, n_dims=16,
                            flip_first_signature=False,
                            seed=0):
        """Build a TensorDataset for one environment.

        env: 'test' or a training-environment index (int or str).
        n_examples: samples per training environment ('test' always uses 2000).
        n_envs: total number of training environments.
        n_revolutions: number of spiral rotations.
        n_dims: dimensionality of the signature vector.
        flip_first_signature: negate the first example's signature.
        seed: RNG seed for a training environment.
        """
        if env == 'test':
            inputs, labels = self.generate_environment(2000,
                                                       n_rotations=n_revolutions,
                                                       env=env,
                                                       n_envs=n_envs,
                                                       n_dims_signatures=n_dims,
                                                       seed=2 ** 32 - 1
                                                       )
        else:
            inputs, labels = self.generate_environment(n_examples,
                                                       n_rotations=n_revolutions,
                                                       env=env,
                                                       n_envs=n_envs,
                                                       n_dims_signatures=n_dims,
                                                       seed=seed
                                                       )
        if flip_first_signature:
            inputs[:1, 2:] = -inputs[:1, 2:]

        return TensorDataset(torch.tensor(inputs), torch.tensor(labels))

    def generate_environment(self, n_examples, n_rotations, env, n_envs,
                             n_dims_signatures,
                             seed=None):
        """
        env must either be "test" or an int between 0 and n_envs-1
        n_dims_signatures: how many dimensions for the signatures (spirals are always 2)
        seed: seed for numpy
        Returns (inputs, labels) as numpy arrays of dtype float32 / int64.
        """
        assert env == 'test' or 0 <= int(env) < n_envs

        # Generate fixed dictionary of signatures
        rng = np.random.RandomState(seed)

        signatures_matrix = rng.randn(n_envs, n_dims_signatures)

        radii = rng.uniform(0.08, 1, n_examples)
        angles = 2 * n_rotations * np.pi * radii

        labels = rng.randint(0, 2, n_examples)
        # Class 1 spirals are phase-shifted by pi relative to class 0.
        angles = angles + np.pi * labels

        radii += rng.uniform(-0.02, 0.02, n_examples)
        xs = np.cos(angles) * radii
        ys = np.sin(angles) * radii

        if env == 'test':
            # Test signatures carry no label information.
            signatures = rng.randn(n_examples, n_dims_signatures)
        else:
            env = int(env)
            # Sign of the signature encodes the label (+1 / -1).
            signatures_labels = np.array(labels * 2 - 1).reshape(1, -1)
            signatures = signatures_matrix[env] * signatures_labels.T

        signatures = np.stack(signatures)

        mechanisms = np.stack((xs, ys), axis=1)
        mechanisms /= mechanisms.std(axis=0)  # make approx unit variance (signatures already are)
        inputs = np.hstack((mechanisms, signatures))

        # BUGFIX: np.long was deprecated in NumPy 1.20 and removed in 1.24;
        # it aliased the platform int (int64 here), so use np.int64 explicitly.
        return inputs.astype(np.float32), labels.astype(np.int64)
| [
"torchvision.transforms.ToPILImage",
"numpy.hstack",
"torchvision.transforms.ColorJitter",
"numpy.array",
"torchvision.transforms.functional.rotate",
"numpy.sin",
"numpy.divide",
"numpy.random.RandomState",
"matplotlib.pyplot.imshow",
"numpy.multiply",
"numpy.stack",
"torchvision.datasets.Imag... | [((2642, 2682), 'torchvision.datasets.CIFAR10', 'CIFAR10', (['root'], {'train': '(True)', 'download': '(True)'}), '(root, train=True, download=True)\n', (2649, 2682), False, 'from torchvision.datasets import MNIST, ImageFolder, CIFAR10\n'), ((2713, 2754), 'torchvision.datasets.CIFAR10', 'CIFAR10', (['root'], {'train': '(False)', 'download': '(True)'}), '(root, train=False, download=True)\n', (2720, 2754), False, 'from torchvision.datasets import MNIST, ImageFolder, CIFAR10\n'), ((2782, 2850), 'numpy.concatenate', 'np.concatenate', (['(original_dataset_tr.data, original_dataset_te.data)'], {}), '((original_dataset_tr.data, original_dataset_te.data))\n', (2796, 2850), True, 'import numpy as np\n'), ((2877, 2951), 'numpy.concatenate', 'np.concatenate', (['(original_dataset_tr.targets, original_dataset_te.targets)'], {}), '((original_dataset_tr.targets, original_dataset_te.targets))\n', (2891, 2951), True, 'import numpy as np\n'), ((3664, 3839), 'torch.FloatTensor', 'torch.FloatTensor', (['[[0, 100, 0], [188, 143, 143], [255, 0, 0], [255, 215, 0], [0, 255, 0], [65,\n 105, 225], [0, 225, 225], [0, 0, 255], [255, 20, 147], [160, 160, 160]]'], {}), '([[0, 100, 0], [188, 143, 143], [255, 0, 0], [255, 215, 0],\n [0, 255, 0], [65, 105, 225], [0, 225, 225], [0, 0, 255], [255, 20, 147],\n [160, 160, 160]])\n', (3681, 3839), False, 'import torch\n'), ((3954, 3992), 'torchvision.datasets.MNIST', 'MNIST', (['root'], {'train': '(True)', 'download': '(True)'}), '(root, train=True, download=True)\n', (3959, 3992), False, 'from torchvision.datasets import MNIST, ImageFolder, CIFAR10\n'), ((4023, 4062), 'torchvision.datasets.MNIST', 'MNIST', (['root'], {'train': '(False)', 'download': '(True)'}), '(root, train=False, download=True)\n', (4028, 4062), False, 'from torchvision.datasets import MNIST, ImageFolder, CIFAR10\n'), ((4090, 4153), 'torch.cat', 'torch.cat', (['(original_dataset_tr.data, original_dataset_te.data)'], {}), '((original_dataset_tr.data, 
original_dataset_te.data))\n', (4099, 4153), False, 'import torch\n'), ((4218, 4287), 'torch.cat', 'torch.cat', (['(original_dataset_tr.targets, original_dataset_te.targets)'], {}), '((original_dataset_tr.targets, original_dataset_te.targets))\n', (4227, 4287), False, 'import torch\n'), ((5885, 5904), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y'], {}), '(x, y)\n', (5898, 5904), False, 'from torch.utils.data import TensorDataset\n'), ((5979, 6003), 'numpy.divide', 'np.divide', (['images', '(255.0)'], {}), '(images, 255.0)\n', (5988, 6003), True, 'import numpy as np\n'), ((12965, 12980), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (12975, 12980), True, 'import matplotlib.pyplot as plt\n'), ((12989, 13004), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12997, 13004), True, 'import matplotlib.pyplot as plt\n'), ((13013, 13031), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (13029, 13031), True, 'import matplotlib.pyplot as plt\n'), ((13040, 13050), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13048, 13050), True, 'import matplotlib.pyplot as plt\n'), ((13953, 13972), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y'], {}), '(x, y)\n', (13966, 13972), False, 'from torch.utils.data import TensorDataset\n'), ((14026, 14050), 'numpy.divide', 'np.divide', (['images', '(255.0)'], {}), '(images, 255.0)\n', (14035, 14050), True, 'import numpy as np\n'), ((14154, 14183), 'numpy.add', 'np.add', (['images[:, :, :, 2]', '(4)'], {}), '(images[:, :, :, 2], 4)\n', (14160, 14183), True, 'import numpy as np\n'), ((14290, 14322), 'numpy.divide', 'np.divide', (['images[:, :, :, 2]', '(9)'], {}), '(images[:, :, :, 2], 9)\n', (14299, 14322), True, 'import numpy as np\n'), ((14380, 14395), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (14390, 14395), True, 'import matplotlib.pyplot as plt\n'), ((14404, 14419), 'matplotlib.pyplot.axis', 'plt.axis', 
(['"""off"""'], {}), "('off')\n", (14412, 14419), True, 'import matplotlib.pyplot as plt\n'), ((14428, 14446), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14444, 14446), True, 'import matplotlib.pyplot as plt\n'), ((14455, 14465), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14463, 14465), True, 'import matplotlib.pyplot as plt\n'), ((15273, 15298), 'torch.cuda.initial_seed', 'torch.cuda.initial_seed', ([], {}), '()\n', (15296, 15298), False, 'import torch\n'), ((15307, 15340), 'torch.manual_seed', 'torch.manual_seed', (['environment[0]'], {}), '(environment[0])\n', (15324, 15340), False, 'import torch\n'), ((15495, 15528), 'torch.manual_seed', 'torch.manual_seed', (['environment[0]'], {}), '(environment[0])\n', (15512, 15528), False, 'import torch\n'), ((15644, 15676), 'torch.manual_seed', 'torch.manual_seed', (['original_seed'], {}), '(original_seed)\n', (15661, 15676), False, 'import torch\n'), ((15695, 15739), 'torch.stack', 'torch.stack', (['[images, images, images]'], {'dim': '(1)'}), '([images, images, images], dim=1)\n', (15706, 15739), False, 'import torch\n'), ((16787, 16825), 'torch.utils.data.TensorDataset', 'TensorDataset', (['(True)', 'x', 'y', 'color_label'], {}), '(True, x, y, color_label)\n', (16800, 16825), False, 'from torch.utils.data import TensorDataset\n'), ((17061, 17076), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (17069, 17076), True, 'import matplotlib.pyplot as plt\n'), ((17085, 17103), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17101, 17103), True, 'import matplotlib.pyplot as plt\n'), ((17112, 17122), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17120, 17122), True, 'import matplotlib.pyplot as plt\n'), ((18194, 18230), 'torch.stack', 'torch.stack', (['[images, images]'], {'dim': '(1)'}), '([images, images], dim=1)\n', (18205, 18230), False, 'import torch\n'), ((18539, 18558), 'torch.utils.data.TensorDataset', 
'TensorDataset', (['x', 'y'], {}), '(x, y)\n', (18552, 18558), False, 'from torch.utils.data import TensorDataset\n'), ((19555, 19574), 'torch.utils.data.TensorDataset', 'TensorDataset', (['x', 'y'], {}), '(x, y)\n', (19568, 19574), False, 'from torch.utils.data import TensorDataset\n'), ((21264, 21291), 'os.path.join', 'os.path.join', (['root', '"""VLCS/"""'], {}), "(root, 'VLCS/')\n", (21276, 21291), False, 'import os\n'), ((21559, 21586), 'os.path.join', 'os.path.join', (['root', '"""PACS/"""'], {}), "(root, 'PACS/')\n", (21571, 21586), False, 'import os\n'), ((21892, 21925), 'os.path.join', 'os.path.join', (['root', '"""domain_net/"""'], {}), "(root, 'domain_net/')\n", (21904, 21925), False, 'import os\n'), ((22199, 22233), 'os.path.join', 'os.path.join', (['root', '"""office_home/"""'], {}), "(root, 'office_home/')\n", (22211, 22233), False, 'import os\n'), ((22520, 22558), 'os.path.join', 'os.path.join', (['root', '"""terra_incognita/"""'], {}), "(root, 'terra_incognita/')\n", (22532, 22558), False, 'import os\n'), ((22893, 22921), 'os.path.join', 'os.path.join', (['root', '"""sviro/"""'], {}), "(root, 'sviro/')\n", (22905, 22921), False, 'import os\n'), ((25860, 25892), 'wilds.datasets.camelyon17_dataset.Camelyon17Dataset', 'Camelyon17Dataset', ([], {'root_dir': 'root'}), '(root_dir=root)\n', (25877, 25892), False, 'from wilds.datasets.camelyon17_dataset import Camelyon17Dataset\n'), ((26216, 26242), 'wilds.datasets.fmow_dataset.FMoWDataset', 'FMoWDataset', ([], {'root_dir': 'root'}), '(root_dir=root)\n', (26227, 26242), False, 'from wilds.datasets.fmow_dataset import FMoWDataset\n'), ((28739, 28766), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (28760, 28766), True, 'import numpy as np\n'), ((29446, 29466), 'numpy.stack', 'np.stack', (['signatures'], {}), '(signatures)\n', (29454, 29466), True, 'import numpy as np\n'), ((29488, 29514), 'numpy.stack', 'np.stack', (['(xs, ys)'], {'axis': '(1)'}), '((xs, ys), axis=1)\n', 
(29496, 29514), True, 'import numpy as np\n'), ((29631, 29666), 'numpy.hstack', 'np.hstack', (['(mechanisms, signatures)'], {}), '((mechanisms, signatures))\n', (29640, 29666), True, 'import numpy as np\n'), ((5686, 5723), 'numpy.random.randint', 'np.random.randint', (['(0)', 'images.shape[0]'], {}), '(0, images.shape[0])\n', (5703, 5723), True, 'import numpy as np\n'), ((12248, 12325), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'p': '[0.5, 0.5]', 'size': 'class_cond_index.shape[0]'}), '([True, False], p=[0.5, 0.5], size=class_cond_index.shape[0])\n', (12264, 12325), True, 'import numpy as np\n'), ((12357, 12396), 'numpy.multiply', 'np.multiply', (['class_cond_index', 'p_ii_arr'], {}), '(class_cond_index, p_ii_arr)\n', (12368, 12396), True, 'import numpy as np\n'), ((14248, 14259), 'numpy.float', 'np.float', (['b'], {}), '(b)\n', (14256, 14259), True, 'import numpy as np\n'), ((16697, 16729), 'numpy.random.randint', 'np.random.randint', (['(0)', 'x.shape[0]'], {}), '(0, x.shape[0])\n', (16714, 16729), True, 'import numpy as np\n'), ((17022, 17051), 'torch.permute', 'torch.permute', (['img', '(1, 2, 0)'], {}), '(img, (1, 2, 0))\n', (17035, 17051), False, 'import torch\n'), ((20795, 20826), 'os.path.join', 'os.path.join', (['root', 'environment'], {}), '(root, environment)\n', (20807, 20826), False, 'import os\n'), ((20853, 20895), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['path'], {'transform': 'env_transform'}), '(path, transform=env_transform)\n', (20864, 20895), False, 'from torchvision.datasets import MNIST, ImageFolder, CIFAR10\n'), ((23398, 23462), 'torch.where', 'torch.where', (['(metadata_array[:, metadata_index] == metadata_value)'], {}), '(metadata_array[:, metadata_index] == metadata_value)\n', (23409, 23462), False, 'import torch\n'), ((23729, 23747), 'PIL.Image.fromarray', 'Image.fromarray', (['x'], {}), '(x)\n', (23744, 23747), False, 'from PIL import Image, ImageFile\n'), ((28203, 28223), 'torch.tensor', 'torch.tensor', 
(['inputs'], {}), '(inputs)\n', (28215, 28223), False, 'import torch\n'), ((28225, 28245), 'torch.tensor', 'torch.tensor', (['labels'], {}), '(labels)\n', (28237, 28245), False, 'import torch\n'), ((29089, 29103), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (29095, 29103), True, 'import numpy as np\n'), ((29125, 29139), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (29131, 29139), True, 'import numpy as np\n'), ((3887, 3914), 'torch.randint', 'torch.randint', (['(255)', '(10, 3)'], {}), '(255, (10, 3))\n', (3900, 3914), False, 'import torch\n'), ((5779, 5799), 'torch.Tensor', 'torch.Tensor', (['images'], {}), '(images)\n', (5791, 5799), False, 'import torch\n'), ((13847, 13867), 'torch.Tensor', 'torch.Tensor', (['images'], {}), '(images)\n', (13859, 13867), False, 'import torch\n'), ((15865, 15884), 'torch.zeros_like', 'torch.zeros_like', (['y'], {}), '(y)\n', (15881, 15884), False, 'import torch\n'), ((19142, 19165), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (19163, 19165), False, 'from torchvision import transforms\n'), ((19361, 19382), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (19380, 19382), False, 'from torchvision import transforms\n'), ((19764, 19780), 'os.scandir', 'os.scandir', (['root'], {}), '(root)\n', (19774, 19780), False, 'import os\n'), ((19894, 19923), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (19911, 19923), False, 'from torchvision import transforms\n'), ((19937, 19958), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (19956, 19958), False, 'from torchvision import transforms\n'), ((19972, 20047), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (19992, 20047), False, 'from torchvision import transforms\n'), ((20182, 20233), 
'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {'scale': '(0.7, 1.0)'}), '(224, scale=(0.7, 1.0))\n', (20210, 20233), False, 'from torchvision import transforms\n'), ((20247, 20280), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (20278, 20280), False, 'from torchvision import transforms\n'), ((20294, 20336), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.3)', '(0.3)', '(0.3)', '(0.3)'], {}), '(0.3, 0.3, 0.3, 0.3)\n', (20316, 20336), False, 'from torchvision import transforms\n'), ((20350, 20378), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {}), '()\n', (20376, 20378), False, 'from torchvision import transforms\n'), ((20392, 20413), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (20411, 20413), False, 'from torchvision import transforms\n'), ((20427, 20502), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (20447, 20502), False, 'from torchvision import transforms\n'), ((24185, 24214), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (24202, 24214), False, 'from torchvision import transforms\n'), ((24228, 24249), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (24247, 24249), False, 'from torchvision import transforms\n'), ((24263, 24338), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (24283, 24338), False, 'from torchvision import transforms\n'), ((24429, 24458), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (24446, 24458), False, 'from torchvision import transforms\n'), ((24472, 
24523), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224)'], {'scale': '(0.7, 1.0)'}), '(224, scale=(0.7, 1.0))\n', (24500, 24523), False, 'from torchvision import transforms\n'), ((24537, 24570), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (24568, 24570), False, 'from torchvision import transforms\n'), ((24584, 24626), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.3)', '(0.3)', '(0.3)', '(0.3)'], {}), '(0.3, 0.3, 0.3, 0.3)\n', (24606, 24626), False, 'from torchvision import transforms\n'), ((24640, 24668), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {}), '()\n', (24666, 24668), False, 'from torchvision import transforms\n'), ((24682, 24703), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (24701, 24703), False, 'from torchvision import transforms\n'), ((24717, 24792), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (24737, 24792), False, 'from torchvision import transforms\n'), ((2022, 2056), 'torch.randn', 'torch.randn', (['(16)', '*self.INPUT_SHAPE'], {}), '(16, *self.INPUT_SHAPE)\n', (2033, 2056), False, 'import torch\n'), ((2078, 2119), 'torch.randint', 'torch.randint', (['(0)', 'self.num_classes', '(16,)'], {}), '(0, self.num_classes, (16,))\n', (2091, 2119), False, 'import torch\n'), ((12646, 12736), 'numpy.random.choice', 'np.random.choice', (['[True, False]'], {'p': '[0.05, 0.95]', 'size': 'other_class_cond_index.shape[0]'}), '([True, False], p=[0.05, 0.95], size=other_class_cond_index\n .shape[0])\n', (12662, 12736), True, 'import numpy as np\n'), ((12777, 12822), 'numpy.multiply', 'np.multiply', (['other_class_cond_index', 'p_ij_arr'], {}), '(other_class_cond_index, p_ij_arr)\n', (12788, 12822), True, 'import numpy as np\n'), ((16338, 
16360), 'torch.randint', 'torch.randint', (['(10)', '[1]'], {}), '(10, [1])\n', (16351, 16360), False, 'import torch\n'), ((16884, 16900), 'torch.rand', 'torch.rand', (['size'], {}), '(size)\n', (16894, 16900), False, 'import torch\n'), ((18617, 18633), 'torch.rand', 'torch.rand', (['size'], {}), '(size)\n', (18627, 18633), False, 'import torch\n'), ((29314, 29338), 'numpy.array', 'np.array', (['(labels * 2 - 1)'], {}), '(labels * 2 - 1)\n', (29322, 29338), True, 'import numpy as np\n'), ((5832, 5852), 'torch.Tensor', 'torch.Tensor', (['labels'], {}), '(labels)\n', (5844, 5852), False, 'import torch\n'), ((13900, 13920), 'torch.Tensor', 'torch.Tensor', (['labels'], {}), '(labels)\n', (13912, 13920), False, 'import torch\n'), ((19207, 19304), 'torchvision.transforms.functional.rotate', 'rotate', (['x', 'angle'], {'fill': '(0,)', 'interpolation': 'torchvision.transforms.InterpolationMode.BILINEAR'}), '(x, angle, fill=(0,), interpolation=torchvision.transforms.\n InterpolationMode.BILINEAR)\n', (19213, 19304), False, 'from torchvision.transforms.functional import rotate\n'), ((6441, 6477), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (6458, 6477), True, 'import numpy as np\n'), ((6579, 6615), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (6596, 6615), True, 'import numpy as np\n'), ((6717, 6753), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (6734, 6753), True, 'import numpy as np\n'), ((6855, 6891), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (6872, 6891), True, 'import numpy as np\n'), ((7020, 7056), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7037, 7056), True, 'import numpy as np\n'), ((7158, 7194), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), 
'(-b_scale, b_scale)\n', (7175, 7194), True, 'import numpy as np\n'), ((7296, 7332), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7313, 7332), True, 'import numpy as np\n'), ((7434, 7470), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7451, 7470), True, 'import numpy as np\n'), ((7599, 7635), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7616, 7635), True, 'import numpy as np\n'), ((7737, 7773), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7754, 7773), True, 'import numpy as np\n'), ((7875, 7911), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (7892, 7911), True, 'import numpy as np\n'), ((8013, 8049), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8030, 8049), True, 'import numpy as np\n'), ((8178, 8214), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8195, 8214), True, 'import numpy as np\n'), ((8316, 8352), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8333, 8352), True, 'import numpy as np\n'), ((8454, 8490), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8471, 8490), True, 'import numpy as np\n'), ((8592, 8628), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8609, 8628), True, 'import numpy as np\n'), ((8757, 8793), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8774, 8793), True, 'import numpy as np\n'), ((8895, 8931), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (8912, 8931), 
True, 'import numpy as np\n'), ((9033, 9069), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9050, 9069), True, 'import numpy as np\n'), ((9171, 9207), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9188, 9207), True, 'import numpy as np\n'), ((9336, 9372), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9353, 9372), True, 'import numpy as np\n'), ((9474, 9510), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9491, 9510), True, 'import numpy as np\n'), ((9612, 9648), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9629, 9648), True, 'import numpy as np\n'), ((9750, 9786), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9767, 9786), True, 'import numpy as np\n'), ((9915, 9951), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (9932, 9951), True, 'import numpy as np\n'), ((10053, 10089), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10070, 10089), True, 'import numpy as np\n'), ((10191, 10227), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10208, 10227), True, 'import numpy as np\n'), ((10329, 10365), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10346, 10365), True, 'import numpy as np\n'), ((10494, 10530), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10511, 10530), True, 'import numpy as np\n'), ((10632, 10668), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10649, 10668), True, 'import numpy as 
np\n'), ((10770, 10806), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10787, 10806), True, 'import numpy as np\n'), ((10908, 10944), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (10925, 10944), True, 'import numpy as np\n'), ((11073, 11109), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11090, 11109), True, 'import numpy as np\n'), ((11211, 11247), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11228, 11247), True, 'import numpy as np\n'), ((11349, 11385), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11366, 11385), True, 'import numpy as np\n'), ((11487, 11523), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11504, 11523), True, 'import numpy as np\n'), ((11652, 11688), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11669, 11688), True, 'import numpy as np\n'), ((11790, 11826), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11807, 11826), True, 'import numpy as np\n'), ((11928, 11964), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (11945, 11964), True, 'import numpy as np\n'), ((12066, 12102), 'numpy.random.uniform', 'np.random.uniform', (['(-b_scale)', 'b_scale'], {}), '(-b_scale, b_scale)\n', (12083, 12102), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Reference: <NAME>, et al. "Pixeldefend: Leveraging generative models to understand and defend against adversarial examples," in ICLR, 2018.
# Reference Implementation from Authors (TensorFlow): https://github.com/yang-song/pixeldefend
# **************************************
# @Time : 2018/11/23 21:41
# @Author : <NAME> & <NAME>
# @Lab : nesa.zju.edu.cn
# @File : PD.py
# **************************************
import math
import os
import numpy as np
import torch
import torch.nn as nn
from Defenses.DefenseMethods.defenses import Defense
try:
from Defenses.DefenseMethods.External.pixel_cnn_pp.model import PixelCNN
from Defenses.DefenseMethods.External.pixel_cnn_pp.utils import decode, load_part_of_model
except:
print('please git clone the repo [] and train the generative PixelCNN model first')
raise ImportError
rescaling = lambda x: (x - 0.5) * 2
inv_rescaling = lambda x: x * 0.5 + 0.5
res_1_to_255 = lambda x: x * 127.5 + 127.5
res_255_to_1 = lambda x: (x - 127.5) / 127.5
class PixelDefend(Defense):
    """PixelDefend defense (Song et al., ICLR 2018).

    Purifies ("de-noises") input images with a pre-trained generative
    PixelCNN before classification; the classifier itself is not retrained
    (see :meth:`defense`).
    """
    def __init__(self, model=None, defense_name=None, dataset=None, pixel_cnn_dir=None, device=None):
        """
        :param model: classification model being defended
        :param defense_name: name of this defense method
        :param dataset: 'MNIST' or 'CIFAR10' (case-insensitive)
        :param pixel_cnn_dir: base directory holding the trained PixelCNN checkpoint
        :param device: torch device used for purification
        """
        super(PixelDefend, self).__init__(model=model, defense_name=defense_name)
        self.model = model
        self.defense_name = defense_name
        self.device = device
        self.Dataset = dataset.upper()
        assert self.Dataset in ['MNIST', 'CIFAR10'], "The data set must be MNIST or CIFAR10"
        # load the trained PixelCNN model
        # The structure of PixelCNN is fixed as follows in this implementation, the same as https://github.com/SaizhuoWang/pixel-cnn-pp
        self.pixel_cnn_model = PixelCNN(nr_resnet=5, nr_filters=160, nr_logistic_mix=10, resnet_nonlinearity='concat_elu',
                                       input_channels=3 if self.Dataset == 'CIFAR10' else 1).to(self.device)
        # wrapped in DataParallel so the checkpoint's 'module.'-prefixed keys match
        self.pixel_cnn_model = nn.DataParallel(self.pixel_cnn_model)
        self.load_pixel_cnn_model(dir=pixel_cnn_dir)
    def load_pixel_cnn_model(self, dir=None):
        """Load the trained PixelCNN weights for the current dataset.

        :param dir: base directory; note the format string concatenates it
            directly, so it must end with a path separator
        """
        pixel_cnn_model_location = '{}DefenseMethods/External/pixel_cnn_pp/models/{}_pixel_cnn.pth'.format(dir, self.Dataset)
        print('\nstarting to load the pixel cnn model from ', pixel_cnn_model_location)
        assert os.path.exists(pixel_cnn_model_location), "the pixel cnn model in {} does not exist, please try the model first !".format(
            pixel_cnn_model_location)
        # copies only the matching parameters into the DataParallel-wrapped model
        load_part_of_model(model=self.pixel_cnn_model, path=pixel_cnn_model_location)
    def de_noising_samples(self, samples=None, batch_size=20, eps=None):
        """Purify one batch of images pixel by pixel with the PixelCNN.

        Each pixel is replaced by the most likely intensity (under the
        PixelCNN) within an eps-ball of its original value.

        :param samples: numpy array of shape (B, C, W, H) with values in [0, 1]
        :param batch_size: B; must equal samples.shape[0]
        :param eps: purification radius in (0, 1); each pixel may move at most
            round(eps * 255) intensity levels from its original value
        :return: purified samples, numpy array of the same shape as ``samples``
        """
        # samples.shape = (B, C, W, H)
        assert len(samples.shape) == 4 and isinstance(samples, (np.ndarray, np.generic)), \
            "input samples should be type of numpy with 4 dimensions"
        assert samples.shape[0] == batch_size, 'make sure the batch_size in the first dimension'
        channel = samples.shape[1]
        assert channel == 1 or channel == 3, "the second dimension should be the channel"
        copy_samples = np.copy(samples)
        copy_samples = torch.from_numpy(copy_samples).to(self.device).float()
        copy_samples = rescaling(copy_samples) # [0, 1] ==> [-1, 1]
        assert eps < 1.0 and eps > 0.0
        # eps expressed in integer intensity levels on the [0, 255] scale
        int_epsilon = int(round(eps * 255.0, 0))
        width, height = samples.shape[2], samples.shape[3]
        # autoregressive sweep: one PixelCNN forward pass per spatial position
        for i in range(width):
            for j in range(height):
                output = self.pixel_cnn_model(copy_samples, sample=True)
                # decode() turns the PixelCNN output into per-pixel scores over
                # the 256 intensity levels (shape (B, W, H, C, 256) per the
                # comment further down)
                out = decode(copy_samples, output, self.Dataset, self.device)
                copy_sample_de_norm = res_1_to_255(copy_samples) # [-1, 1] ==> [0, 255]
                copy_sample_int = copy_sample_de_norm.clone().int()
                # lower / upper intensity bounds of the eps-ball per pixel
                lb = torch.clamp(copy_sample_int - int_epsilon, min=0)
                ub = torch.clamp(copy_sample_int + int_epsilon, max=255)
                # enumerate all 256 candidate intensities, broadcast against the image
                # NOTE(review): torch.range is deprecated in favour of torch.arange;
                # torch.range(0, 255, step=1) yields 256 values including both endpoints
                template = (torch.range(0, 255, step=1, dtype=torch.int).to(self.device) + torch.zeros_like(copy_sample_int, dtype=torch.int)[
                    ..., None]).to(self.device)
                lb = lb[..., None] + torch.zeros_like(template, dtype=torch.int)
                ub = ub[..., None] + torch.zeros_like(template, dtype=torch.int)
                # template == 1 where the candidate intensity lies OUTSIDE [lb, ub]
                template = torch.clamp((torch.lt(template, lb) + torch.gt(template, ub)), max=1, min=0).float()
                template = template.permute(0, 2, 3, 1, 4)
                # mask disallowed intensities with a large negative penalty before argmax
                out = out - template * 1e10 # out.shape = (B, W, H, C, 256)
                # pick the most likely allowed intensity and rescale to [-1, 1]
                out = res_255_to_1(torch.argmax(out, dim=4).permute(0, 3, 1, 2).float()) # [0, 255] -> [-1, 1]
                # out.shape = (B, C, W, H)
                # only position (i, j) is committed; later positions are revisited
                copy_samples[:, :, i, j] = out.data[:, :, i, j]
        copy_sample = inv_rescaling(copy_samples)
        return copy_sample.data.cpu().numpy()
    def de_noising_samples_batch(self, samples=None, batch_size=20, eps=None):
        """Purify an arbitrary number of samples in chunks of ``batch_size``.

        NOTE(review): de_noising_samples asserts samples.shape[0] == batch_size,
        so a final partial batch (len(samples) not divisible by batch_size)
        will trip that assert — confirm callers always pass divisible counts.

        :param samples: numpy array of shape (N, C, W, H)
        :param batch_size: number of samples purified per PixelCNN sweep
        :param eps: purification radius in (0, 1), forwarded to de_noising_samples
        :return: numpy array of purified samples, same shape as ``samples``
        """
        purified_images = []
        number_batch = int(math.ceil(len(samples) / batch_size))
        for index in range(number_batch):
            start = index * batch_size
            end = min((index + 1) * batch_size, len(samples))
            print('\r===> in batch {:>2}, {:>4} ({:>4} in total) samples are purified ... '.format(index, end - start, end), end=' ')
            rtn = self.de_noising_samples(samples=samples[start:end], batch_size=batch_size, eps=eps)
            purified_images.extend(rtn)
        return np.array(purified_images)
    def defense(self):
        # intentionally a no-op: PixelDefend purifies inputs at test time only
        print('As the defense of PixelDefend does not retrain the model, we do not implement this method')
| [
"os.path.exists",
"numpy.copy",
"Defenses.DefenseMethods.External.pixel_cnn_pp.utils.decode",
"torch.gt",
"torch.nn.DataParallel",
"torch.from_numpy",
"Defenses.DefenseMethods.External.pixel_cnn_pp.utils.load_part_of_model",
"numpy.array",
"torch.range",
"Defenses.DefenseMethods.External.pixel_cnn... | [((1955, 1992), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.pixel_cnn_model'], {}), '(self.pixel_cnn_model)\n', (1970, 1992), True, 'import torch.nn as nn\n'), ((2323, 2363), 'os.path.exists', 'os.path.exists', (['pixel_cnn_model_location'], {}), '(pixel_cnn_model_location)\n', (2337, 2363), False, 'import os\n'), ((2492, 2569), 'Defenses.DefenseMethods.External.pixel_cnn_pp.utils.load_part_of_model', 'load_part_of_model', ([], {'model': 'self.pixel_cnn_model', 'path': 'pixel_cnn_model_location'}), '(model=self.pixel_cnn_model, path=pixel_cnn_model_location)\n', (2510, 2569), False, 'from Defenses.DefenseMethods.External.pixel_cnn_pp.utils import decode, load_part_of_model\n'), ((3177, 3193), 'numpy.copy', 'np.copy', (['samples'], {}), '(samples)\n', (3184, 3193), True, 'import numpy as np\n'), ((5537, 5562), 'numpy.array', 'np.array', (['purified_images'], {}), '(purified_images)\n', (5545, 5562), True, 'import numpy as np\n'), ((1722, 1875), 'Defenses.DefenseMethods.External.pixel_cnn_pp.model.PixelCNN', 'PixelCNN', ([], {'nr_resnet': '(5)', 'nr_filters': '(160)', 'nr_logistic_mix': '(10)', 'resnet_nonlinearity': '"""concat_elu"""', 'input_channels': "(3 if self.Dataset == 'CIFAR10' else 1)"}), "(nr_resnet=5, nr_filters=160, nr_logistic_mix=10,\n resnet_nonlinearity='concat_elu', input_channels=3 if self.Dataset ==\n 'CIFAR10' else 1)\n", (1730, 1875), False, 'from Defenses.DefenseMethods.External.pixel_cnn_pp.model import PixelCNN\n'), ((3652, 3707), 'Defenses.DefenseMethods.External.pixel_cnn_pp.utils.decode', 'decode', (['copy_samples', 'output', 'self.Dataset', 'self.device'], {}), '(copy_samples, output, self.Dataset, self.device)\n', (3658, 3707), False, 'from Defenses.DefenseMethods.External.pixel_cnn_pp.utils import decode, load_part_of_model\n'), ((3887, 3936), 'torch.clamp', 'torch.clamp', (['(copy_sample_int - int_epsilon)'], {'min': '(0)'}), '(copy_sample_int - int_epsilon, min=0)\n', (3898, 
3936), False, 'import torch\n'), ((3958, 4009), 'torch.clamp', 'torch.clamp', (['(copy_sample_int + int_epsilon)'], {'max': '(255)'}), '(copy_sample_int + int_epsilon, max=255)\n', (3969, 4009), False, 'import torch\n'), ((4238, 4281), 'torch.zeros_like', 'torch.zeros_like', (['template'], {'dtype': 'torch.int'}), '(template, dtype=torch.int)\n', (4254, 4281), False, 'import torch\n'), ((4319, 4362), 'torch.zeros_like', 'torch.zeros_like', (['template'], {'dtype': 'torch.int'}), '(template, dtype=torch.int)\n', (4335, 4362), False, 'import torch\n'), ((3217, 3247), 'torch.from_numpy', 'torch.from_numpy', (['copy_samples'], {}), '(copy_samples)\n', (3233, 3247), False, 'import torch\n'), ((4101, 4151), 'torch.zeros_like', 'torch.zeros_like', (['copy_sample_int'], {'dtype': 'torch.int'}), '(copy_sample_int, dtype=torch.int)\n', (4117, 4151), False, 'import torch\n'), ((4404, 4426), 'torch.lt', 'torch.lt', (['template', 'lb'], {}), '(template, lb)\n', (4412, 4426), False, 'import torch\n'), ((4429, 4451), 'torch.gt', 'torch.gt', (['template', 'ub'], {}), '(template, ub)\n', (4437, 4451), False, 'import torch\n'), ((4038, 4082), 'torch.range', 'torch.range', (['(0)', '(255)'], {'step': '(1)', 'dtype': 'torch.int'}), '(0, 255, step=1, dtype=torch.int)\n', (4049, 4082), False, 'import torch\n'), ((4647, 4671), 'torch.argmax', 'torch.argmax', (['out'], {'dim': '(4)'}), '(out, dim=4)\n', (4659, 4671), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
"""
import os
from datetime import datetime
from typing import Union, Optional, Any, List, NoReturn
from numbers import Real
import wfdb
import numpy as np
# Print floats with 5-digit precision and without scientific notation
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
from ..utils.common import (
ArrayLike,
get_record_list_recursive,
)
from ..base import PhysioNetDataBase
__all__ = [
"MIMIC3",
]
class MIMIC3(PhysioNetDataBase):
    """NOT finished.

    MIMIC-III Critical Care Database

    About mimic3
    ------------
    1. de-identified health-related data for over 4000 patients who stayed in
       critical care units of the Beth Israel Deaconess Medical Center between
       2001 and 2012
    2. covers demographics, bedside vital-sign measurements (roughly one data
       point per hour), laboratory test results, procedures, medications,
       caregiver notes, imaging reports, and mortality (both in and out of
       hospital)

    Issues
    ------
    ref. [3]

    Usage
    -----
    1. epidemiology
    2. clinical decision-rule improvement
    3. electronic tool development

    References
    ----------
    [1] https://mimic.physionet.org/
    [2] https://github.com/MIT-LCP/mimic-code
    [3] https://www.physionet.org/content/mimiciii/1.4/
    [4] https://physionet.org/content/mimic3wdb/1.0/
    [5] https://physionet.org/content/mimic3wdb-matched/1.0/
    """
    def __init__(self, db_dir: Optional[str] = None, working_dir: Optional[str] = None, verbose: int = 2, **kwargs: Any) -> NoReturn:
        """
        Parameters
        ----------
        db_dir: str, optional,
            storage location of the database;
            data are fetched from PhysioNet when not specified
        working_dir: str, optional,
            working directory for intermediate files and the log file
        verbose: int, default 2,
            log verbosity
        kwargs: auxiliary keyword arguments
        """
        super().__init__(db_name="mimic3", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
        self.fs = 125
        self.data_ext = "dat"
        self.ann_ext = None  # to check
        self._ls_rec(db_name="mimic3wdb")
    def load_data(self):
        """Not implemented yet."""
        raise NotImplementedError
    def get_subject_id(self, rec) -> int:
        """Not implemented yet."""
        raise NotImplementedError
| [
"numpy.set_printoptions"
] | [((185, 232), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(5)', 'suppress': '(True)'}), '(precision=5, suppress=True)\n', (204, 232), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import math
import codecs
import random
import numpy as np
from glob import glob
from PIL import Image
from keras.utils import np_utils, Sequence
from sklearn.model_selection import train_test_split
class BaseSequence(Sequence):
    """Minimal batch-wise data generator; each iteration yields one batch.

    An instance can be handed directly to ``fit_generator`` as its
    ``generator`` argument; ``fit_generator`` then wraps it into a
    multi-process data stream that guarantees no sample is drawn twice
    within a single epoch.
    """
    def __init__(self, img_paths, labels, batch_size, img_size):
        assert len(img_paths) == len(labels), "len(img_paths) must equal to len(lables)"
        assert img_size[0] == img_size[1], "img_size[0] must equal to img_size[1]"
        # Stack paths and one-hot labels side by side so a single shuffle in
        # on_epoch_end keeps them aligned (the mixed array stores strings).
        path_column = np.array(img_paths).reshape(len(img_paths), 1)
        self.x_y = np.hstack((path_column, np.array(labels)))
        self.batch_size = batch_size
        self.img_size = img_size
    def __len__(self):
        # number of batches per epoch, counting a trailing partial batch
        return math.ceil(len(self.x_y) / self.batch_size)
    @staticmethod
    def center_img(img, size=None, fill_value=255):
        """Paste ``img`` onto the center of a square ``fill_value`` canvas."""
        height, width = img.shape[:2]
        side = size if size is not None else max(height, width)
        canvas = np.full((side, side) + img.shape[2:], fill_value, np.uint8)
        top = (side - height) // 2
        left = (side - width) // 2
        canvas[top:top + height, left:left + width] = img
        return canvas
    def preprocess_img(self, img_path):
        """Load one image, resize it to the target square size, flip RGB->BGR.

        Add any extra preprocessing steps here.
        """
        image = Image.open(img_path)
        image = image.resize((self.img_size[0], self.img_size[0]))
        image = image.convert('RGB')
        return np.array(image)[:, :, ::-1]
    def __getitem__(self, idx):
        start = idx * self.batch_size
        stop = (idx + 1) * self.batch_size
        rows = self.x_y[start:stop]
        batch_x = np.array([self.preprocess_img(path) for path in rows[:, 0]])
        batch_y = np.array(rows[:, 1:]).astype(np.float32)
        return batch_x, batch_y
    def on_epoch_end(self):
        """Method called at the end of every epoch: reshuffle the rows."""
        np.random.shuffle(self.x_y)
def data_flow(train_data_dir, batch_size, num_classes, input_size):
    """Build shuffled training/validation generators from a label directory.

    Every ``*.txt`` file under ``train_data_dir`` is expected to hold one line
    of the form ``"<img_name>, <label>"``; malformed files are reported and
    skipped.

    :param train_data_dir: directory containing the images and ``*.txt`` labels
    :param batch_size: samples per batch handed to BaseSequence
    :param num_classes: number of classes for one-hot encoding
    :param input_size: square side length each image is resized to
    :return: (train_sequence, validation_sequence) BaseSequence pair (80/20 split)
    """
    label_files = glob(os.path.join(train_data_dir, '*.txt'))
    random.shuffle(label_files)
    img_paths = []
    labels = []
    for file_path in label_files:  # the enumerate index was unused
        with codecs.open(file_path, 'r', 'utf-8') as f:
            line = f.readline()
        line_split = line.strip().split(', ')
        if len(line_split) != 2:
            # malformed label line: report and skip the sample
            print('%s contains an error label' % os.path.basename(file_path))
            continue
        img_name, label = line_split
        img_paths.append(os.path.join(train_data_dir, img_name))
        labels.append(int(label))
    labels = np_utils.to_categorical(labels, num_classes)
    train_img_paths, validation_img_paths, train_labels, validation_labels = \
        train_test_split(img_paths, labels, test_size=0.2, random_state=0)
    print('total samples: %d, training samples: %d, validation samples: %d' % (len(img_paths), len(train_img_paths), len(validation_img_paths)))
    train_sequence = BaseSequence(train_img_paths, train_labels, batch_size, [input_size, input_size])
    validation_sequence = BaseSequence(validation_img_paths, validation_labels, batch_size, [input_size, input_size])
    # Multi-process wrapping (OrderedEnqueuer) is left to the caller /
    # fit_generator; the sequences themselves are process-safe by design.
    return train_sequence, validation_sequence
if __name__ == '__main__':
    # Smoke test for the data pipeline: draw the same batch index three times,
    # shuffling between draws, and dump the decoded images for visual
    # inspection of the shuffle behaviour.
    # FIX: data_flow requires four arguments; the original two-argument call
    # raised a TypeError at runtime.
    train_sequence, validation_sequence = data_flow(train_data_dir, batch_size, num_classes, input_size)
    label_name = ['cat', 'dog']
    for tag in ('', '_2', '_3'):
        if tag:
            # shuffle between draws (mirrors the on_epoch_end calls of the
            # original, which did not shuffle after the last draw)
            train_sequence.on_epoch_end()
        batch_data, batch_label = train_sequence.__getitem__(5)
        for index, data in enumerate(batch_data):
            # BGR -> RGB before handing the array to PIL
            img = Image.fromarray(data[:, :, ::-1])
            img.save('./debug/%d%s_%s.jpg' % (index, tag, label_name[int(batch_label[index][1])]))
    print('end')
| [
"PIL.Image.fromarray",
"PIL.Image.open",
"random.shuffle",
"sklearn.model_selection.train_test_split",
"os.path.join",
"numpy.array",
"keras.utils.np_utils.to_categorical",
"os.path.basename",
"numpy.full",
"codecs.open",
"numpy.random.shuffle"
] | [((2422, 2449), 'random.shuffle', 'random.shuffle', (['label_files'], {}), '(label_files)\n', (2436, 2449), False, 'import random\n'), ((2974, 3018), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['labels', 'num_classes'], {}), '(labels, num_classes)\n', (2997, 3018), False, 'from keras.utils import np_utils, Sequence\n'), ((3106, 3172), 'sklearn.model_selection.train_test_split', 'train_test_split', (['img_paths', 'labels'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(img_paths, labels, test_size=0.2, random_state=0)\n', (3122, 3172), False, 'from sklearn.model_selection import train_test_split\n'), ((1212, 1248), 'numpy.full', 'np.full', (['shape', 'fill_value', 'np.uint8'], {}), '(shape, fill_value, np.uint8)\n', (1219, 1248), True, 'import numpy as np\n'), ((1579, 1599), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1589, 1599), False, 'from PIL import Image\n'), ((1710, 1723), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1718, 1723), True, 'import numpy as np\n'), ((2243, 2270), 'numpy.random.shuffle', 'np.random.shuffle', (['self.x_y'], {}), '(self.x_y)\n', (2260, 2270), True, 'import numpy as np\n'), ((2379, 2416), 'os.path.join', 'os.path.join', (['train_data_dir', '"""*.txt"""'], {}), "(train_data_dir, '*.txt')\n", (2391, 2416), False, 'import os\n'), ((4753, 4786), 'PIL.Image.fromarray', 'Image.fromarray', (['data[:, :, ::-1]'], {}), '(data[:, :, ::-1])\n', (4768, 4786), False, 'from PIL import Image\n'), ((5029, 5062), 'PIL.Image.fromarray', 'Image.fromarray', (['data[:, :, ::-1]'], {}), '(data[:, :, ::-1])\n', (5044, 5062), False, 'from PIL import Image\n'), ((5307, 5340), 'PIL.Image.fromarray', 'Image.fromarray', (['data[:, :, ::-1]'], {}), '(data[:, :, ::-1])\n', (5322, 5340), False, 'from PIL import Image\n'), ((2550, 2586), 'codecs.open', 'codecs.open', (['file_path', '"""r"""', '"""utf-8"""'], {}), "(file_path, 'r', 'utf-8')\n", (2561, 2586), False, 'import codecs\n'), ((2892, 2930), 
'os.path.join', 'os.path.join', (['train_data_dir', 'img_name'], {}), '(train_data_dir, img_name)\n', (2904, 2930), False, 'import os\n'), ((755, 771), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (763, 771), True, 'import numpy as np\n'), ((2073, 2090), 'numpy.array', 'np.array', (['batch_y'], {}), '(batch_y)\n', (2081, 2090), True, 'import numpy as np\n'), ((2749, 2776), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (2765, 2776), False, 'import os\n'), ((707, 726), 'numpy.array', 'np.array', (['img_paths'], {}), '(img_paths)\n', (715, 726), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.