code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import numpy as np
class Metric:
"""
"""
def __init__(self, name):
"""
"""
self.name = name
def __repr__(self):
return self.name
def _check_inputs(self, trues, preds, true_rels):
""" validate inputs and convert to ndarray
TODO: right now it's just casting inputs
later more in-depth inputs checking needed
"""
# TODO: check inputs are 2d lists or array
# otherwise, wrap it into 2d
if true_rels is not None:
# true_rels = np.array(true_rels, dtype=np.float64)
true_rels = [np.array(rel).astype(np.float64) for rel in true_rels]
return (
[np.array(t) for t in trues],
[np.array(p) for p in preds],
true_rels
)
def compute(self, trues, preds):
"""
"""
raise NotImplementedError()
class PerUserMetric(Metric):
"""
"""
def __init__(self, name, topk):
"""
"""
super().__init__(name)
self.topk = topk
def _score(self, true, pred, true_rel=None):
"""
"""
raise NotImplementedError()
def compute(self, trues, preds, true_rels=None, stats={'mean':np.mean}):
"""
Inputs:
trues (list of list of int): contains lists of `true` indices of items per user
preds (list of list of int): contians lists of predicted indices of items per user
true_rels (list of list of float): contains weight (relevance) of true indices
stats (dict[str]:ufuncs): desired stats to be computed over users
Outputs:
list of float: statistics of scores over users
list of int: list of users whose score could not computed
"""
trues, preds, true_rels = self._check_inputs(trues, preds, true_rels)
scores = []
err = []
for i, (true, pred) in enumerate(zip(trues, preds)):
if len(true) == 0:
err.append(i)
continue
# if it's weighted by values
if true_rels is not None:
true_rel = true_rels[i]
else:
true_rel = None
s = self._score(true, pred, true_rel)
scores.append(s)
# get stats
results = {k:fnc(scores) for k, fnc in stats.items()}
# outputs result
return results, err
class PerElementMetric(Metric):
"""
"""
pass
class PerCorpusMetric(Metric):
"""
"""
pass
| [
"numpy.array"
] | [((707, 718), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (715, 718), True, 'import numpy as np\n'), ((749, 760), 'numpy.array', 'np.array', (['p'], {}), '(p)\n', (757, 760), True, 'import numpy as np\n'), ((622, 635), 'numpy.array', 'np.array', (['rel'], {}), '(rel)\n', (630, 635), True, 'import numpy as np\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 09:54:29 2018
@author: akiranagamori
"""
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy import signal
default_path = '/Users/akiranagamori/Documents/GitHub/python-code/';
save_path = '/Users/akiranagamori/Documents/GitHub/python-code/Data';
os.chdir(save_path)
output = np.load('output_temp.npy').item()
os.chdir(default_path)
Fs = 1000;
meanForce = np.mean(output['Muscle Force'][4*Fs:])
CoVForce = np.std(output['Muscle Force'][4*Fs:])/meanForce
#Force = output['Tendon Force'][4*Fs:];
#f,Pxx = signal.periodogram(Force-np.mean(Force),Fs);
fig1 = plt.figure()
plt.plot(output['Spike Train'][0,:])
#fig2 = plt.figure()
#ax2 = fig2.add_subplot(111);
#ax2.plot(f,Pxx);
#ax2.set_xlim([0,30]);
| [
"numpy.load",
"matplotlib.pyplot.plot",
"numpy.std",
"matplotlib.pyplot.figure",
"numpy.mean",
"os.chdir"
] | [((356, 375), 'os.chdir', 'os.chdir', (['save_path'], {}), '(save_path)\n', (364, 375), False, 'import os\n'), ((419, 441), 'os.chdir', 'os.chdir', (['default_path'], {}), '(default_path)\n', (427, 441), False, 'import os\n'), ((466, 506), 'numpy.mean', 'np.mean', (["output['Muscle Force'][4 * Fs:]"], {}), "(output['Muscle Force'][4 * Fs:])\n", (473, 506), True, 'import numpy as np\n'), ((667, 679), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (677, 679), True, 'import matplotlib.pyplot as plt\n'), ((680, 717), 'matplotlib.pyplot.plot', 'plt.plot', (["output['Spike Train'][0, :]"], {}), "(output['Spike Train'][0, :])\n", (688, 717), True, 'import matplotlib.pyplot as plt\n'), ((516, 555), 'numpy.std', 'np.std', (["output['Muscle Force'][4 * Fs:]"], {}), "(output['Muscle Force'][4 * Fs:])\n", (522, 555), True, 'import numpy as np\n'), ((385, 411), 'numpy.load', 'np.load', (['"""output_temp.npy"""'], {}), "('output_temp.npy')\n", (392, 411), True, 'import numpy as np\n')] |
"""
SIW Data loader, as given in Mnist tutorial
"""
import json
import imageio as io
import matplotlib.pyplot as plt
import torch
import torchvision.utils as v_utils
from torchvision import datasets, transforms
import os
import numpy as np
import random
from torch.utils.data import DataLoader, TensorDataset, Dataset
import imgaug.augmenters as iaa
import cv2
# data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))
seq = iaa.Sequential([
iaa.Add(value=(-40,40), per_channel=True), # Add color
iaa.GammaContrast(gamma=(0.5,1.5)) # GammaContrast with a gamma of 0.5 to 1.5
])
def siw_file_metadata(path):
# For example:
# path: Train/live/003/003-1-1-1-1.mov
# path: Train/spoof/003/003-1-2-1-1.mov
fldr, path = os.path.split(path)
# live_spoof = os.path.split(os.path.split(fldr)[0])[1]
path, extension = os.path.splitext(path)
client_id, sensor_id, type_id, medium_id, session_id = path.split("_")[0].split("-")
attack_type = {"1": None, "2": "print", "3": "replay"}[type_id]
if attack_type is not None:
attack_type = f"{attack_type}/{medium_id}"
return client_id, attack_type, sensor_id, type_id, medium_id, session_id
filenameToPILImage = lambda x: Image.open(x)
# img_size = 224
def get_gray_transforms():
return transforms.Compose([
filenameToPILImage,
transforms.Resize((32, 32)),
transforms.Grayscale(),
transforms.ToTensor(),
])
def get_valid_transforms(img_size=256, norm_mu=[0.485, 0.456, 0.406], norm_sig=[0.229, 0.224, 0.225]):
return transforms.Compose([
filenameToPILImage,
transforms.Resize((img_size, img_size)),
transforms.ToTensor(),
transforms.Normalize(norm_mu, norm_sig)
])
def imshow(image,depth):
fig = plt.figure()
ax = fig.add_subplot(1, 2, 1)
image = plt.imshow(image)
ax.set_title('Image')
ax = fig.add_subplot(1, 2, 1)
#image = plt.imshow(depth_image)
plt.tight_layout()
ax.set_title("Depth Image")
ax.axis('off')
plt.show()
class SiwDataset(Dataset):
def __init__(self,dataset_type,dir_path,transform=None,protocol="Protocol_1"):
self.main_dir = "/storage/alperen/sodecsapp/datasets/SiW"
self.protocol = protocol
self.dataset_type = dataset_type
self.dir_path = dir_path
self.transform = transform
self.real_annotation_path = os.path.join(dir_path,protocol,dataset_type,"for_real.lst")
self.attack_annotation_path = os.path.join(dir_path,protocol,dataset_type,"for_attack.lst")
self.annotations = []
def depth_namer(x,i):
if self.dataset_type == "train":
return os.path.join(x.replace("Train","Train_depth"),i)
else:
return None
#### Real Annotations ####
with open(self.real_annotation_path, 'r') as f:
# Read the lst file with stripping \n characters
annotation_list = list(map(str.strip,f.readlines()))
for annotation in annotation_list:
video_name, subject_id = annotation.split(" ")
frame_dir = os.path.join(self.main_dir,video_name)
for i in os.listdir(frame_dir):
self.annotations.append((os.path.join(frame_dir,i), depth_namer(frame_dir,i) , 1))
### Spoofs ####
with open(self.attack_annotation_path, 'r') as f:
# Read the lst file with stripping \n characters
annotation_list = list(map(str.strip,f.readlines()))
for annotation in annotation_list:
video_name, subject_id, attack_type = annotation.split(" ")
frame_dir = os.path.join(self.main_dir,video_name)
for i in os.listdir(frame_dir):
self.annotations.append((os.path.join(frame_dir,i), None, 0))
if self.dataset_type == "train":
self.annotations = random.choices(self.annotations, k=50000)
def __len__(self):
return(len(self.annotations))
def __getitem__(self, idx):
img_path, img_depth_path, label = self.annotations[idx]
image_x = np.zeros((256, 256, 3))
map_x = np.zeros((32, 32))
image_x = cv2.resize(cv2.imread(img_path), (256, 256))
if img_depth_path is not None:
map_x = cv2.resize(cv2.imread(img_depth_path, 0), (32, 32))
# data augment from 'imgaug' --> Add (value=(-40,40), per_channel=True), GammaContrast (gamma=(0.5,1.5))
if self.dataset_type == "train":
image_x = seq.augment_image(image_x)
sample = {'image_x': image_x, 'map_x': map_x, 'spoofing_label': label}
if self.transform:
sample = self.transform(sample)
return sample
"""
class SiwDataLoader:
def __init__(self, config):
self.config = config
self.transform_to_tensor = transforms.Compose([transforms.ToTensor()])
#self.train_dataset = SiwDataset(dataset_type="train",json_path="data/train.json",transform=self.transform_to_tensor)
#self.val_dataset = SiwDataset(dataset_type="dev",json_path="data/val.json",transform=self.transform_to_tensor)
#self.test_dataset = SiwDataset(dataset_type="eval",json_path="data/test.json",transform=self.transform_to_tensor)
if config.data_mode == "json":
self.train_loader = DataLoader(self.train_dataset,
batch_size=self.config.batch_size,
shuffle=True,
num_workers=self.config.data_loader_workers)
train_len = len(self.train_dataset)
self.train_iterations = (train_len + self.config.batch_size - 1) // self.config.batch_size
self.val_loader = DataLoader(self.val_dataset,
batch_size=self.config.batch_size,
shuffle=True,
num_workers=self.config.data_loader_workers)
val_len = len(self.val_dataset)
self.val_iterations = (val_len + self.config.batch_size - 1) // self.config.batch_size
self.test_loader = DataLoader(self.test_dataset,
batch_size=self.config.batch_size,
shuffle=True,
num_workers=self.config.data_loader_workers)
test_len = len(self.test_dataset)
self.test_iterations = (test_len + self.config.batch_size - 1) // self.config.batch_size
"""
if __name__ == "__main__":
dataset = SiwDataset(dataset_type="train",dir_path="/storage/alperen/sodecsapp/datasets/SiW/lists",protocol="Protocol_1")
print(len(dataset)) | [
"matplotlib.pyplot.show",
"os.path.join",
"imgaug.augmenters.GammaContrast",
"matplotlib.pyplot.imshow",
"random.choices",
"numpy.zeros",
"torchvision.transforms.ToTensor",
"cv2.imread",
"matplotlib.pyplot.figure",
"os.path.splitext",
"imgaug.augmenters.Add",
"torchvision.transforms.Grayscale"... | [((790, 809), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (803, 809), False, 'import os\n'), ((892, 914), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (908, 914), False, 'import os\n'), ((1828, 1840), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1838, 1840), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1904), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1897, 1904), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2022, 2024), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2090), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2088, 2090), True, 'import matplotlib.pyplot as plt\n'), ((496, 538), 'imgaug.augmenters.Add', 'iaa.Add', ([], {'value': '(-40, 40)', 'per_channel': '(True)'}), '(value=(-40, 40), per_channel=True)\n', (503, 538), True, 'import imgaug.augmenters as iaa\n'), ((556, 591), 'imgaug.augmenters.GammaContrast', 'iaa.GammaContrast', ([], {'gamma': '(0.5, 1.5)'}), '(gamma=(0.5, 1.5))\n', (573, 591), True, 'import imgaug.augmenters as iaa\n'), ((2448, 2510), 'os.path.join', 'os.path.join', (['dir_path', 'protocol', 'dataset_type', '"""for_real.lst"""'], {}), "(dir_path, protocol, dataset_type, 'for_real.lst')\n", (2460, 2510), False, 'import os\n'), ((2546, 2610), 'os.path.join', 'os.path.join', (['dir_path', 'protocol', 'dataset_type', '"""for_attack.lst"""'], {}), "(dir_path, protocol, dataset_type, 'for_attack.lst')\n", (2558, 2610), False, 'import os\n'), ((4233, 4256), 'numpy.zeros', 'np.zeros', (['(256, 256, 3)'], {}), '((256, 256, 3))\n', (4241, 4256), True, 'import numpy as np\n'), ((4273, 4291), 'numpy.zeros', 'np.zeros', (['(32, 32)'], {}), '((32, 32))\n', (4281, 4291), True, 'import numpy as np\n'), ((1392, 1419), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)'], {}), '((32, 32))\n', (1409, 1419), 
False, 'from torchvision import datasets, transforms\n'), ((1429, 1451), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {}), '()\n', (1449, 1451), False, 'from torchvision import datasets, transforms\n'), ((1461, 1482), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1480, 1482), False, 'from torchvision import datasets, transforms\n'), ((1663, 1702), 'torchvision.transforms.Resize', 'transforms.Resize', (['(img_size, img_size)'], {}), '((img_size, img_size))\n', (1680, 1702), False, 'from torchvision import datasets, transforms\n'), ((1712, 1733), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1731, 1733), False, 'from torchvision import datasets, transforms\n'), ((1743, 1782), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['norm_mu', 'norm_sig'], {}), '(norm_mu, norm_sig)\n', (1763, 1782), False, 'from torchvision import datasets, transforms\n'), ((3999, 4040), 'random.choices', 'random.choices', (['self.annotations'], {'k': '(50000)'}), '(self.annotations, k=50000)\n', (4013, 4040), False, 'import random\n'), ((4322, 4342), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (4332, 4342), False, 'import cv2\n'), ((3191, 3230), 'os.path.join', 'os.path.join', (['self.main_dir', 'video_name'], {}), '(self.main_dir, video_name)\n', (3203, 3230), False, 'import os\n'), ((3255, 3276), 'os.listdir', 'os.listdir', (['frame_dir'], {}), '(frame_dir)\n', (3265, 3276), False, 'import os\n'), ((3758, 3797), 'os.path.join', 'os.path.join', (['self.main_dir', 'video_name'], {}), '(self.main_dir, video_name)\n', (3770, 3797), False, 'import os\n'), ((3822, 3843), 'os.listdir', 'os.listdir', (['frame_dir'], {}), '(frame_dir)\n', (3832, 3843), False, 'import os\n'), ((4427, 4456), 'cv2.imread', 'cv2.imread', (['img_depth_path', '(0)'], {}), '(img_depth_path, 0)\n', (4437, 4456), False, 'import cv2\n'), ((3323, 3349), 'os.path.join', 'os.path.join', (['frame_dir', 'i'], {}), 
'(frame_dir, i)\n', (3335, 3349), False, 'import os\n'), ((3890, 3916), 'os.path.join', 'os.path.join', (['frame_dir', 'i'], {}), '(frame_dir, i)\n', (3902, 3916), False, 'import os\n')] |
from .base import QA
import glob
import os
import collections
import numpy as np
import fitsio
from astropy.table import Table
import desiutil.log
from desispec.qproc.io import read_qframe
from desispec.io import read_fiberflat
from desispec.calibfinder import CalibFinder
class QAFiberflat(QA):
"""docstring """
def __init__(self):
self.output_type = "PER_CAMFIBER"
pass
def valid_obstype(self, obstype):
return ( obstype.upper() == "FLAT" )
def run(self, indir):
'''TODO: document'''
log = desiutil.log.get_logger()
results = list()
infiles = glob.glob(os.path.join(indir, 'qframe-*.fits'))
if len(infiles) == 0 :
log.error("no qframe in {}".format(indir))
return None
for filename in infiles:
qframe = read_qframe(filename)
night = int(qframe.meta['NIGHT'])
expid = int(qframe.meta['EXPID'])
cam = qframe.meta['CAMERA'][0].upper()
spectro = int(qframe.meta['CAMERA'][1])
try :
cfinder = CalibFinder([qframe.meta])
except :
log.error("failed to find calib for qframe {}".format(filename))
continue
if not cfinder.haskey("FIBERFLAT") :
log.warning("no known fiberflat for qframe {}".format(filename))
continue
fflat = read_fiberflat(cfinder.findfile("FIBERFLAT"))
tmp = np.median(fflat.fiberflat,axis=1)
reference_fflat = tmp/np.median(tmp)
tmp = np.median(qframe.flux,axis=1)
this_fflat = tmp/np.median(tmp)
for f,fiber in enumerate(qframe.fibermap["FIBER"]) :
results.append(collections.OrderedDict(
NIGHT=night, EXPID=expid, SPECTRO=spectro, CAM=cam, FIBER=fiber,FIBERFLAT=this_fflat[f],REF_FIBERFLAT=reference_fflat[f]))
if len(results)==0 :
return None
return Table(results, names=results[0].keys())
| [
"desispec.calibfinder.CalibFinder",
"numpy.median",
"desispec.qproc.io.read_qframe",
"collections.OrderedDict",
"os.path.join"
] | [((636, 672), 'os.path.join', 'os.path.join', (['indir', '"""qframe-*.fits"""'], {}), "(indir, 'qframe-*.fits')\n", (648, 672), False, 'import os\n'), ((843, 864), 'desispec.qproc.io.read_qframe', 'read_qframe', (['filename'], {}), '(filename)\n', (854, 864), False, 'from desispec.qproc.io import read_qframe\n'), ((1499, 1533), 'numpy.median', 'np.median', (['fflat.fiberflat'], {'axis': '(1)'}), '(fflat.fiberflat, axis=1)\n', (1508, 1533), True, 'import numpy as np\n'), ((1613, 1643), 'numpy.median', 'np.median', (['qframe.flux'], {'axis': '(1)'}), '(qframe.flux, axis=1)\n', (1622, 1643), True, 'import numpy as np\n'), ((1106, 1132), 'desispec.calibfinder.CalibFinder', 'CalibFinder', (['[qframe.meta]'], {}), '([qframe.meta])\n', (1117, 1132), False, 'from desispec.calibfinder import CalibFinder\n'), ((1567, 1581), 'numpy.median', 'np.median', (['tmp'], {}), '(tmp)\n', (1576, 1581), True, 'import numpy as np\n'), ((1672, 1686), 'numpy.median', 'np.median', (['tmp'], {}), '(tmp)\n', (1681, 1686), True, 'import numpy as np\n'), ((1784, 1935), 'collections.OrderedDict', 'collections.OrderedDict', ([], {'NIGHT': 'night', 'EXPID': 'expid', 'SPECTRO': 'spectro', 'CAM': 'cam', 'FIBER': 'fiber', 'FIBERFLAT': 'this_fflat[f]', 'REF_FIBERFLAT': 'reference_fflat[f]'}), '(NIGHT=night, EXPID=expid, SPECTRO=spectro, CAM=cam,\n FIBER=fiber, FIBERFLAT=this_fflat[f], REF_FIBERFLAT=reference_fflat[f])\n', (1807, 1935), False, 'import collections\n')] |
'''
The contents of this file are focused on the plotting of the Data structure
in various projections and formats
These functions do this in whatever matplotlib instance you've got going on,
unless you toggle <show>
e.g. consider that below each function, I've added
#show: if True, opens a window and shows the plot. Otherwise, adds it to
#whatever matplotlib figure instance you have open
'''
#===============================================================================
# IMPORTS
#===============================================================================
import numpy as np
import matplotlib.pyplot as plt
#===============================================================================
# FUNCTIONS
#===============================================================================
#generates a scatter plot of your Timestep object
def scatter(t, x, y, show=False, s=5.0, color='k', marker='o', **kwargs):
#t: the Timestep object to plot
#x: the x-axis parameter
#y: the y-axis parameter
plt.scatter(t[x], t[y], s=s, c=color, marker=marker, **kwargs)
if show:
plt.xlabel(x)
plt.ylabel(y)
plt.show()
#sticks a big fat red dot wherever the specific star(s) is, given an id(s)
#if vx and vy are specified, an arrow is drawn
def trace_particle(t, id, x, y, vx=None, vy=None, vscale=0.02, show=False, s=50., color='r', marker='o', **kwargs):
#t (Timestep): the Timestep object that your particle is plotted over
#id (int, can be array-like): the (list of) id(s) for your particle(s)
#x (str): the x-axis parameter
#y (str): the y-axis parameter
#vx (str, optional): the x-axis arrow parameter
#vy (str, optional): the y-axis arrow parameter
#vscale (float, >0, optional): scales the arrow size
#TODO: more customization of the arrow, probably
#make sure id is always a numpy array of id values
if not hasattr(id, "__len__"): #if true, id is not array-like:
id = [id]
else:
if type(id)!=type(np.array([])):
id = np.array(id)
#get the indices of the particles from the id's
#gotta be a 1-line way to do this with numpy, but I couldn't figure it out
n = []
for i in id:
n.append(t['id'].index(i))
#plot the particles
for i in n:
plt.scatter(t[x][i], t[y][i], s=s, c=color, marker=marker, **kwargs)
if vx and vy:
#TODO: figure out how to pass kwargs to the arrow and scatter separately
plt.arrow(t[x][i], t[y][i], t[vx][i]*vscale, t[vy][i]*vscale, color=color, head_width=1)
if show:
plt.xlabel(x)
plt.ylabel(y)
plt.show()
#plots a histogram of the Timestep
#see np.hist for usage
def hist(t, x, show=False, *args, **kwargs):
#t (Timestep): the Timestep object being plotted
#x (str): the axis parameter
h = plt.hist(t[x], range=range, bins=bins, *args, **kwargs)
if show:
plt.xlabel(x)
plt.show()
return h
def hist2d(t, x, y, show=False, *args, **kwargs):
#t (Timestep): the Timestep object being plotted
#x (str): the x-axis parameter
#y (str): the y-axis parameter
h = plt.hist2d(t[x], t[y], *args, **kwargs)
if show:
plt.xlabel(x)
plt.ylabel(y)
plt.show()
return h
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.hist2d",
"matplotlib.pyplot.arrow",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((1029, 1091), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t[x]', 't[y]'], {'s': 's', 'c': 'color', 'marker': 'marker'}), '(t[x], t[y], s=s, c=color, marker=marker, **kwargs)\n', (1040, 1091), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2915), 'matplotlib.pyplot.hist', 'plt.hist', (['t[x]', '*args'], {'range': 'range', 'bins': 'bins'}), '(t[x], *args, range=range, bins=bins, **kwargs)\n', (2868, 2915), True, 'import matplotlib.pyplot as plt\n'), ((3168, 3207), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['t[x]', 't[y]', '*args'], {}), '(t[x], t[y], *args, **kwargs)\n', (3178, 3207), True, 'import matplotlib.pyplot as plt\n'), ((1114, 1127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (1124, 1127), True, 'import matplotlib.pyplot as plt\n'), ((1136, 1149), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (1146, 1149), True, 'import matplotlib.pyplot as plt\n'), ((1158, 1168), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1166, 1168), True, 'import matplotlib.pyplot as plt\n'), ((2307, 2375), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t[x][i]', 't[y][i]'], {'s': 's', 'c': 'color', 'marker': 'marker'}), '(t[x][i], t[y][i], s=s, c=color, marker=marker, **kwargs)\n', (2318, 2375), True, 'import matplotlib.pyplot as plt\n'), ((2606, 2619), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (2616, 2619), True, 'import matplotlib.pyplot as plt\n'), ((2628, 2641), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (2638, 2641), True, 'import matplotlib.pyplot as plt\n'), ((2650, 2660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2658, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2938, 2951), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (2948, 2951), True, 'import matplotlib.pyplot as plt\n'), ((2960, 2970), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2968, 2970), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3243), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['x'], {}), '(x)\n', (3240, 3243), True, 'import matplotlib.pyplot as plt\n'), ((3252, 3265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (3262, 3265), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3284), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3282, 3284), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2062), 'numpy.array', 'np.array', (['id'], {}), '(id)\n', (2058, 2062), True, 'import numpy as np\n'), ((2495, 2592), 'matplotlib.pyplot.arrow', 'plt.arrow', (['t[x][i]', 't[y][i]', '(t[vx][i] * vscale)', '(t[vy][i] * vscale)'], {'color': 'color', 'head_width': '(1)'}), '(t[x][i], t[y][i], t[vx][i] * vscale, t[vy][i] * vscale, color=\n color, head_width=1)\n', (2504, 2592), True, 'import matplotlib.pyplot as plt\n'), ((2018, 2030), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2026, 2030), True, 'import numpy as np\n')] |
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
from __future__ import division
import math
import json
import random
import pprint
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
import tensorflow as tf
import tensorflow.contrib.slim as slim
pp = pprint.PrettyPrinter()
get_stddev = lambda x, k_h, k_w: 1/math.sqrt(k_w*k_h*x.get_shape()[-1])
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width,
resize_height=64, resize_width=64,
crop=True, grayscale=False):
image = imread(image_path, grayscale)
return transform(image, input_height, input_width,
resize_height, resize_width, crop)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, grayscale = False):
if (grayscale):
return scipy.misc.imread(path, flatten = True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3,4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3]==1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:,:,0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h)/2.))
i = int(round((w - crop_w)/2.))
return scipy.misc.imresize(
x[j:j+crop_h, i:i+crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
resize_height=64, resize_width=64, crop=True):
if crop:
cropped_image = center_crop(
image, input_height, input_width,
resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(image, [resize_height, resize_width])
return np.array(cropped_image)/127.5 - 1.
def inverse_transform(images):
return (images+1.)/2.
def to_json(output_path, *layers):
with open(output_path, "w") as layer_f:
lines = ""
for w, b, bn in layers:
layer_idx = w.name.split('/')[0].split('h')[1]
B = b.eval()
if "lin/" in w.name:
W = w.eval()
depth = W.shape[1]
else:
W = np.rollaxis(w.eval(), 2, 0)
depth = W.shape[0]
biases = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(B)]}
if bn != None:
gamma = bn.gamma.eval()
beta = bn.beta.eval()
gamma = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(gamma)]}
beta = {"sy": 1, "sx": 1, "depth": depth, "w": ['%.2f' % elem for elem in list(beta)]}
else:
gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}
if "lin/" in w.name:
fs = []
for w in W.T:
fs.append({"sy": 1, "sx": 1, "depth": W.shape[0], "w": ['%.2f' % elem for elem in list(w)]})
lines += """
var layer_%s = {
"layer_type": "fc",
"sy": 1, "sx": 1,
"out_sx": 1, "out_sy": 1,
"stride": 1, "pad": 0,
"out_depth": %s, "in_depth": %s,
"biases": %s,
"gamma": %s,
"beta": %s,
"filters": %s
};""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
else:
fs = []
for w_ in W:
fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": ['%.2f' % elem for elem in list(w_.flatten())]})
lines += """
var layer_%s = {
"layer_type": "deconv",
"sy": 5, "sx": 5,
"out_sx": %s, "out_sy": %s,
"stride": 2, "pad": 1,
"out_depth": %s, "in_depth": %s,
"biases": %s,
"gamma": %s,
"beta": %s,
"filters": %s
};""" % (layer_idx, 2**(int(layer_idx)+2), 2**(int(layer_idx)+2),
W.shape[0], W.shape[3], biases, gamma, beta, fs)
layer_f.write(" ".join(lines.replace("'","").split()))
def make_gif(images, fname, duration=2, true_image=False):
import moviepy.editor as mpy
def make_frame(t):
try:
x = images[int(len(images)/duration*t)]
except:
x = images[-1]
if true_image:
return x.astype(np.uint8)
else:
return ((x+1)/2*255).astype(np.uint8)
clip = mpy.VideoClip(make_frame, duration=duration)
clip.write_gif(fname, fps = len(images) / duration)
def visualize(sess, dcgan, config, option):
image_frame_dim = int(math.ceil(config.batch_size**.5))
if option == 0:
z_sample = np.random.uniform(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 1:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.random.uniform(-1, 1, size=(config.batch_size , dcgan.z_dim))
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
if config.dataset == "mnist":
y = np.random.choice(10, config.batch_size)
y_one_hot = np.zeros((config.batch_size, 10))
y_one_hot[np.arange(config.batch_size), y] = 1
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_arange_%s.png' % (idx))
elif option == 2:
values = np.arange(0, 1, 1./config.batch_size)
for idx in [random.randint(0, dcgan.z_dim - 1) for _ in xrange(dcgan.z_dim)]:
print(" [*] %d" % idx)
z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
z_sample = np.tile(z, (config.batch_size, 1))
#z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
if config.dataset == "mnist":
y = np.random.choice(10, config.batch_size)
y_one_hot = np.zeros((config.batch_size, 10))
y_one_hot[np.arange(config.batch_size), y] = 1
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample, dcgan.y: y_one_hot})
else:
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
try:
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
except:
save_images(samples, [image_frame_dim, image_frame_dim], './samples/test_%s.png' % strftime("%Y-%m-%d-%H-%M-%S", gmtime()))
elif option == 3:
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample):
z[idx] = values[kdx]
samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
make_gif(samples, './samples/test_gif_%s.gif' % (idx))
elif option == 4:
image_set = []
values = np.arange(0, 1, 1./config.batch_size)
for idx in xrange(dcgan.z_dim):
print(" [*] %d" % idx)
z_sample = np.zeros([config.batch_size, dcgan.z_dim])
for kdx, z in enumerate(z_sample): z[idx] = values[kdx]
image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) \
for idx in range(64) + range(63, -1, -1)]
make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
def image_manifold_size(num_images):
manifold_h = int(np.floor(np.sqrt(num_images)))
manifold_w = int(np.ceil(np.sqrt(num_images)))
assert manifold_h * manifold_w == num_images
return manifold_h, manifold_w
def dump_script(dirname, script_file, file_list=None):
import glob, os, shutil, sys
dest = os.path.join(dirname, 'script')
os.mkdir(dest)
print('copying files to {}'.format(dest))
if file_list is None:
file_list = glob.glob("*.py")
for file in file_list:
print('copying {}'.format(file))
shutil.copy2(file, dest)
if script_file is not None:
print('copying {}'.format(script_file))
shutil.copy2(script_file, dest)
with open(os.path.join(dest, "command.txt"), "w") as f:
f.write(" ".join(sys.argv) + "\n")
def generated_label_accuracy(dataset, samples):
    """Score generated samples with a pretrained frozen MNIST classifier.

    Loads the optimized digit-classifier graph from disk, feeds the generated
    images through it in fixed-size batches, and returns the mean accuracy of
    the predicted digit against each sample's intended class.

    Parameters
    ----------
    dataset : str
        Only 'mnist' is supported; anything else raises ValueError.
    samples : numpy.ndarray
        Generated images. Assumed 5-D with axis 1 indexing the 10 digit
        classes -- TODO confirm the exact layout against the caller.

    Returns
    -------
    float
        Mean classification accuracy over all evaluated batches.
    """
    if dataset == 'mnist':
        # Load the frozen (graph_transform-optimized) classifier graph.
        with tf.gfile.GFile('./mnist_dcnn/graph_optimized.pb', 'rb') as f:
            graph_def_optimized = tf.GraphDef()
            graph_def_optimized.ParseFromString(f.read())
        G = tf.Graph()
        with G.as_default():
            # Tiny GPU memory fraction: the classifier is small and may be
            # sharing the GPU with the generator.
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.01)
            config = tf.ConfigProto(device_count = {'GPU': 1}, gpu_options=gpu_options)
            # Batch size per classifier run.
            num_test = 100
            with tf.Session(config=config) as sess:
                pred_class, = tf.import_graph_def(graph_def_optimized, return_elements=['pred_class:0'])
                x = G.get_tensor_by_name('import/x:0')
                # NOTE(review): dropout placeholder fetched by the generic
                # name 'Placeholder:0'; intended tensor seems to be
                # keep_prob (see trailing comment) -- confirm.
                keep_prob = G.get_tensor_by_name('import/Placeholder:0') # import/keep_prob:0
                # Regroup samples into one row of images per digit class.
                test_images = (samples.transpose((1,0,2,3,4)).
                               reshape((10, 10)+samples.shape[1:]).
                               # transpose((0,1,2,3,4,5)).
                               reshape((10, -1)+samples.shape[2:]))
                acc_sum = 0.0
                num_sum = 0
                for y_actual, class_samples in enumerate(test_images):
                    # Evaluate full batches only; a trailing partial batch of
                    # fewer than num_test images is skipped by this range.
                    for ii in range(num_test, class_samples.shape[0]+1, num_test):
                        y = sess.run(pred_class, feed_dict={x: class_samples[ii-num_test:ii], keep_prob: 1.0})
                        acc = (y == y_actual).astype(float).mean()
                        num_sum += 1
                        acc_sum += acc
                # Unweighted mean over per-batch accuracies.
                mean_acc = acc_sum/num_sum
                return mean_acc
    else:
        raise ValueError('generated label acc only implemented for mnist')
| [
"os.mkdir",
"tensorflow.trainable_variables",
"tensorflow.ConfigProto",
"numpy.arange",
"numpy.tile",
"glob.glob",
"tensorflow.GPUOptions",
"os.path.join",
"random.randint",
"numpy.random.choice",
"tensorflow.GraphDef",
"math.ceil",
"shutil.copy2",
"tensorflow.Session",
"moviepy.editor.V... | [((314, 336), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (334, 336), False, 'import pprint\n'), ((452, 476), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (474, 476), True, 'import tensorflow as tf\n'), ((479, 540), 'tensorflow.contrib.slim.model_analyzer.analyze_vars', 'slim.model_analyzer.analyze_vars', (['model_vars'], {'print_info': '(True)'}), '(model_vars, print_info=True)\n', (511, 540), True, 'import tensorflow.contrib.slim as slim\n'), ((5180, 5224), 'moviepy.editor.VideoClip', 'mpy.VideoClip', (['make_frame'], {'duration': 'duration'}), '(make_frame, duration=duration)\n', (5193, 5224), True, 'import moviepy.editor as mpy\n'), ((8822, 8853), 'os.path.join', 'os.path.join', (['dirname', '"""script"""'], {}), "(dirname, 'script')\n", (8834, 8853), False, 'import glob, os, shutil, sys\n'), ((8856, 8870), 'os.mkdir', 'os.mkdir', (['dest'], {}), '(dest)\n', (8864, 8870), False, 'import glob, os, shutil, sys\n'), ((1326, 1365), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1], c)'], {}), '((h * size[0], w * size[1], c))\n', (1334, 1365), True, 'import numpy as np\n'), ((5348, 5383), 'math.ceil', 'math.ceil', (['(config.batch_size ** 0.5)'], {}), '(config.batch_size ** 0.5)\n', (5357, 5383), False, 'import math\n'), ((5415, 5482), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {'size': '(config.batch_size, dcgan.z_dim)'}), '(-0.5, 0.5, size=(config.batch_size, dcgan.z_dim))\n', (5432, 5482), True, 'import numpy as np\n'), ((8955, 8972), 'glob.glob', 'glob.glob', (['"""*.py"""'], {}), "('*.py')\n", (8964, 8972), False, 'import glob, os, shutil, sys\n'), ((9039, 9063), 'shutil.copy2', 'shutil.copy2', (['file', 'dest'], {}), '(file, dest)\n', (9051, 9063), False, 'import glob, os, shutil, sys\n'), ((9143, 9174), 'shutil.copy2', 'shutil.copy2', (['script_file', 'dest'], {}), '(script_file, dest)\n', (9155, 9174), False, 'import glob, os, shutil, sys\n'), ((9533, 
9543), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9541, 9543), True, 'import tensorflow as tf\n'), ((1563, 1599), 'numpy.zeros', 'np.zeros', (['(h * size[0], w * size[1])'], {}), '((h * size[0], w * size[1]))\n', (1571, 1599), True, 'import numpy as np\n'), ((2638, 2661), 'numpy.array', 'np.array', (['cropped_image'], {}), '(cropped_image)\n', (2646, 2661), True, 'import numpy as np\n'), ((5713, 5753), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1.0 / config.batch_size)'], {}), '(0, 1, 1.0 / config.batch_size)\n', (5722, 5753), True, 'import numpy as np\n'), ((5766, 5785), 'six.moves.xrange', 'xrange', (['dcgan.z_dim'], {}), '(dcgan.z_dim)\n', (5772, 5785), False, 'from six.moves import xrange\n'), ((8574, 8593), 'numpy.sqrt', 'np.sqrt', (['num_images'], {}), '(num_images)\n', (8581, 8593), True, 'import numpy as np\n'), ((8623, 8642), 'numpy.sqrt', 'np.sqrt', (['num_images'], {}), '(num_images)\n', (8630, 8642), True, 'import numpy as np\n'), ((9188, 9221), 'os.path.join', 'os.path.join', (['dest', '"""command.txt"""'], {}), "(dest, 'command.txt')\n", (9200, 9221), False, 'import glob, os, shutil, sys\n'), ((9360, 9415), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['"""./mnist_dcnn/graph_optimized.pb"""', '"""rb"""'], {}), "('./mnist_dcnn/graph_optimized.pb', 'rb')\n", (9374, 9415), True, 'import tensorflow as tf\n'), ((9450, 9463), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (9461, 9463), True, 'import tensorflow as tf\n'), ((9589, 9640), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.01)'}), '(per_process_gpu_memory_fraction=0.01)\n', (9602, 9640), True, 'import tensorflow as tf\n'), ((9656, 9720), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 1}", 'gpu_options': 'gpu_options'}), "(device_count={'GPU': 1}, gpu_options=gpu_options)\n", (9670, 9720), True, 'import tensorflow as tf\n'), ((5833, 5896), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)'], {'size': 
'(config.batch_size, dcgan.z_dim)'}), '(-1, 1, size=(config.batch_size, dcgan.z_dim))\n', (5850, 5896), True, 'import numpy as np\n'), ((6482, 6522), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1.0 / config.batch_size)'], {}), '(0, 1, 1.0 / config.batch_size)\n', (6491, 6522), True, 'import numpy as np\n'), ((9755, 9780), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (9765, 9780), True, 'import tensorflow as tf\n'), ((9812, 9886), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def_optimized'], {'return_elements': "['pred_class:0']"}), "(graph_def_optimized, return_elements=['pred_class:0'])\n", (9831, 9886), True, 'import tensorflow as tf\n'), ((5669, 5677), 'time.gmtime', 'gmtime', ([], {}), '()\n', (5675, 5677), False, 'from time import gmtime, strftime\n'), ((6017, 6056), 'numpy.random.choice', 'np.random.choice', (['(10)', 'config.batch_size'], {}), '(10, config.batch_size)\n', (6033, 6056), True, 'import numpy as np\n'), ((6077, 6110), 'numpy.zeros', 'np.zeros', (['(config.batch_size, 10)'], {}), '((config.batch_size, 10))\n', (6085, 6110), True, 'import numpy as np\n'), ((6536, 6570), 'random.randint', 'random.randint', (['(0)', '(dcgan.z_dim - 1)'], {}), '(0, dcgan.z_dim - 1)\n', (6550, 6570), False, 'import random\n'), ((6641, 6687), 'numpy.random.uniform', 'np.random.uniform', (['(-0.2)', '(0.2)'], {'size': 'dcgan.z_dim'}), '(-0.2, 0.2, size=dcgan.z_dim)\n', (6658, 6687), True, 'import numpy as np\n'), ((6707, 6741), 'numpy.tile', 'np.tile', (['z', '(config.batch_size, 1)'], {}), '(z, (config.batch_size, 1))\n', (6714, 6741), True, 'import numpy as np\n'), ((7504, 7544), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1.0 / config.batch_size)'], {}), '(0, 1, 1.0 / config.batch_size)\n', (7513, 7544), True, 'import numpy as np\n'), ((7557, 7576), 'six.moves.xrange', 'xrange', (['dcgan.z_dim'], {}), '(dcgan.z_dim)\n', (7563, 7576), False, 'from six.moves import xrange\n'), ((6580, 6599), 
'six.moves.xrange', 'xrange', (['dcgan.z_dim'], {}), '(dcgan.z_dim)\n', (6586, 6599), False, 'from six.moves import xrange\n'), ((6922, 6961), 'numpy.random.choice', 'np.random.choice', (['(10)', 'config.batch_size'], {}), '(10, config.batch_size)\n', (6938, 6961), True, 'import numpy as np\n'), ((6982, 7015), 'numpy.zeros', 'np.zeros', (['(config.batch_size, 10)'], {}), '((config.batch_size, 10))\n', (6990, 7015), True, 'import numpy as np\n'), ((7624, 7666), 'numpy.zeros', 'np.zeros', (['[config.batch_size, dcgan.z_dim]'], {}), '([config.batch_size, dcgan.z_dim])\n', (7632, 7666), True, 'import numpy as np\n'), ((7922, 7962), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1.0 / config.batch_size)'], {}), '(0, 1, 1.0 / config.batch_size)\n', (7931, 7962), True, 'import numpy as np\n'), ((7976, 7995), 'six.moves.xrange', 'xrange', (['dcgan.z_dim'], {}), '(dcgan.z_dim)\n', (7982, 7995), False, 'from six.moves import xrange\n'), ((6129, 6157), 'numpy.arange', 'np.arange', (['config.batch_size'], {}), '(config.batch_size)\n', (6138, 6157), True, 'import numpy as np\n'), ((8043, 8085), 'numpy.zeros', 'np.zeros', (['[config.batch_size, dcgan.z_dim]'], {}), '([config.batch_size, dcgan.z_dim])\n', (8051, 8085), True, 'import numpy as np\n'), ((7034, 7062), 'numpy.arange', 'np.arange', (['config.batch_size'], {}), '(config.batch_size)\n', (7043, 7062), True, 'import numpy as np\n'), ((8323, 8370), 'numpy.array', 'np.array', (['[images[idx] for images in image_set]'], {}), '([images[idx] for images in image_set])\n', (8331, 8370), True, 'import numpy as np\n'), ((7460, 7468), 'time.gmtime', 'gmtime', ([], {}), '()\n', (7466, 7468), False, 'from time import gmtime, strftime\n')] |
import gym
import numpy as np
from models import ActorNetwork,CriticNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torch.distributions.normal import Normal
class learner():
    """Trust Region Policy Optimization (TRPO) agent for continuous control.

    The actor takes a natural-gradient step obtained via conjugate gradient
    on the Fisher information matrix, followed by a backtracking line search
    constrained by a KL trust region; the critic is regressed onto empirical
    returns with Adam.

    ref: Schulman et al., "Trust Region Policy Optimization"
         (https://arxiv.org/abs/1502.05477)
    """
    def __init__(
        self,
        scenario,
        seed=123,
        learning_rate=5e-4,
        gamma=0.99,
        total_timesteps=1e6,
        nsteps=1024,
        batch_size=64,
        vf_itrs=25,
        Lambda=0.97,
        damping=0.1,
        max_kl=0.01,
        entropy_eps=1e-6,
        cgrad_update_steps=10,
        accept_ratio=0.1,
        train_mode=True
    ):
        # NOTE(review): `seed` is accepted but never applied -- confirm
        # whether env/torch seeding was intended.
        self.env = gym.make(scenario)
        self.gamma = gamma                            # discount factor
        self.train_mode = train_mode
        self.max_kl = max_kl                          # KL trust-region radius
        self.Lambda = Lambda                          # GAE lambda
        self.vf_itrs = vf_itrs                        # critic updates per actor update
        self.batch_size = batch_size
        self.nsteps = nsteps
        self.total_timesteps = total_timesteps
        self.entropy_eps = entropy_eps                # epsilon for advantage normalization
        self.cgrad_update_steps = cgrad_update_steps  # CG iterations / line-search tries
        self.accept_ratio = accept_ratio              # min actual/expected improvement ratio
        self.damping = damping                        # Fisher-matrix damping coefficient
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.actor = ActorNetwork(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        # Snapshot of the behavior policy; re-synced at the start of update().
        self.old_actor = ActorNetwork(self.env.observation_space.shape[0], self.env.action_space.shape[0])
        self.old_actor.load_state_dict(self.actor.state_dict())
        self.critic = CriticNetwork(self.env.observation_space.shape[0])
        self.optimizer = torch.optim.Adam(self.critic.parameters(), lr=learning_rate)

    def select_action(self, state):
        """Sample an action from the current Gaussian policy for `state`."""
        state = torch.from_numpy(state)
        with torch.no_grad():
            mean, std = self.actor(state)
            normal_dist = Normal(mean, std)
            action = normal_dist.sample()
        return action.detach().numpy().squeeze()

    def update(self, states, actions, rewards):
        """Run one TRPO actor step and `vf_itrs` critic steps on a rollout.

        Parameters
        ----------
        states : numpy.ndarray, shape (T, obs_dim)
        actions : numpy.ndarray, shape (T, act_dim)
        rewards : numpy.ndarray, shape (T,)
        """
        self.optimizer.zero_grad()
        states = torch.from_numpy(states).to(self.device)
        actions = torch.from_numpy(actions).to(self.device)
        rewards = torch.from_numpy(rewards).to(self.device)
        # BUGFIX: freeze the behavior policy for this update. Previously the
        # old actor was synced only once in __init__, and the "old"
        # distribution below was even built from the *current* mean/std, so
        # exp(log_prob - old_log_prob) was identically 1 with zero gradient.
        self.old_actor.load_state_dict(self.actor.state_dict())
        with torch.no_grad():
            values = self.critic(states)
        batch_size = rewards.shape[0]
        returns = torch.Tensor(batch_size).to(self.device)
        deltas = torch.Tensor(batch_size).to(self.device)
        advantage = torch.Tensor(batch_size).to(self.device)
        prev_return = 0
        prev_value = 0
        prev_advantage = 0
        # Discounted returns + Generalized Advantage Estimation.
        # ref: https://arxiv.org/pdf/1506.02438.pdf (GAE)
        for i in reversed(range(batch_size)):
            returns[i] = rewards[i] + self.gamma * prev_return
            deltas[i] = rewards[i] + self.gamma * prev_value - values[i].data
            advantage[i] = deltas[i] + self.gamma * self.Lambda * prev_advantage
            prev_return = returns[i]
            prev_value = values[i].data
            prev_advantage = advantage[i]
        advantage = (advantage - advantage.mean()) / (advantage.std() + self.entropy_eps)
        # ---- surrogate loss under the frozen behavior policy ----
        with torch.no_grad():
            old_mean, old_std = self.old_actor(states)
        mean, std = self.actor(states)
        normal_dist = Normal(mean, std)
        log_prob = normal_dist.log_prob(actions).sum(dim=1, keepdim=True)
        # BUGFIX: the old distribution must use the old policy's parameters.
        old_normal_dist = Normal(old_mean, old_std)
        old_log_prob = old_normal_dist.log_prob(actions).sum(dim=1, keepdim=True)
        # Importance-weighted surrogate objective (negated for minimization).
        surr_loss = -torch.exp(log_prob - old_log_prob) * advantage
        surr_loss = surr_loss.mean()
        # ---- natural gradient direction via conjugate gradient ----
        surr_grad = torch.autograd.grad(surr_loss, self.actor.parameters())
        flat_surr_grad = torch.cat([grad.view(-1) for grad in surr_grad]).data

        def fvp(v):
            # Damped Fisher-vector product; CG needs a fresh F @ p each step.
            return self._fisher_matrix(v, states, old_mean, old_std)

        nature_grad = self._conjugated_gradient(-flat_surr_grad, self.cgrad_update_steps, fvp)
        non_fmatrix = fvp(nature_grad)
        non_scale_kl = 0.5 * (nature_grad * non_fmatrix).sum(0, keepdim=True)
        # Rescale so the full step exactly saturates the KL trust region.
        scale_ratio = torch.sqrt(non_scale_kl / self.max_kl)
        final_nature_grad = nature_grad / scale_ratio[0]
        expected_improve = (-flat_surr_grad * nature_grad).sum(0, keepdim=True) / scale_ratio[0]
        prev_params = torch.cat([param.data.view(-1) for param in self.actor.parameters()])
        # ---- backtracking line search on the surrogate objective ----
        for _n_backtracks, stepfrac in enumerate(0.5 ** np.arange(self.cgrad_update_steps)):
            new_params = prev_params + stepfrac * final_nature_grad
            self._set_flat_params_by(new_params)
            with torch.no_grad():
                new_mean, new_std = self.actor(states)
                new_normal_dist = Normal(new_mean, new_std)
                # BUGFIX: score the candidate step under the *candidate*
                # policy; the pre-update distribution was reused before.
                new_log_prob = new_normal_dist.log_prob(actions).sum(dim=1, keepdim=True)
                new_surr_loss = -torch.exp(new_log_prob - old_log_prob) * advantage
                new_surr_loss = new_surr_loss.mean()
            actual_improve = surr_loss - new_surr_loss
            e_improve = expected_improve * stepfrac
            ratio = actual_improve / e_improve
            if ratio.item() > self.accept_ratio and actual_improve.item() > 0:
                break
        # NOTE(review): if no candidate is accepted the smallest step is
        # kept, matching the original behavior; restoring prev_params would
        # be the conservative alternative.
        # ---- critic regression on empirical returns ----
        for _ in range(self.vf_itrs):
            if self.batch_size > states.shape[0]:
                batch_idxs = np.arange(states.shape[0])
            else:
                batch_idxs = np.random.choice(states.shape[0], size=self.batch_size, replace=True)
            mini_states = states[batch_idxs]
            mini_returns = returns[batch_idxs]
            update_value = self.critic(mini_states)
            v_loss = (mini_returns - update_value).pow(2).mean()
            self.optimizer.zero_grad()
            v_loss.backward()
            self.optimizer.step()

    def _fisher_matrix(self, v, obs, old_mean, old_std):
        """Return the damped Fisher-vector product F @ v.

        Uses the standard double-backprop trick: gradient of (grad KL . v)
        with respect to the actor parameters.
        """
        kl = self._get_kl(obs, old_mean, old_std)
        kl = kl.mean()
        kl_grads = torch.autograd.grad(kl, self.actor.parameters(), create_graph=True)
        flat_kl_grads = torch.cat([grad.view(-1) for grad in kl_grads])
        # v.detach() replaces the deprecated torch.autograd.Variable wrapper.
        kl_v = (flat_kl_grads * v.detach()).sum()
        kl_second_grads = torch.autograd.grad(kl_v, self.actor.parameters())
        flat_kl_second_grads = torch.cat([grad.contiguous().view(-1) for grad in kl_second_grads])
        return flat_kl_second_grads + self.damping * v

    def _get_kl(self, obs, old_mean, old_std):
        """Analytic KL(old || new) of diagonal Gaussians, summed over action dims."""
        mean, std = self.actor(obs)
        kl = -torch.log(std / old_std) + (std.pow(2) + (mean - old_mean).pow(2)) / (2 * old_std.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)

    def _conjugated_gradient(self, b, update_steps, fvp_fn, residual_limit=1e-10):
        """Approximately solve F x = b with the conjugate-gradient method.

        Parameters
        ----------
        b : torch.Tensor
            Flat right-hand-side vector (here: the negated surrogate gradient).
        update_steps : int
            Maximum CG iterations.
        fvp_fn : callable
            Maps a vector p to the Fisher-vector product F @ p.
            BUGFIX: the previous version received one fixed vector F @ b and
            reused it every iteration, which is only valid for the first step.
        residual_limit : float
            Early-exit threshold on the squared residual norm.
        """
        r = b.clone()
        p = b.clone()
        r_dot_r = torch.dot(r, r)
        x = torch.zeros(b.size()).to(self.device)
        for _ in range(update_steps):
            Ap = fvp_fn(p)
            alpha = r_dot_r / torch.dot(p, Ap)
            x = x + alpha * p
            r = r - alpha * Ap
            new_r_dot_r = torch.dot(r, r)
            beta = new_r_dot_r / r_dot_r
            p = r + beta * p
            r_dot_r = new_r_dot_r
            if r_dot_r < residual_limit:
                break
        return x

    def _set_flat_params_by(self, flat_params):
        """Write a flat parameter vector back into the actor's parameters."""
        prev_idx = 0
        for param in self.actor.parameters():
            flat_size = int(np.prod(list(param.size())))
            param.data.copy_(flat_params[prev_idx:prev_idx + flat_size].view(param.size()))
            prev_idx += flat_size
if __name__=='__main__':
    # Smoke test: run one TRPO update on a random synthetic rollout.
    # Dimensions match Pendulum-v0 (3-d observation, 1-d action).
    obs_dim=3
    act_dim=1
    horizon=128
    states=np.random.randn(horizon,obs_dim).astype('float32')
    actions=np.random.randn(horizon,act_dim).astype('float32')
    # NOTE(review): left as float64 unlike states/actions -- confirm the
    # GAE/critic path tolerates the mixed dtype.
    returns=np.random.randn(horizon)
    agent=learner('Pendulum-v0')
    agent.update(states,actions,returns)
| [
"torch.dot",
"gym.make",
"numpy.random.randn",
"torch.sqrt",
"torch.autograd.Variable",
"torch.distributions.normal.Normal",
"models.ActorNetwork",
"torch.exp",
"torch.Tensor",
"torch.cuda.is_available",
"numpy.arange",
"numpy.random.choice",
"models.CriticNetwork",
"torch.no_grad",
"tor... | [((7754, 7778), 'numpy.random.randn', 'np.random.randn', (['horizon'], {}), '(horizon)\n', (7769, 7778), True, 'import numpy as np\n'), ((664, 682), 'gym.make', 'gym.make', (['scenario'], {}), '(scenario)\n', (672, 682), False, 'import gym\n'), ((1190, 1276), 'models.ActorNetwork', 'ActorNetwork', (['self.env.observation_space.shape[0]', 'self.env.action_space.shape[0]'], {}), '(self.env.observation_space.shape[0], self.env.action_space.\n shape[0])\n', (1202, 1276), False, 'from models import ActorNetwork, CriticNetwork\n'), ((1294, 1380), 'models.ActorNetwork', 'ActorNetwork', (['self.env.observation_space.shape[0]', 'self.env.action_space.shape[0]'], {}), '(self.env.observation_space.shape[0], self.env.action_space.\n shape[0])\n', (1306, 1380), False, 'from models import ActorNetwork, CriticNetwork\n'), ((1459, 1509), 'models.CriticNetwork', 'CriticNetwork', (['self.env.observation_space.shape[0]'], {}), '(self.env.observation_space.shape[0])\n', (1472, 1509), False, 'from models import ActorNetwork, CriticNetwork\n'), ((1643, 1666), 'torch.from_numpy', 'torch.from_numpy', (['state'], {}), '(state)\n', (1659, 1666), False, 'import torch\n'), ((3246, 3263), 'torch.distributions.normal.Normal', 'Normal', (['mean', 'std'], {}), '(mean, std)\n', (3252, 3263), False, 'from torch.distributions.normal import Normal\n'), ((3362, 3379), 'torch.distributions.normal.Normal', 'Normal', (['mean', 'std'], {}), '(mean, std)\n', (3368, 3379), False, 'from torch.distributions.normal import Normal\n'), ((4126, 4164), 'torch.sqrt', 'torch.sqrt', (['(non_scale_kl / self.max_kl)'], {}), '(non_scale_kl / self.max_kl)\n', (4136, 4164), False, 'import torch\n'), ((6835, 6850), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (6844, 6850), False, 'import torch\n'), ((1680, 1695), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1693, 1695), False, 'import torch\n'), ((1760, 1777), 'torch.distributions.normal.Normal', 'Normal', (['mean', 'std'], {}), '(mean, 
std)\n', (1766, 1777), False, 'from torch.distributions.normal import Normal\n'), ((2139, 2154), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2152, 2154), False, 'import torch\n'), ((3075, 3090), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3088, 3090), False, 'import torch\n'), ((7070, 7085), 'torch.dot', 'torch.dot', (['r', 'r'], {}), '(r, r)\n', (7079, 7085), False, 'import torch\n'), ((7628, 7661), 'numpy.random.randn', 'np.random.randn', (['horizon', 'obs_dim'], {}), '(horizon, obs_dim)\n', (7643, 7661), True, 'import numpy as np\n'), ((7691, 7724), 'numpy.random.randn', 'np.random.randn', (['horizon', 'act_dim'], {}), '(horizon, act_dim)\n', (7706, 7724), True, 'import numpy as np\n'), ((1133, 1158), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1156, 1158), False, 'import torch\n'), ((1968, 1992), 'torch.from_numpy', 'torch.from_numpy', (['states'], {}), '(states)\n', (1984, 1992), False, 'import torch\n'), ((2025, 2050), 'torch.from_numpy', 'torch.from_numpy', (['actions'], {}), '(actions)\n', (2041, 2050), False, 'import torch\n'), ((2083, 2108), 'torch.from_numpy', 'torch.from_numpy', (['rewards'], {}), '(rewards)\n', (2099, 2108), False, 'import torch\n'), ((2248, 2272), 'torch.Tensor', 'torch.Tensor', (['batch_size'], {}), '(batch_size)\n', (2260, 2272), False, 'import torch\n'), ((2304, 2328), 'torch.Tensor', 'torch.Tensor', (['batch_size'], {}), '(batch_size)\n', (2316, 2328), False, 'import torch\n'), ((2363, 2387), 'torch.Tensor', 'torch.Tensor', (['batch_size'], {}), '(batch_size)\n', (2375, 2387), False, 'import torch\n'), ((3503, 3537), 'torch.exp', 'torch.exp', (['(log_prob - old_log_prob)'], {}), '(log_prob - old_log_prob)\n', (3512, 3537), False, 'import torch\n'), ((4486, 4520), 'numpy.arange', 'np.arange', (['self.cgrad_update_steps'], {}), '(self.cgrad_update_steps)\n', (4495, 4520), True, 'import numpy as np\n'), ((4651, 4666), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4664, 4666), 
False, 'import torch\n'), ((4754, 4779), 'torch.distributions.normal.Normal', 'Normal', (['new_mean', 'new_std'], {}), '(new_mean, new_std)\n', (4760, 4779), False, 'from torch.distributions.normal import Normal\n'), ((5384, 5410), 'numpy.arange', 'np.arange', (['states.shape[0]'], {}), '(states.shape[0])\n', (5393, 5410), True, 'import numpy as np\n'), ((5456, 5525), 'numpy.random.choice', 'np.random.choice', (['states.shape[0]'], {'size': 'self.batch_size', 'replace': '(True)'}), '(states.shape[0], size=self.batch_size, replace=True)\n', (5472, 5525), True, 'import numpy as np\n'), ((6971, 6992), 'torch.dot', 'torch.dot', (['p', 'fmatrix'], {}), '(p, fmatrix)\n', (6980, 6992), False, 'import torch\n'), ((6207, 6233), 'torch.autograd.Variable', 'torch.autograd.Variable', (['v'], {}), '(v)\n', (6230, 6233), False, 'import torch\n'), ((6554, 6578), 'torch.log', 'torch.log', (['(std / old_std)'], {}), '(std / old_std)\n', (6563, 6578), False, 'import torch\n'), ((4895, 4933), 'torch.exp', 'torch.exp', (['(new_log_prob - old_log_prob)'], {}), '(new_log_prob - old_log_prob)\n', (4904, 4933), False, 'import torch\n')] |
import sep
import numpy as np
from astropy.io import fits
from scipy.stats import iqr
from sfft.utils.pyAstroMatic.PYSEx import PY_SEx
__author__ = "<NAME> <<EMAIL>>"
__version__ = "v1.0"
class SEx_SkySubtract:
    """SExtractor-style sky subtraction for a FITS image.

    Detected sources are masked out using a SExtractor OBJECTS check-image,
    the sky background of the remaining pixels is modeled with `sep`, and
    outlier-rejected statistics (Tukey's fences) of the sky map are reported.
    """
    @staticmethod
    def SSS(FITS_obj, FITS_skysub=None, GAIN_KEY='GAIN', SATUR_KEY='SATURATE', ESATUR_KEY='ESATUR', \
        BACK_SIZE=64, BACK_FILTERSIZE=3, DETECT_THRESH=1.5, DETECT_MINAREA=5, DETECT_MAXAREA=0):
        """Sky-subtract the image in `FITS_obj`.

        Parameters
        ----------
        FITS_obj : str
            Path of the input FITS image.
        FITS_skysub : str or None
            If given, path where the sky-subtracted image is written.
        GAIN_KEY, SATUR_KEY, ESATUR_KEY : str
            FITS header keywords for gain, saturation level, and the
            effective-saturation keyword written to the output.
        BACK_SIZE, BACK_FILTERSIZE : int
            Background mesh size and median-filter size (SExtractor semantics).
        DETECT_THRESH, DETECT_MINAREA, DETECT_MAXAREA
            Source-detection parameters forwarded to SExtractor.

        Returns
        -------
        tuple
            (SKYDIP, SKYPEAK, PixA_sky, PixA_skysub): outlier-rejected
            minimum/maximum of the sky map, the sky map, and the
            sky-subtracted pixel array.
        """
        # * Generate SExtractor OBJECT-MASK
        PL = ['X_IMAGE', 'Y_IMAGE', 'FLUX_AUTO', 'FLUXERR_AUTO', 'MAG_AUTO', 'MAGERR_AUTO']
        Mask_DET = PY_SEx.PS(FITS_obj=FITS_obj, PL=PL, GAIN_KEY=GAIN_KEY, SATUR_KEY=SATUR_KEY, \
            BACK_TYPE='AUTO', BACK_SIZE=BACK_SIZE, BACK_FILTERSIZE=BACK_FILTERSIZE, \
            DETECT_THRESH=DETECT_THRESH, DETECT_MINAREA=DETECT_MINAREA, DETECT_MAXAREA=DETECT_MAXAREA, \
            BACKPHOTO_TYPE='GLOBAL', CHECKIMAGE_TYPE='OBJECTS')[1][0].astype(bool)

        # * Extract SExtractor SKY-MAP from the Unmasked Image
        #   NOTE: here we use the faster sep package rather than SExtractor.
        PixA_obj = fits.getdata(FITS_obj, ext=0).T
        _PixA = PixA_obj.astype(np.float64, copy=True)  # work on a copy; keep original intact
        _PixA[Mask_DET] = np.nan                        # exclude detected sources from the fit
        if not _PixA.flags['C_CONTIGUOUS']: _PixA = np.ascontiguousarray(_PixA)
        PixA_sky = sep.Background(_PixA, bw=BACK_SIZE, bh=BACK_SIZE, \
            fw=BACK_FILTERSIZE, fh=BACK_FILTERSIZE).back()
        PixA_skysub = PixA_obj - PixA_sky

        # * Make simple statistics for the SKY-MAP
        Q1 = np.percentile(PixA_sky, 25)
        # BUGFIX: the third quartile is the 75th percentile (was 55), matching
        # scipy.stats.iqr's default 25-75 interpercentile range used below.
        Q3 = np.percentile(PixA_sky, 75)
        IQR = iqr(PixA_sky)
        SKYDIP = Q1 - 1.5*IQR    # NOTE outlier rejected dip (lower Tukey fence)
        SKYPEAK = Q3 + 1.5*IQR   # NOTE outlier rejected peak (upper Tukey fence)

        if FITS_skysub is not None:
            with fits.open(FITS_obj) as hdl:
                hdl[0].header['SKYDIP'] = (SKYDIP, 'MeLOn: IQR-MINIMUM of SEx-SKY-MAP')
                hdl[0].header['SKYPEAK'] = (SKYPEAK, 'MeLOn: IQR-MAXIMUM of SEx-SKY-MAP')
                # Effective saturation after removing the sky pedestal
                # (conservative). BUGFIX: honor the configurable keyword
                # parameters instead of hard-coding 'SATURATE' / 'ESATUR'.
                ESATUR = float(hdl[0].header[SATUR_KEY]) - SKYPEAK
                hdl[0].header[ESATUR_KEY] = (ESATUR, 'MeLOn: Effective SATURATE after SEx-SKY-SUB')
                hdl[0].data[:, :] = PixA_skysub.T
                hdl.writeto(FITS_skysub, overwrite=True)
        return SKYDIP, SKYPEAK, PixA_sky, PixA_skysub
| [
"scipy.stats.iqr",
"astropy.io.fits.getdata",
"sfft.utils.pyAstroMatic.PYSEx.PY_SEx.PS",
"numpy.percentile",
"sep.Background",
"astropy.io.fits.open",
"numpy.ascontiguousarray"
] | [((1573, 1600), 'numpy.percentile', 'np.percentile', (['PixA_sky', '(25)'], {}), '(PixA_sky, 25)\n', (1586, 1600), True, 'import numpy as np\n'), ((1614, 1641), 'numpy.percentile', 'np.percentile', (['PixA_sky', '(55)'], {}), '(PixA_sky, 55)\n', (1627, 1641), True, 'import numpy as np\n'), ((1656, 1669), 'scipy.stats.iqr', 'iqr', (['PixA_sky'], {}), '(PixA_sky)\n', (1659, 1669), False, 'from scipy.stats import iqr\n'), ((1094, 1123), 'astropy.io.fits.getdata', 'fits.getdata', (['FITS_obj'], {'ext': '(0)'}), '(FITS_obj, ext=0)\n', (1106, 1123), False, 'from astropy.io import fits\n'), ((1308, 1335), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['_PixA'], {}), '(_PixA)\n', (1328, 1335), True, 'import numpy as np\n'), ((1355, 1449), 'sep.Background', 'sep.Background', (['_PixA'], {'bw': 'BACK_SIZE', 'bh': 'BACK_SIZE', 'fw': 'BACK_FILTERSIZE', 'fh': 'BACK_FILTERSIZE'}), '(_PixA, bw=BACK_SIZE, bh=BACK_SIZE, fw=BACK_FILTERSIZE, fh=\n BACK_FILTERSIZE)\n', (1369, 1449), False, 'import sep\n'), ((1847, 1866), 'astropy.io.fits.open', 'fits.open', (['FITS_obj'], {}), '(FITS_obj)\n', (1856, 1866), False, 'from astropy.io import fits\n'), ((586, 893), 'sfft.utils.pyAstroMatic.PYSEx.PY_SEx.PS', 'PY_SEx.PS', ([], {'FITS_obj': 'FITS_obj', 'PL': 'PL', 'GAIN_KEY': 'GAIN_KEY', 'SATUR_KEY': 'SATUR_KEY', 'BACK_TYPE': '"""AUTO"""', 'BACK_SIZE': 'BACK_SIZE', 'BACK_FILTERSIZE': 'BACK_FILTERSIZE', 'DETECT_THRESH': 'DETECT_THRESH', 'DETECT_MINAREA': 'DETECT_MINAREA', 'DETECT_MAXAREA': 'DETECT_MAXAREA', 'BACKPHOTO_TYPE': '"""GLOBAL"""', 'CHECKIMAGE_TYPE': '"""OBJECTS"""'}), "(FITS_obj=FITS_obj, PL=PL, GAIN_KEY=GAIN_KEY, SATUR_KEY=SATUR_KEY,\n BACK_TYPE='AUTO', BACK_SIZE=BACK_SIZE, BACK_FILTERSIZE=BACK_FILTERSIZE,\n DETECT_THRESH=DETECT_THRESH, DETECT_MINAREA=DETECT_MINAREA,\n DETECT_MAXAREA=DETECT_MAXAREA, BACKPHOTO_TYPE='GLOBAL', CHECKIMAGE_TYPE\n ='OBJECTS')\n", (595, 893), False, 'from sfft.utils.pyAstroMatic.PYSEx import PY_SEx\n')] |
"""Defines abstract base classes for classifiers and regressors."""
import abc
import numbers
import numpy as np
from .fit import Fittable
from ..utils import validate_samples
from ..utils import validate_int
class Predictor(Fittable, metaclass=abc.ABCMeta):
    """Common abstract base for both classifiers and regressors.

    Subclasses must implement :meth:`predict`, mapping explanatory
    variables to predicted responses.
    """

    @abc.abstractmethod
    def predict(self, *args, **kwargs):
        """Predict the response for the given explanatory variables."""
        pass
class Classifier(Predictor, metaclass=abc.ABCMeta):
    """Abstract base class for classifiers.

    Properties
    ----------
    classes : numpy.ndarray
        List of distinct class labels. These will usually be determined during
        model fitting.
    """

    # Distinct class labels; populated by _preprocess_classes() during fitting.
    classes: np.ndarray = None

    def _preprocess_classes(self, y, max_classes):
        """Encode a categorical response vector as indices into `classes`.

        As a side effect, stores the sorted distinct labels of `y` on
        ``self.classes``.

        Parameters
        ----------
        y : array-like
            Categorical response variable vector.
        max_classes : int or None
            Upper bound on the number of distinct labels allowed; ``None``
            disables the check.

        Returns
        -------
        indices : numpy.ndarray
            For each entry of `y`, the position of its label in `classes`.
        """
        # Validate `max_classes`
        if max_classes is not None:
            max_classes = validate_int(max_classes, "max_classes", minimum=2)

        # Extract unique classes, convert response vector to indices.
        self.classes, indices = np.unique(y, return_inverse=True)

        # Classification needs at least two distinct labels to be meaningful.
        if len(self.classes) < 2:
            raise ValueError(
                "Response vector must contain at least two distinct classes.")

        # Enforce the optional upper bound on the number of labels.
        if max_classes is not None and len(self.classes) > max_classes:
            raise ValueError(
                "Response vector contains too many distinct classes.")

        return indices

    @abc.abstractmethod
    def predict_prob(self, *args, **kwargs):
        """Estimate class-membership probabilities for a set of features.

        Implementations return a matrix of shape (n_observations, n_classes).
        """
        raise NotImplementedError()

    def predict(self, *args, **kwargs):
        """Return the most probable class label for each input."""
        prob = self.predict_prob(*args, **kwargs)
        best = np.argmax(prob, axis=1)
        return self.classes[best]

    def mcr(self, x, y, *args, **kwargs):
        """Compute the misclassification rate of the model for given values of
        the explanatory and response variables.

        Parameters
        ----------
        x : array-like, shape (n, p)
            Explanatory variables.
        y : array-like, shape (n,)
            Response variable.
        args : sequence, optional
            Positional arguments to pass to this classifier's predict() method.
        kwargs : dict, optional
            Keyword arguments to pass to this classifier's predict() method.

        Returns
        -------
        mcr : float
            The misclassification rate.
        """
        # Validate input
        x, y = validate_samples(x, y, n_dim=(None, 1), equal_lengths=True)
        return np.mean(y != self.predict(x, *args, **kwargs))
class Regressor(Predictor, metaclass=abc.ABCMeta):
    """Abstract base class for regressors."""

    def mse(self, x, y, *args, **kwargs):
        """Compute the mean squared error of the model for given values of the
        explanatory and response variables.

        Parameters
        ----------
        x : array-like, shape (n, p)
            Explanatory variables.
        y : array-like, shape (n,)
            Response variable.
        args : sequence, optional
            Positional arguments to pass to this regressor's predict() method.
        kwargs : dict, optional
            Keyword arguments to pass to this regressor's predict() method.

        Returns
        -------
        mse : float
            The mean squared prediction error.
        """
        # Validate input
        x, y = validate_samples(x, y, n_dim=(None, 1), equal_lengths=True)
        residuals = y - self.predict(x, *args, **kwargs)
        return np.mean(residuals ** 2)

    def mae(self, x, y, *args, **kwargs):
        """Compute the mean absolute error of the model for given values of the
        explanatory and response variables.

        Parameters
        ----------
        x : array-like, shape (n, p)
            Explanatory variables.
        y : array-like, shape (n,)
            Response variable.
        args : sequence, optional
            Positional arguments to pass to this regressor's predict() method.
        kwargs : dict, optional
            Keyword arguments to pass to this regressor's predict() method.

        Returns
        -------
        mae : float
            The mean absolute prediction error.
        """
        # Validate input
        x, y = validate_samples(x, y, n_dim=(None, 1), equal_lengths=True)
        residuals = y - self.predict(x, *args, **kwargs)
        return np.mean(np.abs(residuals))
| [
"numpy.unique",
"numpy.argmax"
] | [((1479, 1512), 'numpy.unique', 'np.unique', (['y'], {'return_inverse': '(True)'}), '(y, return_inverse=True)\n', (1488, 1512), True, 'import numpy as np\n'), ((2503, 2523), 'numpy.argmax', 'np.argmax', (['p'], {'axis': '(1)'}), '(p, axis=1)\n', (2512, 2523), True, 'import numpy as np\n')] |
import os.path as osp
import re
import cv2
import torch
import numpy as np
from PIL import Image
from scipy import interpolate
from torch import from_numpy
# Magic number at the head of a Middlebury .flo file; checked by readFlow()
# below and (per the original comment) written as the header by writeFlow().
TAG_CHAR = np.array([202021.25], np.float32)
def make_colorwheel():
    """Build the optical-flow color wheel of Baker et al.

    Baker et al. "A Database and Evaluation Methodology for Optical Flow"
    (ICCV, 2007), http://vision.middlebury.edu/flow/flowEval-iccv07.pdf;
    follows the original C++ and Deqing Sun's Matlab implementations.

    The wheel walks the hue circle in six segments
    (R->Y->G->C->B->M->R); within each segment one RGB channel is held at
    255 while another ramps linearly up or down.

    Returns:
        np.ndarray: Color wheel of shape (55, 3).
    """
    # (segment length, channel held at 255, ramping channel, ramp direction)
    segments = [
        (15, 0, 1, +1),  # red -> yellow (RY)
        (6, 1, 0, -1),   # yellow -> green (YG)
        (4, 1, 2, +1),   # green -> cyan (GC)
        (11, 2, 1, -1),  # cyan -> blue (CB)
        (13, 2, 0, +1),  # blue -> magenta (BM)
        (6, 0, 2, -1),   # magenta -> red (MR)
    ]
    total = sum(length for length, _, _, _ in segments)
    wheel = np.zeros((total, 3))
    row = 0
    for length, hold, ramp, direction in segments:
        ramp_vals = np.floor(255 * np.arange(length) / length)
        wheel[row:row + length, hold] = 255
        if direction > 0:
            wheel[row:row + length, ramp] = ramp_vals
        else:
            wheel[row:row + length, ramp] = 255 - ramp_vals
        row += length
    return wheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
    """Color-code (possibly clipped) flow components u and v.

    According to the C++ source code of Baker et al. and the Matlab source
    code of Deqing Sun: the flow angle selects a hue on the color wheel
    (linearly interpolated between adjacent wheel entries) and the flow
    magnitude controls saturation; magnitudes above 1 are dimmed.

    Args:
        u (np.ndarray): Input horizontal flow of shape [H,W]
        v (np.ndarray): Input vertical flow of shape [H,W]
        convert_to_bgr (bool, optional): Convert output image
            to BGR. Defaults to False.

    Returns:
        np.ndarray: Flow visualization image of shape [H,W,3]
    """
    out = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
    wheel = make_colorwheel()  # shape [55x3]
    num_cols = wheel.shape[0]
    magnitude = np.sqrt(u ** 2 + v ** 2)
    angle = np.arctan2(-v, -u) / np.pi
    # Fractional position on the wheel, split into the two bracketing rows.
    pos = (angle + 1) / 2 * (num_cols - 1)
    lo = np.floor(pos).astype(np.int32)
    hi = lo + 1
    hi[hi == num_cols] = 0  # wrap around the wheel
    frac = pos - lo
    for ch in range(wheel.shape[1]):
        channel_vals = wheel[:, ch]
        blend = (1 - frac) * (channel_vals[lo] / 255.0) + frac * (channel_vals[hi] / 255.0)
        in_range = (magnitude <= 1)
        blend[in_range] = 1 - magnitude[in_range] * (1 - blend[in_range])
        blend[~in_range] = blend[~in_range] * 0.75  # out of range
        # Note the 2-ch => BGR instead of RGB
        target = 2 - ch if convert_to_bgr else ch
        out[:, :, target] = np.floor(255 * blend)
    return out
def better_flow_to_image(flow_uv,
                         alpha=0.5,
                         max_flow=724,
                         clip_flow=None,
                         convert_to_bgr=False):
    """Visualize flow with extremely large displacements.

    The magnitude is compressed with a power law (exponent ``alpha``)
    relative to ``max_flow`` before applying the standard color wheel.

    Args:
        flow_uv (np.ndarray): flow of shape [H, W, 2].
        alpha (float): power-law exponent for magnitude compression.
        max_flow (float): normalization constant for the magnitude.
        clip_flow (float, optional): if given, clip flow to [0, clip_flow].
        convert_to_bgr (bool): emit BGR instead of RGB.

    Returns:
        np.ndarray: uint8 visualization image of shape [H, W, 3].
    """
    assert flow_uv.ndim == 3, 'input flow must have three dimensions'
    assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
    if clip_flow is not None:
        flow_uv = np.clip(flow_uv, 0, clip_flow)
    u, v = flow_uv[:, :, 0], flow_uv[:, :, 1]
    magnitude = np.sqrt(np.square(u) + np.square(v))
    # Power-law damping of the per-pixel scale; epsilon avoids division
    # issues at max_flow == 0.
    damp = np.power(magnitude / max_flow, alpha)
    eps = 1e-5
    u = damp * u / (max_flow + eps)
    v = damp * v / (max_flow + eps)
    return flow_uv_to_colors(u, v, convert_to_bgr)
def forward_interpolate(flow):
    """Forward-warp a flow field for warm starting, from RAFT.

    Every source pixel is advected by its own flow vector; the scattered
    samples are then resampled back onto the regular grid with
    nearest-neighbour interpolation.

    Args:
        flow: torch tensor of shape (2, H, W).

    Returns:
        torch.FloatTensor of shape (2, H, W).
    """
    flow = flow.detach().cpu().numpy()
    dx, dy = flow[0], flow[1]
    ht, wd = dx.shape
    grid_x, grid_y = np.meshgrid(np.arange(wd), np.arange(ht))
    # Advected (scattered) sample positions, flattened.
    x1 = (grid_x + dx).reshape(-1)
    y1 = (grid_y + dy).reshape(-1)
    dx = dx.reshape(-1)
    dy = dy.reshape(-1)
    # Keep only samples that land strictly inside the frame.
    inside = (x1 > 0) & (x1 < wd) & (y1 > 0) & (y1 < ht)
    x1, y1 = x1[inside], y1[inside]
    dx, dy = dx[inside], dy[inside]
    flow_x = interpolate.griddata((x1, y1), dx, (grid_x, grid_y),
                                 method='nearest', fill_value=0)
    flow_y = interpolate.griddata((x1, y1), dy, (grid_x, grid_y),
                                 method='nearest', fill_value=0)
    return from_numpy(np.stack([flow_x, flow_y], axis=0)).float()
def readFlow(fn):
    """Read a .flo optical-flow file in Middlebury format.

    Code adapted from:
    http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy
    WARNING: this will work on little-endian architectures (eg Intel x86) only!

    Args:
        fn (str): path to the .flo file.

    Returns:
        np.ndarray of shape (h, w, 2), or None when the magic number is
        wrong or the file is truncated.
    """
    with open(fn, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        # Compare the scalar, not the array: the old element-wise
        # `202021.25 != magic` misbehaved on empty/truncated files
        # (empty array is falsy, so the code fell through and crashed).
        if magic.size == 0 or magic[0] != 202021.25:
            print('Magic number incorrect. Invalid .flo file')
            return None
        w = np.fromfile(f, np.int32, count=1)
        h = np.fromfile(f, np.int32, count=1)
        if w.size == 0 or h.size == 0:  # header truncated after the magic
            print('Magic number incorrect. Invalid .flo file')
            return None
        data = np.fromfile(f, np.float32, count=2 * int(w) * int(h))
        # Reshape into (rows, cols, bands). The on-disk layout is (w, h, 2);
        # this orientation is convenient for visualization.
        return np.resize(data, (int(h), int(w), 2))
def writeFlow(filename, uv, v=None):
    """Write optical flow to a Middlebury .flo file, from RAFT.

    If v is None, uv is assumed to contain both u and v channels,
    stacked in depth.

    Args:
        filename (str): output path.
        uv (np.ndarray): either the full [H,W,2] flow, or just the u
            channel [H,W] when ``v`` is given separately.
        v (np.ndarray, optional): the v channel [H,W].
    """
    nBands = 2
    if v is None:
        assert uv.ndim == 3
        assert uv.shape[2] == 2
        u = uv[:, :, 0]
        v = uv[:, :, 1]
    else:
        u = uv
    assert u.shape == v.shape
    height, width = u.shape
    # Context manager so the file is closed even if a write fails
    # (the original used open()/close() and leaked on exceptions).
    with open(filename, 'wb') as f:
        # Header: magic tag, then width and height as int32.
        f.write(TAG_CHAR)
        np.array(width).astype(np.int32).tofile(f)
        np.array(height).astype(np.int32).tofile(f)
        # Interleave u and v column-wise: [u0, v0, u1, v1, ...].
        tmp = np.zeros((height, width * nBands))
        tmp[:, np.arange(width) * 2] = u
        tmp[:, np.arange(width) * 2 + 1] = v
        tmp.astype(np.float32).tofile(f)
def readPFM(file):
    """Read a PFM image file.

    Args:
        file (str): path to the .pfm file.

    Returns:
        np.ndarray: image data of shape (H, W, 3) for color ('PF')
        headers or (H, W) for grayscale ('Pf'), flipped into the usual
        top-down row order.

    Raises:
        Exception: if the header is not a valid PFM header.
    """
    # Context manager so the handle is closed on every path (the
    # original opened the file and never closed it); also avoids
    # shadowing the open handle with the builtin-like name `file`.
    with open(file, 'rb') as fh:
        header = fh.readline().rstrip()
        if header == b'PF':
            color = True
        elif header == b'Pf':
            color = False
        else:
            raise Exception('Not a PFM file.')
        dim_match = re.match(rb'^(\d+)\s(\d+)\s$', fh.readline())
        if dim_match:
            width, height = map(int, dim_match.groups())
        else:
            raise Exception('Malformed PFM header.')
        scale = float(fh.readline().rstrip())
        # A negative scale marks little-endian float data.
        if scale < 0:  # little-endian
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian
        data = np.fromfile(fh, endian + 'f')
    shape = (height, width, 3) if color else (height, width)
    # PFM stores rows bottom-up; flip to top-down order.
    return np.flipud(np.reshape(data, shape))
def read_gen(file_name, pil=False):
    """Load an image or flow file, dispatching on the file extension.

    Raster images come back as PIL images, .bin/.raw as numpy arrays,
    .flo/.pfm as float32 flow arrays; unknown extensions yield [].
    """
    ext = osp.splitext(file_name)[-1]
    if ext in ('.png', '.jpeg', '.ppm', '.jpg'):
        return Image.open(file_name)
    if ext in ('.bin', '.raw'):
        return np.load(file_name)
    if ext == '.flo':
        return readFlow(file_name).astype(np.float32)
    if ext == '.pfm':
        flow = readPFM(file_name).astype(np.float32)
        # 2-D data is returned as-is; otherwise the last channel is dropped.
        return flow if len(flow.shape) == 2 else flow[:, :, :-1]
    return []
def readFlowKITTI(filename):
    """Read a KITTI 16-bit flow PNG.

    Returns:
        (flow, valid): float32 flow of shape [H, W, 2] and the validity
        channel of shape [H, W].
    """
    raw = cv2.imread(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)
    raw = raw[:, :, ::-1].astype(np.float32)  # BGR -> RGB channel order
    flow, valid = raw[:, :, :2], raw[:, :, 2]
    # Undo KITTI's fixed-point encoding: stored = 64 * flow + 2^15.
    flow = (flow - 2 ** 15) / 64.0
    return flow, valid
def writeFlowKITTI(filename, uv):
    """Write flow in KITTI's 16-bit fixed-point PNG encoding."""
    encoded = 64.0 * uv + 2 ** 15
    # Third channel marks every pixel as valid.
    valid = np.ones([encoded.shape[0], encoded.shape[1], 1])
    packed = np.concatenate([encoded, valid], axis=-1).astype(np.uint16)
    cv2.imwrite(filename, packed[..., ::-1])  # RGB -> BGR for OpenCV
def convert_360_gt(flow_gt):
    """Wrap horizontal ground-truth flow for 360-degree (panoramic) frames.

    Horizontal displacements whose magnitude exceeds half the image
    width are wrapped to the equivalent short path around the panorama.
    Note: the horizontal channel of ``flow_gt`` is modified in place.
    """
    batched = flow_gt.unsqueeze(dim=0)
    width = batched.shape[3]
    half = width // 2
    horiz = batched[:, 0]
    # Wrap displacements crossing more than half the panorama to the right.
    batched[:, 0] = torch.where(horiz > half, horiz - width, horiz)
    horiz = batched[:, 0]
    # ... and to the left.
    batched[:, 0] = torch.where(horiz < -half, width + horiz, horiz)
    return batched.squeeze()
| [
"numpy.load",
"numpy.arctan2",
"numpy.floor",
"numpy.ones",
"numpy.clip",
"numpy.arange",
"numpy.power",
"cv2.imwrite",
"numpy.reshape",
"numpy.stack",
"scipy.interpolate.griddata",
"torch.where",
"numpy.square",
"numpy.flipud",
"numpy.concatenate",
"torch.from_numpy",
"numpy.fromfil... | [((196, 229), 'numpy.array', 'np.array', (['[202021.25]', 'np.float32'], {}), '([202021.25], np.float32)\n', (204, 229), True, 'import numpy as np\n'), ((804, 824), 'numpy.zeros', 'np.zeros', (['(ncols, 3)'], {}), '((ncols, 3))\n', (812, 824), True, 'import numpy as np\n'), ((2235, 2282), 'numpy.zeros', 'np.zeros', (['(u.shape[0], u.shape[1], 3)', 'np.uint8'], {}), '((u.shape[0], u.shape[1], 3), np.uint8)\n', (2243, 2282), True, 'import numpy as np\n'), ((3671, 3702), 'numpy.power', 'np.power', (['(rad / max_flow)', 'alpha'], {}), '(rad / max_flow, alpha)\n', (3679, 3702), True, 'import numpy as np\n'), ((4388, 4464), 'scipy.interpolate.griddata', 'interpolate.griddata', (['(x1, y1)', 'dx', '(x0, y0)'], {'method': '"""nearest"""', 'fill_value': '(0)'}), "((x1, y1), dx, (x0, y0), method='nearest', fill_value=0)\n", (4408, 4464), False, 'from scipy import interpolate\n'), ((4581, 4657), 'scipy.interpolate.griddata', 'interpolate.griddata', (['(x1, y1)', 'dy', '(x0, y0)'], {'method': '"""nearest"""', 'fill_value': '(0)'}), "((x1, y1), dy, (x0, y0), method='nearest', fill_value=0)\n", (4601, 4657), False, 'from scipy import interpolate\n'), ((4772, 4806), 'numpy.stack', 'np.stack', (['[flow_x, flow_y]'], {'axis': '(0)'}), '([flow_x, flow_y], axis=0)\n', (4780, 4806), True, 'import numpy as np\n'), ((6501, 6535), 'numpy.zeros', 'np.zeros', (['(height, width * nBands)'], {}), '((height, width * nBands))\n', (6509, 6535), True, 'import numpy as np\n'), ((7363, 7394), 'numpy.fromfile', 'np.fromfile', (['file', "(endian + 'f')"], {}), "(file, endian + 'f')\n", (7374, 7394), True, 'import numpy as np\n'), ((7468, 7491), 'numpy.reshape', 'np.reshape', (['data', 'shape'], {}), '(data, shape)\n', (7478, 7491), True, 'import numpy as np\n'), ((7503, 7518), 'numpy.flipud', 'np.flipud', (['data'], {}), '(data)\n', (7512, 7518), True, 'import numpy as np\n'), ((8114, 8174), 'cv2.imread', 'cv2.imread', (['filename', '(cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)'], 
{}), '(filename, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_COLOR)\n', (8124, 8174), False, 'import cv2\n'), ((8401, 8439), 'numpy.ones', 'np.ones', (['[uv.shape[0], uv.shape[1], 1]'], {}), '([uv.shape[0], uv.shape[1], 1])\n', (8408, 8439), True, 'import numpy as np\n'), ((8508, 8544), 'cv2.imwrite', 'cv2.imwrite', (['filename', 'uv[..., ::-1]'], {}), '(filename, uv[..., ::-1])\n', (8519, 8544), False, 'import cv2\n'), ((8668, 8772), 'torch.where', 'torch.where', (['(flow_gt[:, 0] > flow_gt.shape[3] // 2)', '(flow_gt[:, 0] - flow_gt.shape[3])', 'flow_gt[:, 0]'], {}), '(flow_gt[:, 0] > flow_gt.shape[3] // 2, flow_gt[:, 0] - flow_gt.\n shape[3], flow_gt[:, 0])\n', (8679, 8772), False, 'import torch\n'), ((8854, 8960), 'torch.where', 'torch.where', (['(flow_gt[:, 0] < -(flow_gt.shape[3] // 2))', '(flow_gt.shape[3] + flow_gt[:, 0])', 'flow_gt[:, 0]'], {}), '(flow_gt[:, 0] < -(flow_gt.shape[3] // 2), flow_gt.shape[3] +\n flow_gt[:, 0], flow_gt[:, 0])\n', (8865, 8960), False, 'import torch\n'), ((2421, 2439), 'numpy.arctan2', 'np.arctan2', (['(-v)', '(-u)'], {}), '(-v, -u)\n', (2431, 2439), True, 'import numpy as np\n'), ((3007, 3026), 'numpy.floor', 'np.floor', (['(255 * col)'], {}), '(255 * col)\n', (3015, 3026), True, 'import numpy as np\n'), ((3497, 3527), 'numpy.clip', 'np.clip', (['flow_uv', '(0)', 'clip_flow'], {}), '(flow_uv, 0, clip_flow)\n', (3504, 3527), True, 'import numpy as np\n'), ((4079, 4092), 'numpy.arange', 'np.arange', (['wd'], {}), '(wd)\n', (4088, 4092), True, 'import numpy as np\n'), ((4094, 4107), 'numpy.arange', 'np.arange', (['ht'], {}), '(ht)\n', (4103, 4107), True, 'import numpy as np\n'), ((5216, 5251), 'numpy.fromfile', 'np.fromfile', (['f', 'np.float32'], {'count': '(1)'}), '(f, np.float32, count=1)\n', (5227, 5251), True, 'import numpy as np\n'), ((7583, 7606), 'os.path.splitext', 'osp.splitext', (['file_name'], {}), '(file_name)\n', (7595, 7606), True, 'import os.path as osp\n'), ((7700, 7721), 'PIL.Image.open', 'Image.open', (['file_name'], {}), 
'(file_name)\n', (7710, 7721), False, 'from PIL import Image\n'), ((2384, 2396), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (2393, 2396), True, 'import numpy as np\n'), ((2399, 2411), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (2408, 2411), True, 'import numpy as np\n'), ((2492, 2504), 'numpy.floor', 'np.floor', (['fk'], {}), '(fk)\n', (2500, 2504), True, 'import numpy as np\n'), ((3596, 3608), 'numpy.square', 'np.square', (['u'], {}), '(u)\n', (3605, 3608), True, 'import numpy as np\n'), ((3611, 3623), 'numpy.square', 'np.square', (['v'], {}), '(v)\n', (3620, 3623), True, 'import numpy as np\n'), ((4818, 4834), 'torch.from_numpy', 'from_numpy', (['flow'], {}), '(flow)\n', (4828, 4834), False, 'from torch import from_numpy\n'), ((5400, 5433), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32'], {'count': '(1)'}), '(f, np.int32, count=1)\n', (5411, 5433), True, 'import numpy as np\n'), ((5450, 5483), 'numpy.fromfile', 'np.fromfile', (['f', 'np.int32'], {'count': '(1)'}), '(f, np.int32, count=1)\n', (5461, 5483), True, 'import numpy as np\n'), ((7778, 7796), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (7785, 7796), True, 'import numpy as np\n'), ((8449, 8485), 'numpy.concatenate', 'np.concatenate', (['[uv, valid]'], {'axis': '(-1)'}), '([uv, valid], axis=-1)\n', (8463, 8485), True, 'import numpy as np\n'), ((918, 934), 'numpy.arange', 'np.arange', (['(0)', 'RY'], {}), '(0, RY)\n', (927, 934), True, 'import numpy as np\n'), ((1200, 1216), 'numpy.arange', 'np.arange', (['(0)', 'GC'], {}), '(0, GC)\n', (1209, 1216), True, 'import numpy as np\n'), ((1479, 1495), 'numpy.arange', 'np.arange', (['(0)', 'BM'], {}), '(0, BM)\n', (1488, 1495), True, 'import numpy as np\n'), ((6547, 6563), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (6556, 6563), True, 'import numpy as np\n'), ((1024, 1040), 'numpy.arange', 'np.arange', (['(0)', 'YG'], {}), '(0, YG)\n', (1033, 1040), True, 'import numpy as np\n'), ((1306, 1319), 'numpy.arange', 
'np.arange', (['CB'], {}), '(CB)\n', (1315, 1319), True, 'import numpy as np\n'), ((1585, 1598), 'numpy.arange', 'np.arange', (['MR'], {}), '(MR)\n', (1594, 1598), True, 'import numpy as np\n'), ((6369, 6384), 'numpy.array', 'np.array', (['width'], {}), '(width)\n', (6377, 6384), True, 'import numpy as np\n'), ((6416, 6432), 'numpy.array', 'np.array', (['height'], {}), '(height)\n', (6424, 6432), True, 'import numpy as np\n'), ((6584, 6600), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (6593, 6600), True, 'import numpy as np\n')] |
from pyrfuniverse.envs import RFUniverseGymWrapper
import numpy as np
from gym import spaces
from gym.utils import seeding
class BalanceBallEnv(RFUniverseGymWrapper):
    """Gym-style RFUniverse environment: tilt a cube to keep a ball on it.

    Actions rotate the cube about its x and z axes; observations combine
    two cube-quaternion components, the ball position relative to the
    cube, and the ball velocity, all read from the simulator channels.
    """
    metadata = {'render.modes': ['human']}
    def __init__(self, executable_file=None):
        """Connect to the simulator and set up action/observation spaces."""
        super().__init__(
            executable_file,
            rigidbody_channel_id='0a121c78-63c6-4f4f-8c71-186a6951d928',
            game_object_channel_id='28c03571-e7fa-494a-9bf7-bfa15ee729ab',
        )
        # Action: [x-axis rotation, z-axis rotation] applied to the cube.
        self.action_space = spaces.Box(low=-2.0, high=2.0, shape=(2,), dtype=np.float32)
        # Observation bounds match the 8-d vector built in _get_obs():
        # 2 quaternion components, 3 relative-position components,
        # 3 unbounded velocity components.
        self.observation_space = spaces.Box(
            low=np.array([-1, -1, -3, -2, -3, -float('inf'), -float('inf'), -float('inf')]),
            high=np.array([1, 1, 3, 4, 3, float('inf'), float('inf'), float('inf')]),
            dtype=np.float32
        )
        self.t = 0  # step counter within the current episode
        self.r = 0  # reward of the most recent step
        self.max_steps = 50  # steps to survive for a successful episode
    def step(self, a: np.ndarray):
        """Apply one action and advance the simulation.

        Params:
            a: 2-d numpy array. The first dimension is for cube's x axis rotation, while the second dimension is for
            cube's z axis rotation.

        Returns:
            (observation, reward, done, info). Note that when the episode
            ends, self.reset() runs before the observation is read, so
            the returned observation already belongs to the new episode.
        """
        self.game_object_channel.set_action(
            'Rotate',
            index=0,
            rotation=[a[0], 0, a[1]]
        )
        self._step()
        done = False
        info = {}
        cube_position = self.game_object_channel.data[0]['position']
        sphere_position = self.rigidbody_channel.data[0]['position']
        is_fail = self._check_fail(cube_position, sphere_position)
        if is_fail:
            # Ball left the cube (see _check_fail): penalize and restart.
            self.r = -1
            done = True
            self.reset()
            info['done'] = True
            info['is_success'] = False
        elif self.t == self.max_steps:
            # Survived the whole episode: success.
            self.r = 0.1
            done = True
            self.reset()
            info['done'] = True
            info['is_success'] = True
        else:
            # Still balancing: small positive per-step reward.
            self.r = 0.1
            self.t += 1
            info['done'] = False
        return self._get_obs(), self.r, done, info
    def _get_obs(self):
        """Build the 8-d observation vector described in observation_space."""
        cube_quaternion = self.game_object_channel.data[0]['quaternion']
        cube_position = self.game_object_channel.data[0]['position']
        sphere_position = self.rigidbody_channel.data[0]['position']
        sphere_velocify = self.rigidbody_channel.data[0]['velocity']
        # Only quaternion components 0 and 2 are used (presumably the
        # x/z tilt of the cube -- confirm against simulator convention).
        rotation = np.array([cube_quaternion[0], cube_quaternion[2]])
        relative_position = np.array(sphere_position) - np.array(cube_position)
        sphere_velocify = np.array(sphere_velocify)
        return np.concatenate((rotation, relative_position, sphere_velocify))
    def reset(self):
        """Randomize cube tilt and ball position, restart, return first obs."""
        self.t = 0
        cubeRotationX = self._generate_random_float(-10, 10)
        cubeRotationZ = self._generate_random_float(-10, 10)
        spherePositionX = self._generate_random_float(-1.5, 1.5)
        spherePositionZ = self._generate_random_float(-1.5, 1.5)
        self.env_param_channel.set_float_parameter('cubeRotationX', cubeRotationX)
        self.env_param_channel.set_float_parameter('cubeRotationZ', cubeRotationZ)
        self.env_param_channel.set_float_parameter('spherePositionX', spherePositionX)
        self.env_param_channel.set_float_parameter('spherePositionZ', spherePositionZ)
        self.env.reset()
        return self._get_obs()
    def seed(self, seed=1234):
        """Seed the gym RNG; returns the seed list per the gym API."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
    def render(self, mode='human'):
        """Advance the simulation one tick so the viewer updates."""
        self._step()
    def _generate_random_float(self, min: float, max: float) -> float:
        """Return a uniform random float in [min, max)."""
        assert min < max, \
            'Min value is {}, while max value is {}.'.format(min, max)
        random_float = np.random.rand()
        random_float = random_float * (max - min) + min
        return random_float
    def _check_fail(self, cube_position, sphere_position):
        """True when the ball has dropped more than 2 units below the cube
        or drifted more than 3 units from it horizontally."""
        if sphere_position[1] - cube_position[1] < -2:
            return True
        if abs(sphere_position[0] - cube_position[0]) > 3:
            return True
        if abs(sphere_position[2] - cube_position[2]) > 3:
            return True
        return False
class BalanceBallEnvV0(BalanceBallEnv):
    """BalanceBallEnv bound to a concrete simulator build.

    The executable path is now a parameter (previously hard-coded), with
    the historical default preserved for backward compatibility.
    """
    DEFAULT_EXECUTABLE = '/home/haoyuan/workspace/rfuniverse/build/BalanceBall/RFUniverse.x86_64'

    def __init__(self, executable_file=DEFAULT_EXECUTABLE):
        super().__init__(executable_file)
| [
"numpy.array",
"gym.spaces.Box",
"numpy.random.rand",
"numpy.concatenate",
"gym.utils.seeding.np_random"
] | [((500, 560), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(-2.0)', 'high': '(2.0)', 'shape': '(2,)', 'dtype': 'np.float32'}), '(low=-2.0, high=2.0, shape=(2,), dtype=np.float32)\n', (510, 560), False, 'from gym import spaces\n'), ((2340, 2390), 'numpy.array', 'np.array', (['[cube_quaternion[0], cube_quaternion[2]]'], {}), '([cube_quaternion[0], cube_quaternion[2]])\n', (2348, 2390), True, 'import numpy as np\n'), ((2497, 2522), 'numpy.array', 'np.array', (['sphere_velocify'], {}), '(sphere_velocify)\n', (2505, 2522), True, 'import numpy as np\n'), ((2539, 2601), 'numpy.concatenate', 'np.concatenate', (['(rotation, relative_position, sphere_velocify)'], {}), '((rotation, relative_position, sphere_velocify))\n', (2553, 2601), True, 'import numpy as np\n'), ((3357, 3380), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (3374, 3380), False, 'from gym.utils import seeding\n'), ((3655, 3671), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3669, 3671), True, 'import numpy as np\n'), ((2419, 2444), 'numpy.array', 'np.array', (['sphere_position'], {}), '(sphere_position)\n', (2427, 2444), True, 'import numpy as np\n'), ((2447, 2470), 'numpy.array', 'np.array', (['cube_position'], {}), '(cube_position)\n', (2455, 2470), True, 'import numpy as np\n')] |
import os
from collections import Counter
import numpy as np
def flow_from_directory(data_generator, path, size, shuffle=True):
    """Create a categorical directory iterator with the project defaults.

    Args:
        data_generator: a Keras-style ImageDataGenerator.
        path (str): root directory with one sub-directory per class.
        size (int): square target size in pixels.
        shuffle (bool): whether the iterator shuffles samples.
    """
    options = {
        'target_size': (size, size),
        'batch_size': 32,
        'class_mode': 'categorical',
        'shuffle': shuffle,
        'seed': 42,
    }
    return data_generator.flow_from_directory(path, **options)
def get_classes_to_sizes(path):
    """Count the .jpg images per class directory under ``path``.

    The class name is the basename of the directory that directly
    contains each image; directories without any .jpg files are omitted,
    matching the original behavior.

    Returns:
        dict mapping class name -> number of .jpg files.
    """
    counts = Counter()
    for root, _dirs, files in os.walk(path):
        # Count first, then do a single basename lookup per directory
        # (previously basename() ran once per file).
        n_images = sum(1 for name in files if name.endswith('.jpg'))
        if n_images:
            counts[os.path.basename(root)] += n_images
    return dict(counts)
# Attribution: https://stackoverflow.com/a/42587192/137996
def get_class_weights(generator):
    """Inverse-frequency class weights from a generator's label list.

    The most frequent class gets weight 1.0; rarer classes receive
    proportionally larger weights.
    """
    counts = Counter(generator.classes)
    largest = float(max(counts.values()))
    weights = {}
    for class_id, num_images in counts.items():
        weights[class_id] = largest / num_images
    return weights
def get_y_true(generator, steps):
    """Collect the true class indices from ``steps`` generator batches.

    Args:
        generator: iterator yielding (X_batch, y_true_batch) pairs,
            where y_true_batch is one-hot encoded (shape [batch, classes]).
        steps (int): number of batches to consume.

    Returns:
        list[int]: flattened class indices, one per sample.
    """
    y_true = []
    for _ in range(steps):
        _X_batch, y_true_batch = next(generator)
        # extend() is O(batch) per step; the old `y_true = y_true + ...`
        # rebuilt the whole list each iteration (quadratic overall).
        y_true.extend(np.argmax(y_true_batch, axis=1).tolist())
    return y_true
def get_classes(generator):
    """Return class names ordered by their index in ``generator.class_indices``."""
    indices = generator.class_indices
    return [name for name, _ in sorted(indices.items(), key=lambda item: item[1])]
def save_labels(labels, path):
    """Write one label per line to the text file at ``path``."""
    with open(path, "w") as f:
        f.writelines(label + "\n" for label in labels)
| [
"collections.Counter",
"os.walk",
"numpy.argmax",
"os.path.basename"
] | [((369, 382), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (376, 382), False, 'import os\n'), ((795, 821), 'collections.Counter', 'Counter', (['generator.classes'], {}), '(generator.classes)\n', (802, 821), False, 'from collections import Counter\n'), ((478, 500), 'os.path.basename', 'os.path.basename', (['root'], {}), '(root)\n', (494, 500), False, 'import os\n'), ((1158, 1189), 'numpy.argmax', 'np.argmax', (['y_true_batch'], {'axis': '(1)'}), '(y_true_batch, axis=1)\n', (1167, 1189), True, 'import numpy as np\n')] |
'''
Copyright 2017 <NAME>, <NAME>, <NAME> and the Max Planck Gesellschaft. All rights reserved.
This software is provided for research purposes only.
By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license
More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de.
For comments or questions, please email us at: <EMAIL>
About this file:
================
This file defines a wrapper for the loading functions of the MANO model.
Modules included:
- load_model:
loads the MANO model from a given file location (i.e. a .pkl file location),
or a dictionary object.
'''
import chumpy as ch
import numpy as np
import cv2
class Rodrigues(ch.Ch):
    """Differentiable chumpy term mapping an axis-angle rotation vector
    to a 3x3 rotation matrix via OpenCV's Rodrigues formula."""
    dterms = 'rt'  # differentiable input: the axis-angle rotation vector
    def compute_r(self):
        # Forward value: the rotation matrix for self.rt.
        return cv2.Rodrigues(self.rt.r)[0]
    def compute_dr_wrt(self, wrt):
        # cv2.Rodrigues also returns the Jacobian; it is transposed here
        # into the layout chumpy expects. Implicitly returns None for
        # inputs other than rt.
        if wrt is self.rt:
            return cv2.Rodrigues(self.rt.r)[1].T
def lrotmin(p):
    """Relative-rotation pose feature: the concatenation of flattened
    (R(p_i) - I) matrices for every joint rotation except the first.

    Accepts either a plain numpy pose vector (returns a numpy array) or
    a chumpy expression (returns a differentiable chumpy expression).
    In both paths the first joint's 3 axis-angle entries (presumably the
    global/root rotation) are skipped.
    """
    if isinstance(p, np.ndarray):
        # Drop the first 3 entries, then treat the rest as (-1, 3)
        # axis-angle rotations.
        p = p.ravel()[3:]
        return np.concatenate(
            [(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel()
             for pp in p.reshape((-1, 3))]).ravel()
    # chumpy path: normalize shape to (-1, 3), then skip the first row.
    if p.ndim != 2 or p.shape[1] != 3:
        p = p.reshape((-1, 3))
    p = p[1:]
    return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel()
                    for pp in p]).ravel()
def posemap(s):
    """Resolve a pose-mapping name to the function implementing it."""
    if s != 'lrotmin':
        raise Exception('Unknown posemapping: %s' % (str(s), ))
    return lrotmin
| [
"chumpy.eye",
"cv2.Rodrigues",
"numpy.array",
"numpy.eye"
] | [((792, 816), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self.rt.r'], {}), '(self.rt.r)\n', (805, 816), False, 'import cv2\n'), ((902, 926), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self.rt.r'], {}), '(self.rt.r)\n', (915, 926), False, 'import cv2\n'), ((1287, 1296), 'chumpy.eye', 'ch.eye', (['(3)'], {}), '(3)\n', (1293, 1296), True, 'import chumpy as ch\n'), ((1088, 1097), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1094, 1097), True, 'import numpy as np\n'), ((1069, 1081), 'numpy.array', 'np.array', (['pp'], {}), '(pp)\n', (1077, 1081), True, 'import numpy as np\n')] |
import numpy as np
import cv2
from skimage.feature import hog
from scipy.ndimage.measurements import label
def get_hog_features(img, orient, pix_per_cell, cell_per_block,
                     vis=False, feature_vec=True):
    """Extract HOG features (and optionally a visualization) from an image.

    Args:
        img: single-channel image.
        orient (int): number of orientation bins.
        pix_per_cell (int): cell size in pixels (square cells).
        cell_per_block (int): block size in cells (square blocks).
        vis (bool): if True, also return the HOG visualization image.
        feature_vec (bool): return the features as a flat vector.

    Returns:
        float32 feature array, or (features, hog_image) when vis is True.
    """
    # Single call site: both branches previously duplicated every kwarg.
    result = hog(img, orientations=orient,
                 pixels_per_cell=(pix_per_cell, pix_per_cell),
                 cells_per_block=(cell_per_block, cell_per_block),
                 block_norm='L2-Hys',
                 transform_sqrt=True,
                 visualise=vis, feature_vector=feature_vec)
    if vis:
        features, hog_image = result
        # Cast for consistency with the vis=False path, which already
        # returned float32 while this branch returned float64.
        return features.astype(np.float32), hog_image
    return result.astype(np.float32)
def bin_spatial(img, size=(32, 32)):
    """Downsample the image and flatten it into a float32 feature vector."""
    resized = cv2.resize(img, size)
    return resized.ravel().astype(np.float32)
def color_hist(img, nbins=32, bins_range=(0, 256)):
    """Per-channel color histograms plus a concatenated feature vector.

    Returns:
        (hist_ch1, hist_ch2, hist_ch3, bin_centers, features): each hist
        is the (counts, edges) pair from np.histogram, bin_centers are
        the midpoints of the (shared) bin edges, and features is the
        float32 concatenation of the three count arrays.
    """
    hists = [np.histogram(img[:, :, ch], bins=nbins, range=bins_range)
             for ch in range(3)]
    channel1_hist, channel2_hist, channel3_hist = hists
    edges = channel1_hist[1]
    # Midpoints of consecutive bin edges.
    bin_centers = (edges[1:] + edges[:-1]) / 2
    hist_features = np.concatenate([h[0] for h in hists])
    return channel1_hist, channel2_hist, channel3_hist, bin_centers, hist_features.astype(np.float32)
def draw_boxes(img, bboxes, color=(0, 0, 255), thick=6):
    """Return a copy of ``img`` with the given bounding boxes drawn.

    Each bbox takes the form ((x1, y1), (x2, y2)).
    """
    annotated = np.copy(img)
    for box in bboxes:
        cv2.rectangle(annotated, box[0], box[1], color, thick)
    return annotated
def add_heat(heatmap, bbox_list):
    """Increment ``heatmap`` in place inside each bounding box.

    Each box takes the form ((x1, y1), (x2, y2)); the heatmap is indexed
    [y, x]. Returns the (mutated) heatmap.
    """
    for (x1, y1), (x2, y2) in bbox_list:
        heatmap[y1:y2, x1:x2] += 1
    return heatmap
def apply_threshold(heatmap, threshold):
    """Suppress weak detections: zero every value <= threshold, in place."""
    np.copyto(heatmap, 0, where=heatmap <= threshold)
    return heatmap
def draw_labeled_bboxes(img, heatmap):
    """Draw one blue box around each connected component of the heatmap.

    Modifies ``img`` in place and returns it.
    """
    labeled, num_cars = label(heatmap)
    for car_number in range(1, num_cars + 1):
        # Pixel coordinates belonging to this component.
        ys, xs = (labeled == car_number).nonzero()
        top_left = (np.min(xs), np.min(ys))
        bottom_right = (np.max(xs), np.max(ys))
        cv2.rectangle(img, top_left, bottom_right, (0, 0, 255), 6)
    return img
| [
"cv2.resize",
"numpy.copy",
"scipy.ndimage.measurements.label",
"skimage.feature.hog",
"numpy.histogram",
"numpy.min",
"numpy.array",
"numpy.max",
"cv2.rectangle",
"numpy.concatenate"
] | [((1593, 1649), 'numpy.histogram', 'np.histogram', (['img[:, :, 0]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 0], bins=nbins, range=bins_range)\n', (1605, 1649), True, 'import numpy as np\n'), ((1668, 1724), 'numpy.histogram', 'np.histogram', (['img[:, :, 1]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 1], bins=nbins, range=bins_range)\n', (1680, 1724), True, 'import numpy as np\n'), ((1743, 1799), 'numpy.histogram', 'np.histogram', (['img[:, :, 2]'], {'bins': 'nbins', 'range': 'bins_range'}), '(img[:, :, 2], bins=nbins, range=bins_range)\n', (1755, 1799), True, 'import numpy as np\n'), ((1987, 2057), 'numpy.concatenate', 'np.concatenate', (['(channel1_hist[0], channel2_hist[0], channel3_hist[0])'], {}), '((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n', (2001, 2057), True, 'import numpy as np\n'), ((2334, 2346), 'numpy.copy', 'np.copy', (['img'], {}), '(img)\n', (2341, 2346), True, 'import numpy as np\n'), ((3137, 3151), 'scipy.ndimage.measurements.label', 'label', (['heatmap'], {}), '(heatmap)\n', (3142, 3151), False, 'from scipy.ndimage.measurements import label\n'), ((379, 597), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'block_norm': '"""L2-Hys"""', 'cells_per_block': '(cell_per_block, cell_per_block)', 'transform_sqrt': '(True)', 'visualise': 'vis', 'feature_vector': 'feature_vec'}), "(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n block_norm='L2-Hys', cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=True, visualise=vis, feature_vector=feature_vec)\n", (382, 597), False, 'from skimage.feature import hog\n'), ((871, 1089), 'skimage.feature.hog', 'hog', (['img'], {'orientations': 'orient', 'pixels_per_cell': '(pix_per_cell, pix_per_cell)', 'cells_per_block': '(cell_per_block, cell_per_block)', 'block_norm': '"""L2-Hys"""', 'transform_sqrt': '(True)', 'visualise': 'vis', 'feature_vector': 
'feature_vec'}), "(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block), block_norm='L2-Hys',\n transform_sqrt=True, visualise=vis, feature_vector=feature_vec)\n", (874, 1089), False, 'from skimage.feature import hog\n'), ((2470, 2523), 'cv2.rectangle', 'cv2.rectangle', (['imcopy', 'bbox[0]', 'bbox[1]', 'color', 'thick'], {}), '(imcopy, bbox[0], bbox[1], color, thick)\n', (2483, 2523), False, 'import cv2\n'), ((3415, 3435), 'numpy.array', 'np.array', (['nonzero[0]'], {}), '(nonzero[0])\n', (3423, 3435), True, 'import numpy as np\n'), ((3455, 3475), 'numpy.array', 'np.array', (['nonzero[1]'], {}), '(nonzero[1])\n', (3463, 3475), True, 'import numpy as np\n'), ((3669, 3721), 'cv2.rectangle', 'cv2.rectangle', (['img', 'bbox[0]', 'bbox[1]', '(0, 0, 255)', '(6)'], {}), '(img, bbox[0], bbox[1], (0, 0, 255), 6)\n', (3682, 3721), False, 'import cv2\n'), ((3550, 3566), 'numpy.min', 'np.min', (['nonzerox'], {}), '(nonzerox)\n', (3556, 3566), True, 'import numpy as np\n'), ((3568, 3584), 'numpy.min', 'np.min', (['nonzeroy'], {}), '(nonzeroy)\n', (3574, 3584), True, 'import numpy as np\n'), ((3588, 3604), 'numpy.max', 'np.max', (['nonzerox'], {}), '(nonzerox)\n', (3594, 3604), True, 'import numpy as np\n'), ((3606, 3622), 'numpy.max', 'np.max', (['nonzeroy'], {}), '(nonzeroy)\n', (3612, 3622), True, 'import numpy as np\n'), ((1357, 1378), 'cv2.resize', 'cv2.resize', (['img', 'size'], {}), '(img, size)\n', (1367, 1378), False, 'import cv2\n')] |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Renders mesh using OpenDr / Pytorch-3d for visualization.
Part of code is modified from https://github.com/akanazawa/hmr
"""
import sys
import numpy as np
import cv2
import pdb
from PIL import Image, ImageDraw
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
class OpendrRenderer(object):
    """Renders a mesh with OpenDR for visualization.

    Part of the code is modified from https://github.com/akanazawa/hmr.
    """
    def __init__(self, img_size=224, mesh_color=None):
        """
        Args:
            img_size (int): output image height/width in pixels.
            mesh_color: RGB color in [0, 1]; defaults to mid gray.
        """
        # None sentinel replaces a mutable ndarray default argument,
        # which was shared across all instances.
        self.color = np.array([0.5, 0.5, 0.5]) if mesh_color is None else mesh_color
        self.w = img_size
        self.h = img_size
        self.img_size = img_size
        self.flength = 500.

    def render(self, verts, faces, bg_img):
        """Render verts/faces over ``bg_img``.

        Vertices are normalized from pixel coordinates into the camera
        frame expected by __call__ (input_size / f constants presumably
        match the upstream data pipeline -- confirm against the caller).
        """
        verts = verts.copy()
        faces = faces.copy()
        input_size = 500
        f = 10
        verts[:, 0] = (verts[:, 0] - input_size) / input_size
        verts[:, 1] = (verts[:, 1] - input_size) / input_size
        verts[:, 2] /= (5 * 112)
        verts[:, 2] += f
        cam_for_render = np.array([f, 1, 1]) * input_size
        return self.__call__(
            img=bg_img, cam=cam_for_render,
            verts=verts, faces=faces, color=self.color)

    def __call__(self,
                 verts,
                 faces,
                 cam=None,
                 img=None,
                 do_alpha=False,
                 far=None,
                 near=None,
                 color=None,
                 img_size=None):
        """Render the mesh.

        cam is 3D [f, px, py]; when omitted it defaults to a centered
        principal point at the configured focal length. Returns a uint8
        image of shape (h, w, 3).
        """
        # None sentinel replaces a mutable ndarray default argument.
        if color is None:
            color = np.array([0, 0, 255])
        if img is not None:
            h, w = img.shape[:2]
        elif img_size is not None:
            h = img_size[0]
            w = img_size[1]
        else:
            h = self.h
            w = self.w
        if cam is None:
            cam = [self.flength, w / 2., h / 2.]
        use_cam = ProjectPoints(
            f=cam[0] * np.ones(2),
            rt=np.zeros(3),
            t=np.zeros(3),
            k=np.zeros(5),
            c=cam[1:3])
        # Derive clipping planes from the mesh depth range when not given.
        if near is None:
            near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
        if far is None:
            far = np.maximum(np.max(verts[:, 2]) + 25, 25)
        imtmp = render_model(
            verts,
            faces,
            w,
            h,
            use_cam,
            do_alpha=do_alpha,
            img=img,
            far=far,
            near=near,
            color=color)
        # Convert the float [0, 1] render into a uint8 image.
        return (imtmp * 255).astype('uint8')
def _create_renderer(w=640,
                     h=480,
                     rt=None,
                     t=None,
                     f=None,
                     c=None,
                     k=None,
                     near=.5,
                     far=10.):
    """Build a ColoredRenderer with a pinhole camera.

    Args:
        w, h: viewport size in pixels.
        rt, t: camera rotation (axis-angle) and translation; default zero.
        f, c: focal lengths and principal point; default to w/2-based values.
        k: distortion coefficients; default zero (no distortion).
        near, far: clipping planes.
    """
    # None sentinels replace mutable ndarray defaults (np.zeros(3)),
    # which were evaluated once and shared across every call.
    rt = np.zeros(3) if rt is None else rt
    t = np.zeros(3) if t is None else t
    f = np.array([w, w]) / 2. if f is None else f
    c = np.array([w, h]) / 2. if c is None else c
    k = np.zeros(5) if k is None else k
    rn = ColoredRenderer()
    rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
    rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
    return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
                    verts,
                    faces,
                    yrot=np.radians(70),
                    color=None):
    """Render verts/faces into ``rn`` lit by three Lambertian point lights.

    Args:
        rn: renderer produced by _create_renderer().
        verts, faces: mesh geometry.
        yrot: rotation (radians) applied to the light positions about Y.
        color: per-vertex base color; defaults to blue [0, 0, 255].
            The None sentinel replaces a mutable ndarray default and
            also gives a sane fallback for callers (e.g. render_model)
            that forward color=None.
    """
    if color is None:
        color = np.array([0, 0, 255])
    # Rendered model color on a white background.
    rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
    albedo = rn.vc
    # Back light (on back right corner).
    rn.vc = LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    # Left light.
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([800, 10, 300]), yrot),
        vc=albedo,
        light_color=np.array([1, 1, 1]))
    # Right light, slightly dimmer.
    rn.vc += LambertianPointLight(
        f=rn.f,
        v=rn.v,
        num_verts=len(rn.v),
        light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
        vc=albedo,
        light_color=np.array([.7, .7, .7]))
    return rn.r
def get_alpha(imtmp, bgval=1.):
    """Append an alpha channel that is transparent where every channel
    of ``imtmp`` equals ``bgval`` (i.e. the background).

    The unused h/w locals from the original were removed.
    """
    alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
    b_channel, g_channel, r_channel = cv2.split(imtmp)
    im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha.astype(
        imtmp.dtype)))
    return im_RGBA
def append_alpha(imtmp):
    """Append a fully opaque alpha channel to ``imtmp``."""
    alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
    if np.issubdtype(imtmp.dtype, np.uint8):
        alpha = alpha * 255  # fully opaque in 8-bit images
    channels = cv2.split(imtmp)
    return cv2.merge((channels[0], channels[1], channels[2], alpha))
def render_model(verts,
                 faces,
                 w,
                 h,
                 cam,
                 near=0.5,
                 far=25,
                 img=None,
                 do_alpha=False,
                 color=None):
    """Render a mesh over an optional background image.

    With no ``img`` the background is white; ``do_alpha`` then makes the
    white background transparent, or appends an opaque alpha channel
    when a background image is given.
    """
    rn = _create_renderer(
        w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
    if img is not None:
        # Normalize 0-255 backgrounds into [0, 1].
        rn.background_image = img / 255. if img.max() > 1.1 else img
    imtmp = simple_renderer(rn, verts, faces, color=color)
    if do_alpha:
        imtmp = get_alpha(imtmp) if img is None else append_alpha(imtmp)
    return imtmp
"numpy.radians",
"opendr.camera.ProjectPoints",
"numpy.ones_like",
"opendr.renderer.ColoredRenderer",
"numpy.zeros",
"numpy.ones",
"numpy.all",
"numpy.min",
"cv2.split",
"numpy.array",
"numpy.sin",
"numpy.cos",
"numpy.max",
"numpy.dot",
"cv2.merge",
"numpy.issubdtype"
] | [((2664, 2675), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2672, 2675), True, 'import numpy as np\n'), ((2700, 2711), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2708, 2711), True, 'import numpy as np\n'), ((3012, 3029), 'opendr.renderer.ColoredRenderer', 'ColoredRenderer', ([], {}), '()\n', (3027, 3029), False, 'from opendr.renderer import ColoredRenderer\n'), ((3047, 3087), 'opendr.camera.ProjectPoints', 'ProjectPoints', ([], {'rt': 'rt', 't': 't', 'f': 'f', 'c': 'c', 'k': 'k'}), '(rt=rt, t=t, f=f, c=c, k=k)\n', (3060, 3087), False, 'from opendr.camera import ProjectPoints\n'), ((3389, 3407), 'numpy.dot', 'np.dot', (['points', 'ry'], {}), '(points, ry)\n', (3395, 3407), True, 'import numpy as np\n'), ((3513, 3527), 'numpy.radians', 'np.radians', (['(70)'], {}), '(70)\n', (3523, 3527), True, 'import numpy as np\n'), ((3555, 3576), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (3563, 3576), True, 'import numpy as np\n'), ((4782, 4798), 'cv2.split', 'cv2.split', (['imtmp'], {}), '(imtmp)\n', (4791, 4798), False, 'import cv2\n'), ((5009, 5045), 'numpy.issubdtype', 'np.issubdtype', (['imtmp.dtype', 'np.uint8'], {}), '(imtmp.dtype, np.uint8)\n', (5022, 5045), True, 'import numpy as np\n'), ((5113, 5129), 'cv2.split', 'cv2.split', (['imtmp'], {}), '(imtmp)\n', (5122, 5129), False, 'import cv2\n'), ((5144, 5195), 'cv2.merge', 'cv2.merge', (['(b_channel, g_channel, r_channel, alpha)'], {}), '((b_channel, g_channel, r_channel, alpha))\n', (5153, 5195), False, 'import cv2\n'), ((514, 539), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (522, 539), True, 'import numpy as np\n'), ((1495, 1516), 'numpy.array', 'np.array', (['[0, 0, 255]'], {}), '([0, 0, 255])\n', (1503, 1516), True, 'import numpy as np\n'), ((2970, 2981), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2978, 2981), True, 'import numpy as np\n'), ((1049, 1068), 'numpy.array', 'np.array', (['[f, 1, 1]'], {}), '([f, 1, 1])\n', (1057, 
1068), True, 'import numpy as np\n'), ((2870, 2886), 'numpy.array', 'np.array', (['[w, w]'], {}), '([w, w])\n', (2878, 2886), True, 'import numpy as np\n'), ((2920, 2936), 'numpy.array', 'np.array', (['[w, h]'], {}), '([w, h])\n', (2928, 2936), True, 'import numpy as np\n'), ((3675, 3685), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3682, 3685), True, 'import numpy as np\n'), ((3955, 3974), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (3963, 3974), True, 'import numpy as np\n'), ((4261, 4280), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (4269, 4280), True, 'import numpy as np\n'), ((4574, 4599), 'numpy.array', 'np.array', (['[0.7, 0.7, 0.7]'], {}), '([0.7, 0.7, 0.7])\n', (4582, 4599), True, 'import numpy as np\n'), ((4953, 4981), 'numpy.ones_like', 'np.ones_like', (['imtmp[:, :, 0]'], {}), '(imtmp[:, :, 0])\n', (4965, 4981), True, 'import numpy as np\n'), ((1975, 1986), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1983, 1986), True, 'import numpy as np\n'), ((2002, 2013), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2010, 2013), True, 'import numpy as np\n'), ((2029, 2040), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (2037, 2040), True, 'import numpy as np\n'), ((3272, 3285), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3278, 3285), True, 'import numpy as np\n'), ((3291, 3304), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3297, 3304), True, 'import numpy as np\n'), ((3361, 3374), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (3367, 3374), True, 'import numpy as np\n'), ((3879, 3907), 'numpy.array', 'np.array', (['[-200, -100, -100]'], {}), '([-200, -100, -100])\n', (3887, 3907), True, 'import numpy as np\n'), ((4189, 4213), 'numpy.array', 'np.array', (['[800, 10, 300]'], {}), '([800, 10, 300])\n', (4197, 4213), True, 'import numpy as np\n'), ((4434, 4461), 'numpy.array', 'np.array', (['[-500, 500, 1000]'], {}), '([-500, 500, 1000])\n', (4442, 4461), True, 'import numpy as 
np\n'), ((4691, 4721), 'numpy.all', 'np.all', (['(imtmp == bgval)'], {'axis': '(2)'}), '(imtmp == bgval, axis=2)\n', (4697, 4721), True, 'import numpy as np\n'), ((1948, 1958), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (1955, 1958), True, 'import numpy as np\n'), ((2122, 2141), 'numpy.min', 'np.min', (['verts[:, 2]'], {}), '(verts[:, 2])\n', (2128, 2141), True, 'import numpy as np\n'), ((2206, 2225), 'numpy.max', 'np.max', (['verts[:, 2]'], {}), '(verts[:, 2])\n', (2212, 2225), True, 'import numpy as np\n'), ((3342, 3355), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (3348, 3355), True, 'import numpy as np\n')] |
import pickle
import os.path
import numpy as np
from keras.layers.embeddings import Embedding
from keras.models import Sequential, load_model
from sklearn.metrics import f1_score, accuracy_score
from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout
import warnings
# Silence library deprecation/user warnings for cleaner console output.
warnings.filterwarnings('ignore')
# Number of full passes over the training set.
epochs = 10
def get_data():
    """Load the ATIS slot-filling dataset from ``atis.pkl``.

    Returns:
        tuple: (x_train, y_train, x_valid, y_valid, x_test, y_test,
        n_classes, n_vocab, idx2words, idx2labels) where the ``idx2*``
        dicts invert the index mappings stored in the pickle.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original open() call leaked the handle).
    with open("atis.pkl", 'rb') as f:
        train_set, valid_set, test_set, dicts = pickle.load(f, encoding='latin1')
    words2idx, labels2idx = dicts['words2idx'], dicts['labels2idx']
    x_train, _, y_train = train_set
    x_valid, _, y_valid = valid_set
    x_test, _, y_test = test_set
    # Create index to word/label dicts (invert the stored mappings)
    idx2words = {words2idx[k]: k for k in words2idx}
    idx2labels = {labels2idx[k]: k for k in labels2idx}
    n_classes = len(idx2labels)
    n_vocab = len(idx2words)
    return x_train, y_train, x_valid, y_valid, x_test, y_test, n_classes, n_vocab, idx2words, idx2labels
def builg_model(n_vocab, n_classes):
    """Build and compile a bidirectional-LSTM sequence tagger.

    Architecture: Embedding -> Dropout -> BiLSTM -> per-timestep softmax.

    Args:
        n_vocab: vocabulary size for the embedding layer.
        n_classes: number of slot labels to predict per token.

    Returns:
        A compiled Keras ``Sequential`` model (summary printed as a side
        effect).
    """
    layer_stack = [
        Embedding(n_vocab, 100, input_length=None),
        Dropout(0.25),
        Bidirectional(LSTM(100, return_sequences=True)),
        TimeDistributed(Dense(n_classes, activation='softmax')),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.summary()
    return model
if __name__ == '__main__':
    x_train, y_train, x_valid, y_valid, x_test, y_test, n_classes, n_vocab, idx2words, idx2labels = get_data()
    # Reuse a previously trained model if one was saved; otherwise train
    # from scratch and persist the result.
    if os.path.exists('SLU.h5'):
        model = load_model('SLU.h5')
    else:
        model = builg_model(n_vocab, n_classes)
        for iteration in range(1, epochs+1):
            print('---------------- Iteration {} ----------------'.format(iteration))
            # Train one sentence at a time (batch size 1); labels are
            # one-hot encoded on the fly via np.eye.
            for i in range(len(x_train)):
                model.train_on_batch(x_train[i][np.newaxis,:], np.eye(n_classes)[y_train[i]][np.newaxis, :])
        model.save('SLU.h5')
    # Evaluate on the test set: argmax over the softmax outputs gives the
    # predicted label index for every token of every sentence.
    y_pred = []
    for i in range(len(x_test)):
        y_pred.append(np.argmax(model.predict_on_batch(x_test[i][np.newaxis,:]), -1)[0])
    # Per-sentence scores averaged over the whole test set.
    accuracy = np.mean([accuracy_score(y_test[i], y_pred[i]) for i in range(len(y_test))])
    f1 = np.mean([f1_score(y_test[i], y_pred[i], average='weighted') for i in range(len(y_test))])
    print('Test Accuracy: {} \nTest F1: {}'.format(accuracy, f1))
    # show example
    sample_indices = np.random.randint(0, len(x_test), size=10)
    sample_texts = [x_test[i] for i in sample_indices]
    sample_labels = [y_test[i] for i in sample_indices]
    pred_labels = [np.argmax(model.predict(sample_texts[i][np.newaxis, :]), -1)[0] for i in range(len(sample_indices))]
    # Decode indices back to words/labels for a human-readable printout.
    for i in range(len(sample_indices)):
        sentence = [idx2words[j] for j in sample_texts[i]]
        real_label = [idx2labels[j] for j in sample_labels[i]]
        pred_label = [idx2labels[j] for j in pred_labels[i]]
        print('Sentence: {} \nReal: {} \nPredict: {} \n'.format(sentence, real_label, pred_label))
| [
"keras.models.load_model",
"keras.layers.embeddings.Embedding",
"numpy.eye",
"warnings.filterwarnings",
"keras.layers.Dropout",
"keras.layers.LSTM",
"sklearn.metrics.accuracy_score",
"sklearn.metrics.f1_score",
"keras.layers.Dense",
"keras.models.Sequential"
] | [((289, 322), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (312, 322), False, 'import warnings\n'), ((992, 1004), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1002, 1004), False, 'from keras.models import Sequential, load_model\n'), ((1019, 1061), 'keras.layers.embeddings.Embedding', 'Embedding', (['n_vocab', '(100)'], {'input_length': 'None'}), '(n_vocab, 100, input_length=None)\n', (1028, 1061), False, 'from keras.layers.embeddings import Embedding\n'), ((1077, 1090), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1084, 1090), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout\n'), ((1543, 1563), 'keras.models.load_model', 'load_model', (['"""SLU.h5"""'], {}), "('SLU.h5')\n", (1553, 1563), False, 'from keras.models import Sequential, load_model\n'), ((1120, 1152), 'keras.layers.LSTM', 'LSTM', (['(100)'], {'return_sequences': '(True)'}), '(100, return_sequences=True)\n', (1124, 1152), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout\n'), ((1185, 1223), 'keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""'}), "(n_classes, activation='softmax')\n", (1190, 1223), False, 'from keras.layers import LSTM, Dense, TimeDistributed, Bidirectional, Dropout\n'), ((2098, 2134), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test[i]', 'y_pred[i]'], {}), '(y_test[i], y_pred[i])\n', (2112, 2134), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((2187, 2237), 'sklearn.metrics.f1_score', 'f1_score', (['y_test[i]', 'y_pred[i]'], {'average': '"""weighted"""'}), "(y_test[i], y_pred[i], average='weighted')\n", (2195, 2237), False, 'from sklearn.metrics import f1_score, accuracy_score\n'), ((1843, 1860), 'numpy.eye', 'np.eye', (['n_classes'], {}), '(n_classes)\n', (1849, 1860), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""
Implements some graphical transforms for images.
"""
import random
from typing import Tuple
import cv2
import numpy as np
def size_from_string(size: str) -> Tuple[int, int]:
    """Parse a ``"H,W"`` string into an ``(H, W)`` tuple of ints.

    Args:
        size: comma-separated pair of integers, e.g. ``"120,160"``.

    Returns:
        Tuple of exactly two ints.

    Raises:
        ValueError: if the string does not parse into exactly two ints.
    """
    try:
        new_size = tuple(map(int, size.strip().split(",")))
        if len(new_size) != 2:
            raise ValueError("expected exactly two components")
    except Exception as e:
        # Chain the original parse error instead of discarding it.
        raise ValueError("Expected H,W got {}. e.what(): {}".format(size, e)) from e
    return new_size
def create_gradient(size=(120, 160), hor=True, ver=True, low=0.3):
    """Return a 2-D gradient image of the given (rows, cols) size.

    Each enabled axis ramps linearly from ``low`` to 1.0; a disabled axis
    contributes a constant 1.0. The result is the outer product of the
    two per-axis ramps.
    """
    assert isinstance(size, tuple)
    assert len(size) == 2
    rows, cols = size
    if hor:
        col_ramp = np.linspace(low, 1.0, rows)[:, None]
    else:
        col_ramp = np.ones((rows, 1))
    if ver:
        row_ramp = np.linspace(low, 1.0, cols)[None, :]
    else:
        row_ramp = np.ones((1, cols))
    return col_ramp.dot(row_ramp)
def scale_no_overlflow(img: np.ndarray, scale: float, img_type=np.uint8) -> np.ndarray:
    """Scale pixel values and clip to [0, 255] so the final cast cannot wrap.

    Args:
        img: input image array.
        scale: multiplicative brightness factor.
        img_type: dtype of the returned array (default ``np.uint8``).

    Returns:
        The scaled, clipped image cast to ``img_type``.
    """
    # np.float was removed in NumPy 1.24 -- use builtin float (== float64).
    float_img = img.astype(float) * scale
    np.clip(float_img, 0, 255, out=float_img)
    return float_img.astype(img_type)
def apply_color_filter(img: np.ndarray) -> np.ndarray:
    """Black out pixels that are neither reddish nor bright.

    A pixel is kept when its red channel (BGR layout) or its grayscale
    intensity exceeds 100 -- i.e. all road marks, basically.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    keep = (img[:, :, 2] > 100) | (gray > 100)
    # Broadcast the boolean mask over the color channels.
    return img * keep[:, :, None]
def brightness(img: np.ndarray, scale_range=(0.3, 1.7)) -> np.ndarray:
    """Randomly rescale the image brightness within ``scale_range``."""
    assert isinstance(scale_range, tuple)
    assert len(scale_range) == 2
    lo, hi = scale_range
    factor = lo + np.random.random() * (hi - lo)
    return scale_no_overlflow(img, factor)
def spot(img: np.ndarray, kernel_size=51, sigma=10, scale_factor=100.0) -> np.ndarray:
    """Add a bright Gaussian 'spot' at a random location.

    NOTE(review): mutates ``img`` in place via ``+=`` and also returns it;
    for uint8 images the addition may wrap around since no clipping is done.
    """
    # Rescale the (normalized) 1-D Gaussian so the 2-D patch has visible
    # integer intensities after the uint8 cast.
    scale = kernel_size * sigma * scale_factor
    kernel = cv2.getGaussianKernel(kernel_size, sigma)
    # Outer product of the 1-D kernel with itself -> 2-D Gaussian patch.
    kernel = (kernel.dot(kernel.transpose()) * scale).astype(np.uint8)
    kernel = kernel[:, :, None]
    height, width = img.shape[:2]
    # Random top-left corner chosen so the whole patch fits in the image.
    start_height = int(np.random.random() * (height - kernel_size))
    start_width = int(np.random.random() * (width - kernel_size))
    img[start_height:start_height + kernel_size, start_width:start_width + kernel_size, :] += kernel
    return img
def gradient_lighting(img: np.ndarray) -> np.ndarray:
    """Multiply the image by a randomly oriented, randomly scaled gradient."""
    choices = [True, False]
    horizontal = random.choice(choices)
    vertical = random.choice(choices)
    strength = 0.3 + random.random() * 2.0
    ramp = create_gradient(img.shape[:2], hor=horizontal, ver=vertical)
    ramp = ramp * strength
    # Add a channel axis so the 2-D ramp broadcasts over the color planes.
    return img * ramp[:, :, np.newaxis]
def overflow(img: np.ndarray) -> np.ndarray:
    """Shift every pixel by 127, deliberately letting uint8 values wrap."""
    assert img.dtype == np.uint8
    shifted = img + 127
    return shifted
def invert(img: np.ndarray) -> np.ndarray:
    """Return the photographic negative of a uint8 image."""
    assert img.dtype == np.uint8
    negative = 255 - img
    return negative
| [
"cv2.cvtColor",
"random.choice",
"numpy.clip",
"cv2.getGaussianKernel",
"numpy.ones",
"random.random",
"numpy.random.random",
"numpy.linspace"
] | [((1057, 1098), 'numpy.clip', 'np.clip', (['float_img', '(0)', '(255)'], {'out': 'float_img'}), '(float_img, 0, 255, out=float_img)\n', (1064, 1098), True, 'import numpy as np\n'), ((1209, 1246), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1221, 1246), False, 'import cv2\n'), ((1841, 1882), 'cv2.getGaussianKernel', 'cv2.getGaussianKernel', (['kernel_size', 'sigma'], {}), '(kernel_size, sigma)\n', (1862, 1882), False, 'import cv2\n'), ((2365, 2387), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (2378, 2387), False, 'import random\n'), ((2398, 2420), 'random.choice', 'random.choice', (['choices'], {}), '(choices)\n', (2411, 2420), False, 'import random\n'), ((649, 679), 'numpy.linspace', 'np.linspace', (['low', '(1.0)', 'size[0]'], {}), '(low, 1.0, size[0])\n', (660, 679), True, 'import numpy as np\n'), ((734, 755), 'numpy.ones', 'np.ones', (['(size[0], 1)'], {}), '((size[0], 1))\n', (741, 755), True, 'import numpy as np\n'), ((784, 814), 'numpy.linspace', 'np.linspace', (['low', '(1.0)', 'size[1]'], {}), '(low, 1.0, size[1])\n', (795, 814), True, 'import numpy as np\n'), ((869, 890), 'numpy.ones', 'np.ones', (['(1, size[1])'], {}), '((1, size[1]))\n', (876, 890), True, 'import numpy as np\n'), ((1595, 1613), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1611, 1613), True, 'import numpy as np\n'), ((2044, 2062), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2060, 2062), True, 'import numpy as np\n'), ((2111, 2129), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2127, 2129), True, 'import numpy as np\n'), ((2439, 2454), 'random.random', 'random.random', ([], {}), '()\n', (2452, 2454), False, 'import random\n')] |
#! /usr/bin/python
import os
import sys
import json
import luigi
import numpy as np
import nifty.tools as nt
import cluster_tools.utils.volume_utils as vu
import cluster_tools.utils.function_utils as fu
from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask
from cluster_tools.utils.task_utils import DummyTask
class ThresholdBase(luigi.Task):
    """ Threshold base class.

    Applies a (optionally pre-smoothed) threshold to a chunked volume,
    writing a uint8 mask dataset. Backend subclasses bind the task to a
    specific scheduler (local / slurm / lsf).
    """
    task_name = 'threshold'
    src_file = os.path.abspath(__file__)
    allow_retry = False
    # Input / output container paths and internal dataset keys.
    input_path = luigi.Parameter()
    input_key = luigi.Parameter()
    output_path = luigi.Parameter()
    output_key = luigi.Parameter()
    # Threshold value and comparison mode (one of `threshold_modes`).
    threshold = luigi.FloatParameter()
    threshold_mode = luigi.Parameter(default='greater')
    # Optional channel selection (int or sequence of ints) for 4d input.
    channel = luigi.Parameter(default=None)
    # task that is required before running this task
    dependency = luigi.TaskParameter(DummyTask())
    threshold_modes = ('greater', 'less', 'equal')
    @staticmethod
    def default_task_config():
        # we use this to get also get the common default config
        config = LocalTask.default_task_config()
        config.update({'sigma_prefilter': 0})
        return config
    def requires(self):
        return self.dependency
    def run_impl(self):
        # Resolve global settings (scheduler shebang, blocking, roi, etc.).
        shebang, block_shape, roi_begin, roi_end, block_list_path\
            = self.global_config_values(with_block_list_path=True)
        self.init(shebang)
        # get shape and make block config
        shape = vu.get_shape(self.input_path, self.input_key)
        assert self.threshold_mode in self.threshold_modes
        config = self.get_task_config()
        config.update({'input_path': self.input_path,
                       'input_key': self.input_key,
                       'output_path': self.output_path,
                       'output_key': self.output_key,
                       'block_shape': block_shape,
                       'threshold': self.threshold,
                       'threshold_mode': self.threshold_mode})
        # get chunks
        chunks = config.pop('chunks', None)
        if chunks is None:
            chunks = tuple(bs // 2 for bs in block_shape)
        # check if we have a multi-channel volume and specify a channel
        # to apply the threshold to
        if self.channel is None:
            # if no channel is specified, we need 3d input
            assert len(shape) == 3, str(len(shape))
        else:
            # if channel is specified, we need 4d input
            assert isinstance(self.channel, (int, tuple, list))
            assert len(shape) == 4, str(len(shape))
            if isinstance(self.channel, int):
                assert shape[0] > self.channel, "%i, %i" % (shape[0], self.channel)
            else:
                assert all(isinstance(chan, int) for chan in self.channel)
                assert shape[0] > max(self.channel), "%i, %i" % (shape[0], max(self.channel))
            # Output is 3d: drop the channel axis from the shape.
            shape = shape[1:]
            config.update({'channel': self.channel})
        # clip chunks
        chunks = tuple(min(ch, sh) for ch, sh in zip(chunks, shape))
        # make output dataset
        compression = config.pop('compression', 'gzip')
        with vu.file_reader(self.output_path) as f:
            f.require_dataset(self.output_key, shape=shape, dtype='uint8',
                              compression=compression, chunks=chunks)
        block_list = vu.blocks_in_volume(shape, block_shape, roi_begin, roi_end,
                                          block_list_path=block_list_path)
        n_jobs = min(len(block_list), self.max_jobs)
        # we only have a single job to find the labeling
        self.prepare_jobs(n_jobs, block_list, config)
        self.submit_jobs(n_jobs)
        # wait till jobs finish and check for job success
        self.wait_for_jobs()
        # log the save-path again
        self.check_jobs(n_jobs)
class ThresholdLocal(ThresholdBase, LocalTask):
    """
    Threshold on local machine
    """
    pass
class ThresholdSlurm(ThresholdBase, SlurmTask):
    """
    Threshold on slurm cluster
    """
    pass
class ThresholdLSF(ThresholdBase, LSFTask):
    """
    Threshold on lsf cluster
    """
    pass
def _threshold_block(block_id, blocking,
                     ds_in, ds_out, threshold,
                     threshold_mode, channel, sigma):
    """ Threshold a single block of the volume and write the uint8 mask.

    Args:
        block_id (int): id of the block in `blocking`.
        blocking: nifty blocking of the output volume.
        ds_in: input dataset (3d, or 4d channel-first if `channel` is set).
        ds_out: output uint8 dataset (3d).
        threshold (float): threshold value.
        threshold_mode (str): 'greater', 'less' or 'equal'.
        channel (int, sequence of int or None): channel(s) to average
            before thresholding; None for plain 3d input.
        sigma (float): gaussian pre-smoothing sigma; 0 disables smoothing.
    """
    fu.log("start processing block %i" % block_id)
    block = blocking.getBlock(block_id)
    # NOTE: the original code computed this bounding box twice.
    bb = vu.block_to_bb(block)
    if channel is None:
        input_ = ds_in[bb]
    else:
        # load the requested channel(s) and average them
        channel_ = [channel] if isinstance(channel, int) else channel
        in_shape = (len(channel_),) + tuple(b.stop - b.start for b in bb)
        input_ = np.zeros(in_shape, dtype=ds_in.dtype)
        for chan_id, chan in enumerate(channel_):
            bb_inp = (slice(chan, chan + 1),) + bb
            input_[chan_id] = ds_in[bb_inp].squeeze()
        input_ = np.mean(input_, axis=0)
    input_ = vu.normalize(input_)
    if sigma > 0:
        # smooth before thresholding and re-normalize to [0, 1]
        input_ = vu.apply_filter(input_, 'gaussianSmoothing', sigma)
        input_ = vu.normalize(input_)
    if threshold_mode == 'greater':
        input_ = input_ > threshold
    elif threshold_mode == 'less':
        input_ = input_ < threshold
    elif threshold_mode == 'equal':
        input_ = input_ == threshold
    else:
        raise RuntimeError("Thresholding Mode %s not supported" % threshold_mode)
    ds_out[bb] = input_.astype('uint8')
    fu.log_block_success(block_id)
def threshold(job_id, config_path):
    """ Worker entry point: threshold all blocks assigned to this job.

    Args:
        job_id (int): id of this worker job (used for logging).
        config_path (str): path to the json config written by the task.
    """
    fu.log("start processing job %i" % job_id)
    fu.log("reading config from %s" % config_path)
    with open(config_path, 'r') as f:
        config = json.load(f)
    input_path = config['input_path']
    input_key = config['input_key']
    output_path = config['output_path']
    output_key = config['output_key']
    block_list = config['block_list']
    block_shape = config['block_shape']
    threshold = config['threshold']
    threshold_mode = config['threshold_mode']
    # optional settings
    sigma = config.get('sigma_prefilter', 0)
    channel = config.get('channel', None)
    fu.log("Applying threshold %f with mode %s" % (threshold, threshold_mode))
    with vu.file_reader(input_path, 'r') as f_in, vu.file_reader(output_path) as f_out:
        ds_in = f_in[input_key]
        ds_out = f_out[output_key]
        shape = ds_in.shape
        if channel is not None:
            # channel-first 4d input: the spatial shape drops the channel axis
            shape = shape[1:]
        assert len(shape) == 3
        blocking = nt.blocking([0, 0, 0], list(shape), block_shape)
        # plain loop instead of a list comprehension built only for its
        # side effects (the old code accumulated a throw-away list)
        for block_id in block_list:
            _threshold_block(block_id, blocking,
                             ds_in, ds_out, threshold,
                             threshold_mode, channel, sigma)
    fu.log_job_success(job_id)
if __name__ == '__main__':
    path = sys.argv[1]
    assert os.path.exists(path), path
    # the job id is encoded in the config file name: ..._<job_id>.<ext>
    job_id = int(os.path.split(path)[1].split('.')[0].split('_')[-1])
    threshold(job_id, path)
| [
"cluster_tools.utils.volume_utils.file_reader",
"cluster_tools.cluster_tasks.LocalTask.default_task_config",
"numpy.mean",
"cluster_tools.utils.task_utils.DummyTask",
"cluster_tools.utils.volume_utils.apply_filter",
"cluster_tools.utils.volume_utils.normalize",
"luigi.Parameter",
"os.path.abspath",
... | [((445, 470), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (460, 470), False, 'import os\n'), ((513, 530), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (528, 530), False, 'import luigi\n'), ((547, 564), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (562, 564), False, 'import luigi\n'), ((583, 600), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (598, 600), False, 'import luigi\n'), ((618, 635), 'luigi.Parameter', 'luigi.Parameter', ([], {}), '()\n', (633, 635), False, 'import luigi\n'), ((652, 674), 'luigi.FloatParameter', 'luigi.FloatParameter', ([], {}), '()\n', (672, 674), False, 'import luigi\n'), ((696, 730), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': '"""greater"""'}), "(default='greater')\n", (711, 730), False, 'import luigi\n'), ((745, 774), 'luigi.Parameter', 'luigi.Parameter', ([], {'default': 'None'}), '(default=None)\n', (760, 774), False, 'import luigi\n'), ((4325, 4371), 'cluster_tools.utils.function_utils.log', 'fu.log', (["('start processing block %i' % block_id)"], {}), "('start processing block %i' % block_id)\n", (4331, 4371), True, 'import cluster_tools.utils.function_utils as fu\n'), ((4421, 4442), 'cluster_tools.utils.volume_utils.block_to_bb', 'vu.block_to_bb', (['block'], {}), '(block)\n', (4435, 4442), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((4453, 4474), 'cluster_tools.utils.volume_utils.block_to_bb', 'vu.block_to_bb', (['block'], {}), '(block)\n', (4467, 4474), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((4945, 4965), 'cluster_tools.utils.volume_utils.normalize', 'vu.normalize', (['input_'], {}), '(input_)\n', (4957, 4965), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((5445, 5475), 'cluster_tools.utils.function_utils.log_block_success', 'fu.log_block_success', (['block_id'], {}), '(block_id)\n', (5465, 5475), True, 'import cluster_tools.utils.function_utils as fu\n'), ((5519, 5561), 
'cluster_tools.utils.function_utils.log', 'fu.log', (["('start processing job %i' % job_id)"], {}), "('start processing job %i' % job_id)\n", (5525, 5561), True, 'import cluster_tools.utils.function_utils as fu\n'), ((5566, 5612), 'cluster_tools.utils.function_utils.log', 'fu.log', (["('reading config from %s' % config_path)"], {}), "('reading config from %s' % config_path)\n", (5572, 5612), True, 'import cluster_tools.utils.function_utils as fu\n'), ((6087, 6161), 'cluster_tools.utils.function_utils.log', 'fu.log', (["('Applying threshold %f with mode %s' % (threshold, threshold_mode))"], {}), "('Applying threshold %f with mode %s' % (threshold, threshold_mode))\n", (6093, 6161), True, 'import cluster_tools.utils.function_utils as fu\n'), ((6700, 6726), 'cluster_tools.utils.function_utils.log_job_success', 'fu.log_job_success', (['job_id'], {}), '(job_id)\n', (6718, 6726), True, 'import cluster_tools.utils.function_utils as fu\n'), ((6790, 6810), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (6804, 6810), False, 'import os\n'), ((865, 876), 'cluster_tools.utils.task_utils.DummyTask', 'DummyTask', ([], {}), '()\n', (874, 876), False, 'from cluster_tools.utils.task_utils import DummyTask\n'), ((1061, 1092), 'cluster_tools.cluster_tasks.LocalTask.default_task_config', 'LocalTask.default_task_config', ([], {}), '()\n', (1090, 1092), False, 'from cluster_tools.cluster_tasks import SlurmTask, LocalTask, LSFTask\n'), ((1462, 1507), 'cluster_tools.utils.volume_utils.get_shape', 'vu.get_shape', (['self.input_path', 'self.input_key'], {}), '(self.input_path, self.input_key)\n', (1474, 1507), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((3379, 3476), 'cluster_tools.utils.volume_utils.blocks_in_volume', 'vu.blocks_in_volume', (['shape', 'block_shape', 'roi_begin', 'roi_end'], {'block_list_path': 'block_list_path'}), '(shape, block_shape, roi_begin, roi_end, block_list_path\n =block_list_path)\n', (3398, 3476), True, 'import 
cluster_tools.utils.volume_utils as vu\n'), ((4697, 4734), 'numpy.zeros', 'np.zeros', (['in_shape'], {'dtype': 'ds_in.dtype'}), '(in_shape, dtype=ds_in.dtype)\n', (4705, 4734), True, 'import numpy as np\n'), ((4907, 4930), 'numpy.mean', 'np.mean', (['input_'], {'axis': '(0)'}), '(input_, axis=0)\n', (4914, 4930), True, 'import numpy as np\n'), ((5001, 5052), 'cluster_tools.utils.volume_utils.apply_filter', 'vu.apply_filter', (['input_', '"""gaussianSmoothing"""', 'sigma'], {}), "(input_, 'gaussianSmoothing', sigma)\n", (5016, 5052), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((5070, 5090), 'cluster_tools.utils.volume_utils.normalize', 'vu.normalize', (['input_'], {}), '(input_)\n', (5082, 5090), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((5669, 5681), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5678, 5681), False, 'import json\n'), ((6172, 6203), 'cluster_tools.utils.volume_utils.file_reader', 'vu.file_reader', (['input_path', '"""r"""'], {}), "(input_path, 'r')\n", (6186, 6203), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((6213, 6240), 'cluster_tools.utils.volume_utils.file_reader', 'vu.file_reader', (['output_path'], {}), '(output_path)\n', (6227, 6240), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((3172, 3204), 'cluster_tools.utils.volume_utils.file_reader', 'vu.file_reader', (['self.output_path'], {}), '(self.output_path)\n', (3186, 3204), True, 'import cluster_tools.utils.volume_utils as vu\n'), ((6834, 6853), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (6847, 6853), False, 'import os\n')] |
import numpy as np
class BaselineRegressor:
    """Trivial baseline that always predicts 0 for every sample."""

    def __init__(self):
        pass

    def fit(self, x_train, y_train):
        """No-op: the baseline has nothing to learn."""
        pass

    def predict(self, x_test):
        """Return one zero prediction per row of ``x_test``."""
        n_samples = x_test.shape[0]
        return np.zeros([n_samples])
| [
"numpy.zeros"
] | [((180, 207), 'numpy.zeros', 'np.zeros', (['[x_test.shape[0]]'], {}), '([x_test.shape[0]])\n', (188, 207), True, 'import numpy as np\n')] |
import numpy as np
from flatland.envs.malfunction_generators import malfunction_from_params
from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv
from flatland.envs.predictions import ShortestPathPredictorForRailEnv
from flatland.envs.rail_env import RailEnv
from flatland.envs.rail_generators import rail_from_grid_transition_map
from flatland.envs.schedule_generators import random_schedule_generator
from flatland.utils.simple_rail import make_simple_rail2
def test_random_seeding():
    """Resetting with a fixed random seed must always yield the same agent
    start positions, regardless of how often the env is rebuilt."""
    # Set fixed malfunction duration for this test
    rail, rail_map = make_simple_rail2()
    # Move target to unreachable position in order to not interfere with test
    for idx in range(100):
        env = RailEnv(width=25, height=30, rail_generator=rail_from_grid_transition_map(rail),
                      schedule_generator=random_schedule_generator(seed=12), number_of_agents=10)
        env.reset(True, True, False, random_seed=1)
        env.agents[0].target = (0, 0)
        for step in range(10):
            actions = {}
            actions[0] = 2
            env.step(actions)
        # These position checks were bare `==` expressions (no-ops);
        # they are now real assertions.
        assert env.agents[0].initial_position == (3, 2)
        assert env.agents[1].initial_position == (3, 5)
        assert env.agents[2].initial_position == (3, 6)
        assert env.agents[3].initial_position == (5, 6)
        assert env.agents[4].initial_position == (3, 4)
        assert env.agents[5].initial_position == (3, 1)
        assert env.agents[6].initial_position == (3, 9)
        assert env.agents[7].initial_position == (4, 6)
        assert env.agents[8].initial_position == (0, 3)
        assert env.agents[9].initial_position == (3, 7)
    # Test generation print
    # for a in range(env.get_num_agents()):
    #    print("env.agents[{}].initial_position == {}".format(a,env.agents[a].initial_position))
    # print("env.agents[0].initial_position == {}".format(env.agents[0].initial_position))
    # print("assert env.agents[0].position == {}".format(env.agents[0].position))
def test_seeding_and_observations():
    """Two envs that differ only in their observation builder must stay in
    lockstep when reset and stepped with the same seed and actions."""
    rail, rail_map = make_simple_rail2()
    # Global Observation
    env = RailEnv(width=25, height=30, rail_generator=rail_from_grid_transition_map(rail),
                  schedule_generator=random_schedule_generator(seed=12), number_of_agents=10,
                  obs_builder_object=GlobalObsForRailEnv())
    # Tree Observation
    env2 = RailEnv(width=25, height=30, rail_generator=rail_from_grid_transition_map(rail),
                   schedule_generator=random_schedule_generator(seed=12), number_of_agents=10,
                   obs_builder_object=TreeObsForRailEnv(max_depth=2, predictor=ShortestPathPredictorForRailEnv()))
    env.reset(False, False, False, random_seed=12)
    env2.reset(False, False, False, random_seed=12)
    # Both environments must produce the same initial start positions.
    for agent_id in range(10):
        assert env.agents[agent_id].initial_position == env2.agents[agent_id].initial_position
    # Step both envs with an identical random action sequence.
    action_dict = {}
    for step in range(10):
        for a in range(env.get_num_agents()):
            action_dict[a] = np.random.randint(4)
        env.step(action_dict)
        env2.step(action_dict)
    # Both environments must end up in the same position.
    for agent_id in range(10):
        assert env.agents[agent_id].position == env2.agents[agent_id].position
    for a in range(env.get_num_agents()):
        print("assert env.agents[{}].position == env2.agents[{}].position".format(a, a))
def test_seeding_and_malfunction():
    """Two identically configured envs reset with the same seed must yield
    the same positions, rewards and done flags at every step.

    NOTE(review): `stochastic_data` is built but never passed to either env
    (and `malfunction_from_params` is imported at module level but unused
    here) -- confirm whether malfunctions were meant to be enabled.
    """
    # Test if two different instances diverge with different observations
    rail, rail_map = make_simple_rail2()
    stochastic_data = {'prop_malfunction': 0.4,
                       'malfunction_rate': 2,
                       'min_duration': 10,
                       'max_duration': 10}
    # Make two seperate envs with different and see if the exhibit the same malfunctions
    # Global Observation
    for tests in range(1, 100):
        env = RailEnv(width=25, height=30, rail_generator=rail_from_grid_transition_map(rail),
                      schedule_generator=random_schedule_generator(), number_of_agents=10,
                      obs_builder_object=GlobalObsForRailEnv())
        # Tree Observation
        env2 = RailEnv(width=25, height=30, rail_generator=rail_from_grid_transition_map(rail),
                       schedule_generator=random_schedule_generator(), number_of_agents=10,
                       obs_builder_object=GlobalObsForRailEnv())
        env.reset(True, False, True, random_seed=tests)
        env2.reset(True, False, True, random_seed=tests)
        # Check that both environments produce the same initial start positions
        assert env.agents[0].initial_position == env2.agents[0].initial_position
        assert env.agents[1].initial_position == env2.agents[1].initial_position
        assert env.agents[2].initial_position == env2.agents[2].initial_position
        assert env.agents[3].initial_position == env2.agents[3].initial_position
        assert env.agents[4].initial_position == env2.agents[4].initial_position
        assert env.agents[5].initial_position == env2.agents[5].initial_position
        assert env.agents[6].initial_position == env2.agents[6].initial_position
        assert env.agents[7].initial_position == env2.agents[7].initial_position
        assert env.agents[8].initial_position == env2.agents[8].initial_position
        assert env.agents[9].initial_position == env2.agents[9].initial_position
        action_dict = {}
        for step in range(10):
            # identical random actions are fed to both environments
            for a in range(env.get_num_agents()):
                action = np.random.randint(4)
                action_dict[a] = action
                # print("----------------------")
                # print(env.agents[a].malfunction_data, env.agents[a].status)
                # print(env2.agents[a].malfunction_data, env2.agents[a].status)
            _, reward1, done1, _ = env.step(action_dict)
            _, reward2, done2, _ = env2.step(action_dict)
            for a in range(env.get_num_agents()):
                assert reward1[a] == reward2[a]
                assert done1[a] == done2[a]
        # Check that both environments end up in the same position
        assert env.agents[0].position == env2.agents[0].position
        assert env.agents[1].position == env2.agents[1].position
        assert env.agents[2].position == env2.agents[2].position
        assert env.agents[3].position == env2.agents[3].position
        assert env.agents[4].position == env2.agents[4].position
        assert env.agents[5].position == env2.agents[5].position
        assert env.agents[6].position == env2.agents[6].position
        assert env.agents[7].position == env2.agents[7].position
        assert env.agents[8].position == env2.agents[8].position
        assert env.agents[9].position == env2.agents[9].position
| [
"flatland.envs.predictions.ShortestPathPredictorForRailEnv",
"flatland.envs.schedule_generators.random_schedule_generator",
"flatland.envs.observations.GlobalObsForRailEnv",
"numpy.random.randint",
"flatland.utils.simple_rail.make_simple_rail2",
"flatland.envs.rail_generators.rail_from_grid_transition_map... | [((586, 605), 'flatland.utils.simple_rail.make_simple_rail2', 'make_simple_rail2', ([], {}), '()\n', (603, 605), False, 'from flatland.utils.simple_rail import make_simple_rail2\n'), ((2126, 2145), 'flatland.utils.simple_rail.make_simple_rail2', 'make_simple_rail2', ([], {}), '()\n', (2143, 2145), False, 'from flatland.utils.simple_rail import make_simple_rail2\n'), ((4931, 4950), 'flatland.utils.simple_rail.make_simple_rail2', 'make_simple_rail2', ([], {}), '()\n', (4948, 4950), False, 'from flatland.utils.simple_rail import make_simple_rail2\n'), ((2291, 2326), 'flatland.envs.rail_generators.rail_from_grid_transition_map', 'rail_from_grid_transition_map', (['rail'], {}), '(rail)\n', (2320, 2326), False, 'from flatland.envs.rail_generators import rail_from_grid_transition_map\n'), ((2365, 2399), 'flatland.envs.schedule_generators.random_schedule_generator', 'random_schedule_generator', ([], {'seed': '(12)'}), '(seed=12)\n', (2390, 2399), False, 'from flatland.envs.schedule_generators import random_schedule_generator\n'), ((2459, 2480), 'flatland.envs.observations.GlobalObsForRailEnv', 'GlobalObsForRailEnv', ([], {}), '()\n', (2478, 2480), False, 'from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv\n'), ((2560, 2595), 'flatland.envs.rail_generators.rail_from_grid_transition_map', 'rail_from_grid_transition_map', (['rail'], {}), '(rail)\n', (2589, 2595), False, 'from flatland.envs.rail_generators import rail_from_grid_transition_map\n'), ((2635, 2669), 'flatland.envs.schedule_generators.random_schedule_generator', 'random_schedule_generator', ([], {'seed': '(12)'}), '(seed=12)\n', (2660, 2669), False, 'from flatland.envs.schedule_generators import random_schedule_generator\n'), ((3874, 3894), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (3891, 3894), True, 'import numpy as np\n'), ((770, 805), 
'flatland.envs.rail_generators.rail_from_grid_transition_map', 'rail_from_grid_transition_map', (['rail'], {}), '(rail)\n', (799, 805), False, 'from flatland.envs.rail_generators import rail_from_grid_transition_map\n'), ((848, 882), 'flatland.envs.schedule_generators.random_schedule_generator', 'random_schedule_generator', ([], {'seed': '(12)'}), '(seed=12)\n', (873, 882), False, 'from flatland.envs.schedule_generators import random_schedule_generator\n'), ((5336, 5371), 'flatland.envs.rail_generators.rail_from_grid_transition_map', 'rail_from_grid_transition_map', (['rail'], {}), '(rail)\n', (5365, 5371), False, 'from flatland.envs.rail_generators import rail_from_grid_transition_map\n'), ((5414, 5441), 'flatland.envs.schedule_generators.random_schedule_generator', 'random_schedule_generator', ([], {}), '()\n', (5439, 5441), False, 'from flatland.envs.schedule_generators import random_schedule_generator\n'), ((5505, 5526), 'flatland.envs.observations.GlobalObsForRailEnv', 'GlobalObsForRailEnv', ([], {}), '()\n', (5524, 5526), False, 'from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv\n'), ((5615, 5650), 'flatland.envs.rail_generators.rail_from_grid_transition_map', 'rail_from_grid_transition_map', (['rail'], {}), '(rail)\n', (5644, 5650), False, 'from flatland.envs.rail_generators import rail_from_grid_transition_map\n'), ((5694, 5721), 'flatland.envs.schedule_generators.random_schedule_generator', 'random_schedule_generator', ([], {}), '()\n', (5719, 5721), False, 'from flatland.envs.schedule_generators import random_schedule_generator\n'), ((5786, 5807), 'flatland.envs.observations.GlobalObsForRailEnv', 'GlobalObsForRailEnv', ([], {}), '()\n', (5805, 5807), False, 'from flatland.envs.observations import GlobalObsForRailEnv, TreeObsForRailEnv\n'), ((6946, 6966), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (6963, 6966), True, 'import numpy as np\n'), ((2771, 2804), 
'flatland.envs.predictions.ShortestPathPredictorForRailEnv', 'ShortestPathPredictorForRailEnv', ([], {}), '()\n', (2802, 2804), False, 'from flatland.envs.predictions import ShortestPathPredictorForRailEnv\n')] |
import numpy as np
import random
from scipy import stats
from scipy.signal import boxcar,convolve,correlate,resample,argrelextrema
from scipy.cluster.vq import kmeans,kmeans2
from scipy.stats import pearsonr
from neuropixels import cleanAxes
from neuropixels import psth_and_raster as psth_
def smooth_boxcar(data,boxcar_size):
    """Boxcar-smooth a 1-D signal and re-align it with the input.

    Convolves `data` with a flat window of `boxcar_size` samples,
    normalizes by the window size, and slices the result back to
    ``len(data)`` so the output stays centered on the input.
    """
    kernel = boxcar(int(boxcar_size))
    padded = convolve(data, kernel) / boxcar_size
    offset = int(boxcar_size / 2)
    return padded[offset:len(data) + offset]
def moving_average(a, n=3) :
    """Return the n-point running mean of `a` using a cumulative sum."""
    csum = np.cumsum(a, dtype=float)
    csum[n:] -= csum[:-n]
    return csum[n - 1:] / n
def get_peak_waveform_from_template(template):
    """Return the single waveform in `template` with the largest |amplitude|.

    Iterates the rows of ``template.T`` and keeps the one whose peak
    absolute value is greatest (first winner on ties).  Falls back to a
    zero vector when the template is identically zero.
    """
    best_amp = 0  # renamed: the original used `max`, shadowing the builtin
    peak = np.zeros(np.shape(template.T)[0])
    for wv in template.T:
        amp = np.max(np.abs(wv))
        if amp > best_amp:
            best_amp = amp
            peak = wv
    return peak
def precision(spike_times,stimulus_times,boxcar_size = 5,precision_type='first',pre=0.,post=15.,binsize=0.01,threshold=0.05,find_events=True):
    # precision - jitter of first spike, jitter of total spikes
    """Estimate spike-timing precision per detected response event.

    For each event onset (detected via get_events, or the single onset
    0. when find_events is False), collects either the first spike after
    the onset per trial (precision_type='first') or all spikes in the
    event window, and measures jitter as the spread between the medians
    of the lower and upper halves of those spike times.

    Returns (mean, std, array) of per-event precisions in milliseconds
    (NaN events dropped).
    """
    if find_events:
        smoothed,all_events,events = get_events(spike_times,stimulus_times,boxcar_size = boxcar_size,threshold=threshold,pre=pre,post=post,binsize=binsize)
    else:
        events = [0.]
    #print events
    #get spikes for each event
    event_precision = []
    for i,event in enumerate(events):
        all_spikes = []
        first_spikes = []
        for trial in stimulus_times:
            event_start = trial + event
            # NOTE(review): `i == len(events)` can never be True under
            # enumerate (i stops at len(events)-1), so this branch is dead
            # and the window is always 50 ms — and `event[i+1]` would
            # TypeError on a scalar anyway.  Left as-is; confirm intent.
            if i == len(events):
                if event[i+1] - event > 0.05:
                    event_end = trial + event[i+1]
                else:
                    event_end = trial + event + 0.05
            else:
                event_end = trial + event + 0.05
            indices = np.where((spike_times > event_start) & (spike_times < event_end))[0]
            # NOTE(review): this indexes [0][0] without a guard — it will
            # IndexError if no spike follows event_start; verify callers
            # guarantee trailing spikes.
            first_spike = spike_times[np.where(spike_times>event_start)[0][0]]-event_start
            #find jitter of first spikes
            if first_spike < .1: #arbitrary cutoff because we don't trust identified events that don't have a spike within 100 msec
                first_spikes.extend([first_spike])
            #find jitter of all spikes
            event_spikes = np.array(spike_times[indices])-event_start
            all_spikes.extend(event_spikes)
        if precision_type =='first':
            #event_precision.extend([np.median(np.array(first_spikes))])
            #event_precision.extend([np.median(np.array(first_spikes)-np.min(first_spikes))])
            all_event_spikes = np.sort(np.array(first_spikes))
        else:
            all_event_spikes = np.sort(np.array(all_spikes).flatten())
        #print all_event_spikes
        # jitter = median(upper half) - median(lower half) of the sorted
        # spike times, split around the overall median
        first_half = all_event_spikes[:np.where(all_event_spikes<np.median(all_event_spikes))[0][-1]]
        second_half = all_event_spikes[np.where(all_event_spikes>np.median(all_event_spikes))[0][0]:]
        event_precision.extend([np.median(second_half)-np.median(first_half)])
    event_precision = np.array(event_precision)
    # drop NaN events and convert seconds -> milliseconds
    event_precision = event_precision[~np.isnan(event_precision)] * 1000.
    return (np.mean(event_precision),np.std(event_precision),event_precision)
def get_binarized(spike_times,stimulus_times,pre=0.,post=15.,convolve=0.):
    """Binarize spikes into 1-ms bins for every stimulus presentation.

    Returns a list with one 0/1 array per trial; each trial is
    boxcar-smoothed when `convolve` > 0.
    """
    trials = psth_.raster(spike_times,stimulus_times,pre=pre,post=post,timeDomain=True,output='data')
    n_bins = int((post - pre) * 1000)  # 1-msec bins
    binarized = []
    for trial in trials:
        row = np.zeros(n_bins)
        for spk in trial:
            if pre < spk < post:
                row[int(np.floor(spk * 1000))] = 1
        if convolve > 0.001:
            row = smooth_boxcar(row, convolve)
        binarized.append(row)
    return binarized
def get_binned(spike_times,stimulus_times,binsize,pre=0.,post=15.,convolve=0.):
    """Histogram spikes into `binsize`-second bins for every trial.

    Returns a list with one count array per stimulus presentation;
    each trial is boxcar-smoothed when `convolve` > 0.
    """
    trials = psth_.raster(spike_times,stimulus_times,pre=pre,post=post,timeDomain=True,output='data')
    n_bins = int((post - pre) * 1000 / (1000 * binsize) + 1)
    binned = []
    for trial in trials:
        row = np.zeros(n_bins)
        for spk in trial:
            if pre < spk < post:
                row[int(np.floor((spk - pre) * 1000 / (1000 * binsize)))] += 1
        if convolve > 0.001:
            row = smooth_boxcar(row, convolve)
        binned.append(row)
    return binned
def reliability(spike_times,stimulus_times,binsize,pre=0.,post=15.):
    """Trial-to-trial reproducibility of a binned spike train.

    Sums the cosine similarity (normalized inner product) over every
    pair of trials and scales by n_trials / 2; pairs where either trial
    is empty contribute zero.
    """
    binned = get_binned(spike_times,stimulus_times,binsize,pre=pre,post=post)
    n_trials = np.shape(binned)[0]
    total = 0
    for a in range(n_trials):
        vec_a = binned[a]
        for b in np.arange(a + 1, n_trials, 1):
            vec_b = binned[b][:]
            if np.sum(vec_a) > 0 and np.sum(vec_b) > 0:
                total += np.inner(vec_a, vec_b) / np.inner(
                    np.linalg.norm(vec_a), np.linalg.norm(vec_b))
            else:
                total += 0
    return total / (n_trials / 2.)
def fano(spike_times,stimulus_times,pre=0.,post=15.,binsize=0.01,boxcar_size = 5,counting_window=0.3,threshold=0.2,by_event=False):
    """Fano factor (variance / mean of spike counts) across trials.

    Spikes are counted in a `counting_window`-second window after each
    event onset — onsets come from get_events when `by_event` is True,
    otherwise the single onset at t = 0.  Returns
    (median Fano over events, counts from the last event, per-event Fanos).
    """
    if by_event:
        smoothed, all_events, events = get_events(
            spike_times, stimulus_times, boxcar_size=boxcar_size,
            threshold=threshold, pre=pre, post=post, binsize=binsize)
    else:
        events = [0]
    fanos = []
    for onset in events:
        counts = []
        for trial_start in stimulus_times:
            window_lo = trial_start + onset
            window_hi = window_lo + counting_window
            in_window = np.where(
                (spike_times > window_lo) & (spike_times < window_hi))[0]
            counts.append(len(in_window))
        fanos.append(np.std(counts) ** 2 / np.mean(counts))
    return np.median(fanos), counts, fanos
def get_events(spike_times,stimulus_times,threshold=.05,boxcar_size = 15,pre=0.,post=15.,binsize=0.001):
    """Detect response "events" as local minima of the smoothed PSTH that
    are followed by a peak exceeding ``threshold * max(psth)``.

    Returns (smoothed psth,
             all local-minima times in seconds,
             accepted event onsets in seconds, shifted left by half the
             boxcar width to undo the smoothing delay).
    """
    (edges, psth, variance) = psth_.psth_line(spike_times,
                                    stimulus_times,
                                    pre=pre, post=post, binsize=binsize,
                                    output='p', timeDomain=True)
    numbins = int((post - pre) / binsize)
    # first, find events:
    smoothed = smooth_boxcar(psth[:numbins], int(boxcar_size))
    # local minima: smaller than both neighbours two bins away
    # (from http://stackoverflow.com/questions/4624970)
    minima = np.where(np.r_[True, smoothed[2:] > smoothed[:-2]] &
                      (np.r_[smoothed[:-2] > smoothed[2:], True] == True))[0]
    threshold = threshold * np.max(psth)
    good_minima = []
    for i, minimum in enumerate(minima[:-1]):
        if minima[i + 1] * binsize - minima[i] * binsize > 0.3:
            num_bins_after_minimum = 0.3 / binsize
        else:
            num_bins_after_minimum = minima[i + 1] - minima[i]
        # BUG FIX: num_bins_after_minimum can be a float (0.3 / binsize),
        # which is not a valid slice index; the resulting TypeError was
        # silently swallowed by a bare `except`, dropping valid events.
        stop = minimum + int(num_bins_after_minimum)
        try:
            if np.max(psth[minimum:stop]) > threshold:
                good_minima.extend([minimum])
        except ValueError:
            # empty slice at the edge of the psth
            pass
    return (smoothed, minima * binsize,
            np.array(good_minima) * binsize - (boxcar_size / 2.) * binsize)
def entropy(spike_times,stimulus_times,wordlength,binsize=0.001,pre=0.,post=15.):
    """Mean word-entropy rate of a binarized spike train.

    For every start bin t, builds `wordlength`-bin binary words across
    trials, estimates the word distribution, and computes the entropy
    rate H = -(1 / (wordlength * binsize)) * sum p log2 p as in
    Reinagel & Reid 2000 and Kumbhani et al., 2007.  Returns the mean
    over start bins.

    NOTE(review): binarization is always at 1-ms resolution via
    get_binarized; the `binsize` argument only scales the entropy-rate
    normalization — confirm this is intended.
    """
    binarized=get_binarized(spike_times,stimulus_times,pre=pre,post=post)
    # create words of length wordlength
    entropies_per_time=[]
    for t in range(len(binarized[0])):
        words = []
        for trial in binarized:
            if t<len(trial) - wordlength: #cutoff of the end of each trial because there aren't enough bins left to make the word
                word = trial[t:t+wordlength]
                words.append(word)
        #make a distribution of the words
        p = {}
        #find all the words that actually occured. and the frequency of their occurence
        #from http://stackoverflow.com/questions/16970982/find-unique-rows-in-numpy-array
        # and http://stackoverflow.com/questions/33235135/count-occurrences-of-arrays-in-multidimensional-arrays-in-python
        # view each row as a single void scalar so np.unique can count
        # duplicate rows (words) in one pass
        words = np.array(words)
        b = np.ascontiguousarray(words).view(np.dtype((np.void, words.dtype.itemsize * words.shape[1])))
        _, idx,counts = np.unique(b, return_index=True,return_counts=True)
        possible_words = words[idx]
        p = dict(zip([str(word) for word in possible_words],counts/float(np.shape(np.array(words))[0])))
        print('all the words that occured:'+str(np.shape(np.array(words))[0]))
        # calculate entropy as in reinagel and reid 2000; kumbhani et al., 2007
        sum = 0
        for word in possible_words:
            sum += p[str(word)] * np.log2(p[str(word)])
        H = (-1 / (wordlength*binsize)) * sum
        entropies_per_time.append(H)
    return np.mean(entropies_per_time)
def fano2(a):
    """Per-bin Fano factor of an n_trials x n_bins response matrix.

    Computes variance / mean across trials (axis 0) for each time bin.
    """
    per_bin_std = np.std(a, axis=0)
    return per_bin_std ** 2 / np.mean(a, axis=0)
def ccmax(a):
    """Mean pairwise Pearson correlation across trials.

    Correlates every pair of rows of the n_trials x n_bins matrix `a`
    and returns the nan-mean of the strictly-upper-triangular entries.
    """
    n_trials = a.shape[0]
    corr = np.empty((n_trials, n_trials))
    for row in range(n_trials):
        for col in range(n_trials):
            corr[row, col] = pearsonr(a[row, :], a[col, :])[0]
    iu = np.triu_indices(n_trials, k=1)
    return np.nanmean(corr[iu[0], iu[1]])
def mutual_information(spike_times,stimulus_times,wordlength,binsize=0.001,pre=0.,post=15.,method='poisson'):
    """Estimate mutual information as total minus noise entropy.

    Total entropy is computed from a surrogate spike train — a
    rate-matched Poisson train ('poisson') or trial-shifted spikes
    ('shift') — and noise entropy from the real train.

    Returns (total, noise, total - noise, (total - noise) / (post - pre)).
    """
    indices = np.where((spike_times > stimulus_times[0]) &
                       (spike_times < stimulus_times[-1]))[0]
    if method == 'poisson':
        reference_spikes = possion_times(np.array(spike_times)[indices])
    if method == 'shift':
        reference_spikes = shifted_times(np.array(spike_times)[indices],
                                         stimulus_times)
    # BUG FIX: binsize, pre and post were previously hard-coded to
    # 0.001, 0. and 15. here, silently ignoring the caller's arguments.
    total = entropy(reference_spikes, stimulus_times, wordlength,
                    binsize=binsize, pre=pre, post=post)
    noise = entropy(np.array(spike_times), stimulus_times, wordlength,
                    binsize=binsize, pre=pre, post=post)
    return total, noise, total - noise, (total - noise) / (post - pre)
def z_value(spike_times,stimulus_times,binsize,pre=0.,post=15.,method='shift',wordlength=10):
    """Z statistic of Reinagel and Reid (2000):

        Z(binsize) = lim I(L, binsize) - I(L=1, binsize)

    "The term I(L=1,binsize) represents the estimate of information rate
    that would be obtained on the approximation that time bins are
    statistically independent within the spike train."

    BUG FIX: the original body referenced an undefined `wordlength` and
    returned an undefined `z` (guaranteed NameError).  `wordlength` is
    now a keyword argument, and z is the difference of the information
    rates (element [3] of mutual_information's return tuple — confirm
    against the intended definition).
    """
    lim_I = mutual_information(spike_times, stimulus_times, wordlength,
                               binsize=binsize, pre=pre, post=post,
                               method=method)
    I1 = mutual_information(spike_times, stimulus_times, 1,
                            binsize=binsize, pre=pre, post=post,
                            method=method)
    z = lim_I[3] - I1[3]
    return z
def possion_times(spike_times):
#given an input spike train, make the times a rate-matched Poisson process
rate = len(np.array(spike_times)) / (np.array(spike_times)[-1] - np.array(spike_times)[0])
t = np.array(spike_times)[0]
poiss = [t]
for i in range(len(spike_times)-1):
t+=random.expovariate(rate)
poiss.append(t)
return np.array(poiss)
def shuffled_times(spike_times,window):
    """Placeholder: intended to shuffle spike times while preserving
    structure within `window`; currently returns the input unchanged."""
    return spike_times
def shifted_times(spike_times,stimulus_times):
    #given an input spike train and stimulus times,
    #shift the spike times in each interval by an random amount, with wrapping
    """Randomly shift spikes within each inter-stimulus interval, wrapping
    times that fall past the interval's end back to its start; the tail
    after the last stimulus is shifted the same way.  Returns the sorted
    shifted times re-referenced to the first stimulus.
    """
    shifted_times = np.zeros(len(spike_times))
    for i,start in enumerate(stimulus_times[:-1]):
        # NOTE(review): the trailing `- 1` shifts every selected index
        # down by one, and the `- i*(interval)` term below rescales the
        # offsets per interval — both look suspicious; confirm intent.
        indices = np.where((spike_times > start) & (spike_times <= stimulus_times[i+1]))[0] - 1
        times = spike_times[indices]
        # integer-valued random offset within the interval length
        offset = np.floor(np.random.rand() * (stimulus_times[i+1] - start))
        offset_times = times + offset + start - i*(stimulus_times[i+1] - start)
        # wrap anything pushed past the interval end back by one interval
        wrapped_times = np.array([b if b < stimulus_times[i+1] else b-(stimulus_times[i+1]-start) for b in offset_times])
        #print str(start)+' '+str(offset)+' '+str(offset_times)+' '+str(wrapped_times)
        shifted_times[indices] = wrapped_times
    # handle spikes after the final stimulus with a continuous offset
    indices = np.where(spike_times > stimulus_times[-1])[0]
    times = spike_times[indices]
    offset = np.random.rand() * (spike_times[-1] - stimulus_times[-1])
    offset_times = times + offset
    wrapped_times = np.array([b if b < spike_times[-1] else b-(spike_times[-1]-stimulus_times[-1]) for b in offset_times])
    shifted_times[indices] = wrapped_times
    return np.sort(shifted_times) - stimulus_times[0]
# def Rjitter_pair(spike_times, other_spike times):
# spearmanr(spike_times,other_spike,nan_policy='omit')
| [
"numpy.abs",
"numpy.sum",
"numpy.empty",
"numpy.floor",
"numpy.isnan",
"numpy.shape",
"numpy.mean",
"numpy.linalg.norm",
"numpy.inner",
"numpy.unique",
"numpy.nanmean",
"random.expovariate",
"numpy.std",
"neuropixels.psth_and_raster.raster",
"numpy.cumsum",
"numpy.max",
"numpy.median... | [((529, 554), 'numpy.cumsum', 'np.cumsum', (['a'], {'dtype': 'float'}), '(a, dtype=float)\n', (538, 554), True, 'import numpy as np\n'), ((3096, 3121), 'numpy.array', 'np.array', (['event_precision'], {}), '(event_precision)\n', (3104, 3121), True, 'import numpy as np\n'), ((3369, 3467), 'neuropixels.psth_and_raster.raster', 'psth_.raster', (['spike_times', 'stimulus_times'], {'pre': 'pre', 'post': 'post', 'timeDomain': '(True)', 'output': '"""data"""'}), "(spike_times, stimulus_times, pre=pre, post=post, timeDomain=\n True, output='data')\n", (3381, 3467), True, 'from neuropixels import psth_and_raster as psth_\n'), ((3946, 4044), 'neuropixels.psth_and_raster.raster', 'psth_.raster', (['spike_times', 'stimulus_times'], {'pre': 'pre', 'post': 'post', 'timeDomain': '(True)', 'output': '"""data"""'}), "(spike_times, stimulus_times, pre=pre, post=post, timeDomain=\n True, output='data')\n", (3958, 4044), True, 'from neuropixels import psth_and_raster as psth_\n'), ((6128, 6243), 'neuropixels.psth_and_raster.psth_line', 'psth_.psth_line', (['spike_times', 'stimulus_times'], {'pre': 'pre', 'post': 'post', 'binsize': 'binsize', 'output': '"""p"""', 'timeDomain': '(True)'}), "(spike_times, stimulus_times, pre=pre, post=post, binsize=\n binsize, output='p', timeDomain=True)\n", (6143, 6243), True, 'from neuropixels import psth_and_raster as psth_\n'), ((9088, 9115), 'numpy.mean', 'np.mean', (['entropies_per_time'], {}), '(entropies_per_time)\n', (9095, 9115), True, 'import numpy as np\n'), ((9554, 9582), 'numpy.empty', 'np.empty', (['(ntrials, ntrials)'], {}), '((ntrials, ntrials))\n', (9562, 9582), True, 'import numpy as np\n'), ((9731, 9760), 'numpy.triu_indices', 'np.triu_indices', (['ntrials'], {'k': '(1)'}), '(ntrials, k=1)\n', (9746, 9760), True, 'import numpy as np\n'), ((9813, 9830), 'numpy.nanmean', 'np.nanmean', (['upper'], {}), '(upper)\n', (9823, 9830), True, 'import numpy as np\n'), ((11527, 11542), 'numpy.array', 'np.array', (['poiss'], 
{}), '(poiss)\n', (11535, 11542), True, 'import numpy as np\n'), ((12737, 12849), 'numpy.array', 'np.array', (['[(b if b < spike_times[-1] else b - (spike_times[-1] - stimulus_times[-1])) for\n b in offset_times]'], {}), '([(b if b < spike_times[-1] else b - (spike_times[-1] -\n stimulus_times[-1])) for b in offset_times])\n', (12745, 12849), True, 'import numpy as np\n'), ((3213, 3237), 'numpy.mean', 'np.mean', (['event_precision'], {}), '(event_precision)\n', (3220, 3237), True, 'import numpy as np\n'), ((3238, 3261), 'numpy.std', 'np.std', (['event_precision'], {}), '(event_precision)\n', (3244, 3261), True, 'import numpy as np\n'), ((5962, 5978), 'numpy.median', 'np.median', (['fanos'], {}), '(fanos)\n', (5971, 5978), True, 'import numpy as np\n'), ((6468, 6575), 'numpy.where', 'np.where', (['(np.r_[True, smoothed[2:] > smoothed[:-2]] & np.r_[smoothed[:-2] > smoothed\n [2:], True] == True)'], {}), '(np.r_[True, smoothed[2:] > smoothed[:-2]] & np.r_[smoothed[:-2] >\n smoothed[2:], True] == True)\n', (6476, 6575), True, 'import numpy as np\n'), ((6736, 6748), 'numpy.max', 'np.max', (['psth'], {}), '(psth)\n', (6742, 6748), True, 'import numpy as np\n'), ((8379, 8394), 'numpy.array', 'np.array', (['words'], {}), '(words)\n', (8387, 8394), True, 'import numpy as np\n'), ((8524, 8575), 'numpy.unique', 'np.unique', (['b'], {'return_index': '(True)', 'return_counts': '(True)'}), '(b, return_index=True, return_counts=True)\n', (8533, 8575), True, 'import numpy as np\n'), ((9332, 9350), 'numpy.mean', 'np.mean', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (9339, 9350), True, 'import numpy as np\n'), ((9956, 10041), 'numpy.where', 'np.where', (['((spike_times > stimulus_times[0]) & (spike_times < stimulus_times[-1]))'], {}), '((spike_times > stimulus_times[0]) & (spike_times < stimulus_times[-1])\n )\n', (9964, 10041), True, 'import numpy as np\n'), ((10370, 10391), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (10378, 10391), True, 'import numpy as 
np\n'), ((11375, 11396), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (11383, 11396), True, 'import numpy as np\n'), ((11467, 11491), 'random.expovariate', 'random.expovariate', (['rate'], {}), '(rate)\n', (11485, 11491), False, 'import random\n'), ((12286, 12397), 'numpy.array', 'np.array', (['[(b if b < stimulus_times[i + 1] else b - (stimulus_times[i + 1] - start)) for\n b in offset_times]'], {}), '([(b if b < stimulus_times[i + 1] else b - (stimulus_times[i + 1] -\n start)) for b in offset_times])\n', (12294, 12397), True, 'import numpy as np\n'), ((12533, 12575), 'numpy.where', 'np.where', (['(spike_times > stimulus_times[-1])'], {}), '(spike_times > stimulus_times[-1])\n', (12541, 12575), True, 'import numpy as np\n'), ((12625, 12641), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12639, 12641), True, 'import numpy as np\n'), ((12894, 12916), 'numpy.sort', 'np.sort', (['shifted_times'], {}), '(shifted_times)\n', (12901, 12916), True, 'import numpy as np\n'), ((705, 725), 'numpy.shape', 'np.shape', (['template.T'], {}), '(template.T)\n', (713, 725), True, 'import numpy as np\n'), ((8440, 8498), 'numpy.dtype', 'np.dtype', (['(np.void, words.dtype.itemsize * words.shape[1])'], {}), '((np.void, words.dtype.itemsize * words.shape[1]))\n', (8448, 8498), True, 'import numpy as np\n'), ((9312, 9329), 'numpy.std', 'np.std', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (9318, 9329), True, 'import numpy as np\n'), ((9662, 9688), 'scipy.stats.pearsonr', 'pearsonr', (['a[i, :]', 'a[j, :]'], {}), '(a[i, :], a[j, :])\n', (9670, 9688), False, 'from scipy.stats import pearsonr\n'), ((11287, 11308), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (11295, 11308), True, 'import numpy as np\n'), ((787, 797), 'numpy.abs', 'np.abs', (['wv'], {}), '(wv)\n', (793, 797), True, 'import numpy as np\n'), ((831, 841), 'numpy.abs', 'np.abs', (['wv'], {}), '(wv)\n', (837, 841), True, 'import numpy as np\n'), ((1846, 1911), 
'numpy.where', 'np.where', (['((spike_times > event_start) & (spike_times < event_end))'], {}), '((spike_times > event_start) & (spike_times < event_end))\n', (1854, 1911), True, 'import numpy as np\n'), ((2299, 2329), 'numpy.array', 'np.array', (['spike_times[indices]'], {}), '(spike_times[indices])\n', (2307, 2329), True, 'import numpy as np\n'), ((2632, 2654), 'numpy.array', 'np.array', (['first_spikes'], {}), '(first_spikes)\n', (2640, 2654), True, 'import numpy as np\n'), ((3161, 3186), 'numpy.isnan', 'np.isnan', (['event_precision'], {}), '(event_precision)\n', (3169, 3186), True, 'import numpy as np\n'), ((4822, 4841), 'numpy.shape', 'np.shape', (['binarized'], {}), '(binarized)\n', (4830, 4841), True, 'import numpy as np\n'), ((5170, 5189), 'numpy.shape', 'np.shape', (['binarized'], {}), '(binarized)\n', (5178, 5189), True, 'import numpy as np\n'), ((5769, 5834), 'numpy.where', 'np.where', (['((spike_times > event_start) & (spike_times < event_end))'], {}), '((spike_times > event_start) & (spike_times < event_end))\n', (5777, 5834), True, 'import numpy as np\n'), ((5934, 5949), 'numpy.mean', 'np.mean', (['counts'], {}), '(counts)\n', (5941, 5949), True, 'import numpy as np\n'), ((7048, 7102), 'numpy.max', 'np.max', (['psth[minimum:minimum + num_bins_after_minimum]'], {}), '(psth[minimum:minimum + num_bins_after_minimum])\n', (7054, 7102), True, 'import numpy as np\n'), ((7422, 7443), 'numpy.array', 'np.array', (['good_minima'], {}), '(good_minima)\n', (7430, 7443), True, 'import numpy as np\n'), ((8407, 8434), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['words'], {}), '(words)\n', (8427, 8434), True, 'import numpy as np\n'), ((10110, 10131), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (10118, 10131), True, 'import numpy as np\n'), ((10209, 10230), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (10217, 10230), True, 'import numpy as np\n'), ((11313, 11334), 'numpy.array', 'np.array', (['spike_times'], 
{}), '(spike_times)\n', (11321, 11334), True, 'import numpy as np\n'), ((11341, 11362), 'numpy.array', 'np.array', (['spike_times'], {}), '(spike_times)\n', (11349, 11362), True, 'import numpy as np\n'), ((11991, 12063), 'numpy.where', 'np.where', (['((spike_times > start) & (spike_times <= stimulus_times[i + 1]))'], {}), '((spike_times > start) & (spike_times <= stimulus_times[i + 1]))\n', (11999, 12063), True, 'import numpy as np\n'), ((12132, 12148), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12146, 12148), True, 'import numpy as np\n'), ((3022, 3044), 'numpy.median', 'np.median', (['second_half'], {}), '(second_half)\n', (3031, 3044), True, 'import numpy as np\n'), ((3045, 3066), 'numpy.median', 'np.median', (['first_half'], {}), '(first_half)\n', (3054, 3066), True, 'import numpy as np\n'), ((4897, 4906), 'numpy.sum', 'np.sum', (['i'], {}), '(i)\n', (4903, 4906), True, 'import numpy as np\n'), ((4915, 4924), 'numpy.sum', 'np.sum', (['j'], {}), '(j)\n', (4921, 4924), True, 'import numpy as np\n'), ((4953, 4967), 'numpy.inner', 'np.inner', (['i', 'j'], {}), '(i, j)\n', (4961, 4967), True, 'import numpy as np\n'), ((5914, 5928), 'numpy.std', 'np.std', (['counts'], {}), '(counts)\n', (5920, 5928), True, 'import numpy as np\n'), ((2709, 2729), 'numpy.array', 'np.array', (['all_spikes'], {}), '(all_spikes)\n', (2717, 2729), True, 'import numpy as np\n'), ((3675, 3697), 'numpy.floor', 'np.floor', (['(spike * 1000)'], {}), '(spike * 1000)\n', (3683, 3697), True, 'import numpy as np\n'), ((4314, 4363), 'numpy.floor', 'np.floor', (['((spike - pre) * 1000 / (1000 * binsize))'], {}), '((spike - pre) * 1000 / (1000 * binsize))\n', (4322, 4363), True, 'import numpy as np\n'), ((4978, 4995), 'numpy.linalg.norm', 'np.linalg.norm', (['i'], {}), '(i)\n', (4992, 4995), True, 'import numpy as np\n'), ((4996, 5013), 'numpy.linalg.norm', 'np.linalg.norm', (['j'], {}), '(j)\n', (5010, 5013), True, 'import numpy as np\n'), ((1955, 1990), 'numpy.where', 'np.where', 
(['(spike_times > event_start)'], {}), '(spike_times > event_start)\n', (1963, 1990), True, 'import numpy as np\n'), ((8773, 8788), 'numpy.array', 'np.array', (['words'], {}), '(words)\n', (8781, 8788), True, 'import numpy as np\n'), ((2851, 2878), 'numpy.median', 'np.median', (['all_event_spikes'], {}), '(all_event_spikes)\n', (2860, 2878), True, 'import numpy as np\n'), ((2953, 2980), 'numpy.median', 'np.median', (['all_event_spikes'], {}), '(all_event_spikes)\n', (2962, 2980), True, 'import numpy as np\n'), ((8693, 8708), 'numpy.array', 'np.array', (['words'], {}), '(words)\n', (8701, 8708), True, 'import numpy as np\n')] |
import math
import numpy as np
from matplotlib import pyplot as plt
from rlkit.visualization import visualization_util as vu
class Dynamics(object):
    """Stochastic dynamics: perturb samples with Gaussian noise, then
    project them back with the supplied projection map.

    `projection` maps an array of samples onto the valid set; `noise`
    scales the i.i.d. standard-normal perturbation.
    """
    def __init__(self, projection, noise):
        self.projection = projection
        self.noise = noise

    def __call__(self, samples):
        perturbed = samples + self.noise * np.random.randn(*samples.shape)
        return self.projection(perturbed)
def plot_curves(names_and_data, report):
    """Plot each (name, data) series in its own subplot and attach the
    resulting figure to `report` under "Final Distribution"."""
    n_curves = len(names_and_data)
    if n_curves < 4:
        n_rows, n_cols = 1, n_curves
    else:
        n_cols = n_curves // 2
        n_rows = math.ceil(float(n_curves) / n_cols)
    plt.figure()
    for idx, (name, data) in enumerate(names_and_data):
        plt.subplot(n_rows, n_cols, idx + 1)
        plt.plot(np.array(data))
        plt.title(name)
    img = vu.save_image(plt.gcf())
    report.add_image(img, "Final Distribution")
def visualize_samples(
        samples,
        report,
        title="Samples",
):
    """Scatter-plot 2-D `samples` on a fixed [-1.5, 1.5]^2 window,
    attach the image to `report` under `title`, and return the image."""
    plt.figure()
    plt.plot(samples[:, 0], samples[:, 1], '.')
    for set_lim in (plt.xlim, plt.ylim):
        set_lim(-1.5, 1.5)
    plt.title(title)
    sample_img = vu.save_image(plt.gcf())
    report.add_image(sample_img, title)
    return sample_img
def visualize_samples_and_projection(
        samples,
        report,
        post_dynamics_samples=None,
        dynamics=None,
        title="Samples",
):
    """Side-by-side scatter of `samples` and their image under the
    dynamics.

    Either `post_dynamics_samples` or `dynamics` must be given; when
    only `dynamics` is supplied it is applied to `samples`.  Attaches
    the figure to `report` under `title` and returns the image.
    """
    assert post_dynamics_samples is not None or dynamics is not None
    if post_dynamics_samples is None:
        post_dynamics_samples = dynamics(samples)
    plt.figure()
    panels = [(samples, title),
              (post_dynamics_samples, "Projected " + title)]
    for col, (points, subtitle) in enumerate(panels):
        plt.subplot(1, 2, col + 1)
        plt.plot(points[:, 0], points[:, 1], '.')
        plt.xlim(-1.5, 1.5)
        plt.ylim(-1.5, 1.5)
        plt.title(subtitle)
    sample_img = vu.save_image(plt.gcf())
    report.add_image(sample_img, title)
    return sample_img
def prob_to_weight(prob, skew_config):
    """Convert sample probabilities into normalized skew weights.

    skew_config keys:
        'weight_type': one of 'inv_p', 'nll', 'sqrt_inv_p', 'exp'
        'minimum_prob': floor applied to `prob` before weighting
            (falsy value disables the floor)
        'alpha': exponent, read only when weight_type == 'exp'

    Infinite and NaN weights (e.g. from zero probabilities) are zeroed
    before normalization.  Returns weights scaled to sum to one.
    """
    weight_type = skew_config['weight_type']
    min_prob = skew_config['minimum_prob']
    if min_prob:
        prob = np.maximum(prob, min_prob)
    with np.errstate(divide='ignore', invalid='ignore'):
        if weight_type == 'inv_p':
            weights = 1. / prob
        elif weight_type == 'nll':
            weights = - np.log(prob)
        elif weight_type == 'sqrt_inv_p':
            weights = (1. / prob) ** 0.5
        elif weight_type == 'exp':
            exp = skew_config['alpha']
            weights = prob ** exp
        else:
            raise NotImplementedError()
    weights[weights == np.inf] = 0
    weights[weights == -np.inf] = 0
    # BUG FIX: `weights == -np.nan` is always False (NaN never compares
    # equal to anything), so NaN weights were previously left in place
    # and poisoned the normalization.  Use isnan instead.
    weights[np.isnan(weights)] = 0
    return weights / weights.flatten().sum()
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"numpy.maximum",
"rlkit.visualization.visualization_util.save_image",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.random.randn",
"numpy.log",
"numpy.errstate",
"matplotlib.pyplot.figure",
"numpy.ar... | [((679, 691), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (689, 691), True, 'from matplotlib import pyplot as plt\n'), ((870, 879), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (877, 879), True, 'from matplotlib import pyplot as plt\n'), ((890, 908), 'rlkit.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (903, 908), True, 'from rlkit.visualization import visualization_util as vu\n'), ((1047, 1059), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1057, 1059), True, 'from matplotlib import pyplot as plt\n'), ((1064, 1107), 'matplotlib.pyplot.plot', 'plt.plot', (['samples[:, 0]', 'samples[:, 1]', '"""."""'], {}), "(samples[:, 0], samples[:, 1], '.')\n", (1072, 1107), True, 'from matplotlib import pyplot as plt\n'), ((1112, 1131), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1120, 1131), True, 'from matplotlib import pyplot as plt\n'), ((1136, 1155), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1144, 1155), True, 'from matplotlib import pyplot as plt\n'), ((1160, 1176), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1169, 1176), True, 'from matplotlib import pyplot as plt\n'), ((1188, 1197), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1195, 1197), True, 'from matplotlib import pyplot as plt\n'), ((1215, 1233), 'rlkit.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (1228, 1233), True, 'from rlkit.visualization import visualization_util as vu\n'), ((1529, 1541), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1539, 1541), True, 'from matplotlib import pyplot as plt\n'), ((1546, 1566), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1557, 1566), True, 'from matplotlib import pyplot as plt\n'), ((1571, 1614), 'matplotlib.pyplot.plot', 'plt.plot', (['samples[:, 0]', 'samples[:, 
1]', '"""."""'], {}), "(samples[:, 0], samples[:, 1], '.')\n", (1579, 1614), True, 'from matplotlib import pyplot as plt\n'), ((1619, 1638), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1627, 1638), True, 'from matplotlib import pyplot as plt\n'), ((1643, 1662), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1651, 1662), True, 'from matplotlib import pyplot as plt\n'), ((1667, 1683), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1676, 1683), True, 'from matplotlib import pyplot as plt\n'), ((1777, 1797), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1788, 1797), True, 'from matplotlib import pyplot as plt\n'), ((1802, 1873), 'matplotlib.pyplot.plot', 'plt.plot', (['post_dynamics_samples[:, 0]', 'post_dynamics_samples[:, 1]', '"""."""'], {}), "(post_dynamics_samples[:, 0], post_dynamics_samples[:, 1], '.')\n", (1810, 1873), True, 'from matplotlib import pyplot as plt\n'), ((1878, 1897), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1886, 1897), True, 'from matplotlib import pyplot as plt\n'), ((1902, 1921), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1910, 1921), True, 'from matplotlib import pyplot as plt\n'), ((1926, 1957), 'matplotlib.pyplot.title', 'plt.title', (["('Projected ' + title)"], {}), "('Projected ' + title)\n", (1935, 1957), True, 'from matplotlib import pyplot as plt\n'), ((1969, 1978), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1976, 1978), True, 'from matplotlib import pyplot as plt\n'), ((1996, 2014), 'rlkit.visualization.visualization_util.save_image', 'vu.save_image', (['fig'], {}), '(fig)\n', (2009, 2014), True, 'from rlkit.visualization import visualization_util as vu\n'), ((772, 802), 'matplotlib.pyplot.subplot', 'plt.subplot', (['n_rows', 'n_cols', 'j'], {}), '(n_rows, n_cols, j)\n', (783, 802), True, 'from 
matplotlib import pyplot as plt\n'), ((844, 859), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (853, 859), True, 'from matplotlib import pyplot as plt\n'), ((2238, 2264), 'numpy.maximum', 'np.maximum', (['prob', 'min_prob'], {}), '(prob, min_prob)\n', (2248, 2264), True, 'import numpy as np\n'), ((2274, 2320), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (2285, 2320), True, 'import numpy as np\n'), ((820, 834), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (828, 834), True, 'import numpy as np\n'), ((338, 369), 'numpy.random.randn', 'np.random.randn', (['*samples.shape'], {}), '(*samples.shape)\n', (353, 369), True, 'import numpy as np\n'), ((2448, 2460), 'numpy.log', 'np.log', (['prob'], {}), '(prob)\n', (2454, 2460), True, 'import numpy as np\n')] |
import textwrap
import numpy as np
from phonopy.phonon.group_velocity import GroupVelocity
from phonopy.harmonic.force_constants import similarity_transformation
from phonopy.phonon.thermal_properties import mode_cv as get_mode_cv
from phonopy.units import THzToEv, EV, THz, Angstrom
from phono3py.file_IO import write_pp_to_hdf5
from phono3py.phonon3.triplets import (get_grid_address, reduce_grid_points,
get_ir_grid_points,
from_coarse_to_dense_grid_points,
get_grid_points_by_rotations,
get_all_triplets)
from phono3py.other.isotope import Isotope
# Conversion factor from internal units ((THz*Angstrom)^2 / Angstrom^3
# * eV / THz) to W/m-K for the lattice thermal conductivity.
unit_to_WmK = ((THz * Angstrom) ** 2 / (Angstrom ** 3) * EV / THz /
               (2 * np.pi))  # 2pi comes from definition of lifetime.
def all_bands_exist(interaction):
    """Return True when `interaction` carries every phonon band index
    (0 .. 3 * num_atoms - 1), i.e. no band filtering is in effect."""
    band_indices = interaction.get_band_indices()
    num_band = interaction.get_primitive().get_number_of_atoms() * 3
    if len(band_indices) != num_band:
        return False
    return bool((band_indices - np.arange(num_band) == 0).all())
def write_pp(conductivity,
             pp,
             i,
             filename=None,
             compression="gzip"):
    """Write the ph-ph interaction strength at the i-th grid point of
    `conductivity` to an hdf5 file via write_pp_to_hdf5.

    When the triplet map is available, the full triplet list for the
    grid point is reconstructed and stored alongside it.  Only the last
    smearing width is recorded when several sigmas were given.
    """
    grid_point = conductivity.get_grid_points()[i]
    sigmas = conductivity.get_sigmas()
    sigma_cutoff = conductivity.get_sigma_cutoff_width()
    mesh = conductivity.get_mesh_numbers()
    triplets, weights, map_triplets, _ = pp.get_triplets_at_q()
    grid_address = pp.get_grid_address()
    bz_map = pp.get_bz_map()
    if map_triplets is None:
        all_triplets = None
    else:
        all_triplets = get_all_triplets(grid_point,
                                         grid_address,
                                         bz_map,
                                         mesh)
    if len(sigmas) > 1:
        print("Multiple smearing parameters were given. The last one in ")
        print("ph-ph interaction calculations was written in the file.")
    write_pp_to_hdf5(mesh,
                     pp=pp.get_interaction_strength(),
                     g_zero=pp.get_zero_value_positions(),
                     grid_point=grid_point,
                     triplet=triplets,
                     weight=weights,
                     triplet_map=map_triplets,
                     triplet_all=all_triplets,
                     sigma=sigmas[-1],
                     sigma_cutoff=sigma_cutoff,
                     filename=filename,
                     compression=compression)
class Conductivity(object):
def __init__(self,
interaction,
symmetry,
grid_points=None,
temperatures=None,
sigmas=None,
sigma_cutoff=None,
is_isotope=False,
mass_variances=None,
mesh_divisors=None,
coarse_mesh_shifts=None,
boundary_mfp=None, # in micrometre
is_kappa_star=True,
gv_delta_q=None, # finite difference for group veolocity
is_full_pp=False,
log_level=0):
if sigmas is None:
self._sigmas = []
else:
self._sigmas = sigmas
self._sigma_cutoff = sigma_cutoff
self._pp = interaction
self._is_full_pp = is_full_pp
self._collision = None # has to be set derived class
if temperatures is None:
self._temperatures = None
else:
self._temperatures = np.array(temperatures, dtype='double')
self._is_kappa_star = is_kappa_star
self._gv_delta_q = gv_delta_q
self._log_level = log_level
self._primitive = self._pp.get_primitive()
self._dm = self._pp.get_dynamical_matrix()
self._frequency_factor_to_THz = self._pp.get_frequency_factor_to_THz()
self._cutoff_frequency = self._pp.get_cutoff_frequency()
self._boundary_mfp = boundary_mfp
self._symmetry = symmetry
if not self._is_kappa_star:
self._point_operations = np.array([np.eye(3, dtype='intc')],
dtype='intc')
else:
self._point_operations = symmetry.get_reciprocal_operations()
rec_lat = np.linalg.inv(self._primitive.get_cell())
self._rotations_cartesian = np.array(
[similarity_transformation(rec_lat, r)
for r in self._point_operations], dtype='double')
self._grid_points = None
self._grid_weights = None
self._grid_address = None
self._ir_grid_points = None
self._ir_grid_weights = None
self._read_gamma = False
self._read_gamma_iso = False
self._kappa = None
self._mode_kappa = None
self._frequencies = None
self._cv = None
self._gv = None
self._gv_sum2 = None
self._gamma = None
self._gamma_iso = None
self._num_sampling_grid_points = 0
self._mesh = None
self._mesh_divisors = None
self._coarse_mesh = None
self._coarse_mesh_shifts = None
self._set_mesh_numbers(mesh_divisors=mesh_divisors,
coarse_mesh_shifts=coarse_mesh_shifts)
volume = self._primitive.get_volume()
self._conversion_factor = unit_to_WmK / volume
self._isotope = None
self._mass_variances = None
self._is_isotope = is_isotope
if mass_variances is not None:
self._is_isotope = True
if self._is_isotope:
self._set_isotope(mass_variances)
self._grid_point_count = None
self._set_grid_properties(grid_points)
if (self._dm.is_nac() and
self._dm.get_nac_method() == 'gonze' and
self._gv_delta_q is None):
self._gv_delta_q = 1e-5
if self._log_level:
msg = "Group velocity calculation:\n"
text = ("Analytical derivative of dynamical matrix is not "
"implemented for NAC by Gonze et al. Instead "
"numerical derivative of it is used with dq=1e-5 "
"for group velocity calculation.")
msg += textwrap.fill(text,
initial_indent=" ",
subsequent_indent=" ",
width=70)
print(msg)
self._gv_obj = GroupVelocity(
self._dm,
q_length=self._gv_delta_q,
symmetry=self._symmetry,
frequency_factor_to_THz=self._frequency_factor_to_THz)
# gv_delta_q may be changed.
self._gv_delta_q = self._gv_obj.get_q_length()
def __iter__(self):
return self
def __next__(self):
if self._grid_point_count == len(self._grid_points):
if self._log_level:
print("=================== End of collection of collisions "
"===================")
raise StopIteration
else:
self._run_at_grid_point()
self._grid_point_count += 1
return self._grid_point_count - 1
def next(self):
return self.__next__()
def get_mesh_divisors(self):
return self._mesh_divisors
@property
def mesh_numbers(self):
return self._mesh
def get_mesh_numbers(self):
return self.mesh_numbers
def get_mode_heat_capacities(self):
return self._cv
def get_group_velocities(self):
return self._gv
def get_gv_by_gv(self):
return self._gv_sum2
def get_frequencies(self):
return self._frequencies[self._grid_points]
def get_qpoints(self):
return self._qpoints
def get_grid_points(self):
return self._grid_points
def get_grid_weights(self):
return self._grid_weights
@property
def temperatures(self):
return self._temperatures
def get_temperatures(self):
return self.temperatures
def set_temperatures(self, temperatures):
self._temperatures = temperatures
self._allocate_values()
def set_gamma(self, gamma):
self._gamma = gamma
self._read_gamma = True
def set_gamma_isotope(self, gamma_iso):
self._gamma_iso = gamma_iso
self._read_gamma_iso = True
@property
def gamma(self):
return self._gamma
def get_gamma(self):
return self.gamma
@property
def gamma_isotope(self):
return self._gamma_iso
def get_gamma_isotope(self):
return self.gamma_isotope
@property
def kappa(self):
return self._kappa
def get_kappa(self):
return self.kappa
@property
def mode_kappa(self):
return self._mode_kappa
def get_mode_kappa(self):
return self.mode_kappa
def get_sigmas(self):
return self._sigmas
def get_sigma_cutoff_width(self):
return self._sigma_cutoff
def get_grid_point_count(self):
return self._grid_point_count
def get_averaged_pp_interaction(self):
return self._averaged_pp_interaction
def _run_at_grid_point(self):
"""This has to be implementated in the derived class"""
pass
def _allocate_values(self):
"""This has to be implementated in the derived class"""
pass
def _set_grid_properties(self, grid_points):
self._grid_address = self._pp.get_grid_address()
self._pp.set_nac_q_direction(nac_q_direction=None)
if grid_points is not None: # Specify grid points
self._grid_points = reduce_grid_points(
self._mesh_divisors,
self._grid_address,
grid_points,
coarse_mesh_shifts=self._coarse_mesh_shifts)
(self._ir_grid_points,
self._ir_grid_weights) = self._get_ir_grid_points()
elif not self._is_kappa_star: # All grid points
coarse_grid_address = get_grid_address(self._coarse_mesh)
coarse_grid_points = np.arange(np.prod(self._coarse_mesh),
dtype='uintp')
self._grid_points = from_coarse_to_dense_grid_points(
self._mesh,
self._mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=self._coarse_mesh_shifts)
self._grid_weights = np.ones(len(self._grid_points), dtype='intc')
self._ir_grid_points = self._grid_points
self._ir_grid_weights = self._grid_weights
else: # Automatic sampling
self._grid_points, self._grid_weights = self._get_ir_grid_points()
self._ir_grid_points = self._grid_points
self._ir_grid_weights = self._grid_weights
self._qpoints = np.array(self._grid_address[self._grid_points] /
self._mesh.astype('double'),
dtype='double', order='C')
self._grid_point_count = 0
self._frequencies, self._eigenvectors, _ = self._pp.get_phonons()
def _get_gamma_isotope_at_sigmas(self, i):
gamma_iso = []
bz_map = self._pp.get_bz_map()
pp_freqs, pp_eigvecs, pp_phonon_done = self._pp.get_phonons()
for j, sigma in enumerate(self._sigmas):
if self._log_level:
text = "Calculating Gamma of ph-isotope with "
if sigma is None:
text += "tetrahedron method"
else:
text += "sigma=%s" % sigma
print(text)
self._isotope.set_sigma(sigma)
self._isotope.set_phonons(self._grid_address,
bz_map,
pp_freqs,
pp_eigvecs,
pp_phonon_done,
dm=self._dm)
gp = self._grid_points[i]
self._isotope.set_grid_point(gp)
self._isotope.run()
gamma_iso.append(self._isotope.get_gamma())
return np.array(gamma_iso, dtype='double', order='C')
def _set_mesh_numbers(self, mesh_divisors=None, coarse_mesh_shifts=None):
self._mesh = self._pp.get_mesh_numbers()
if mesh_divisors is None:
self._mesh_divisors = np.array([1, 1, 1], dtype='intc')
else:
self._mesh_divisors = []
for i, (m, n) in enumerate(zip(self._mesh, mesh_divisors)):
if m % n == 0:
self._mesh_divisors.append(n)
else:
self._mesh_divisors.append(1)
print(("Mesh number %d for the " +
["first", "second", "third"][i] +
" axis is not dividable by divisor %d.") % (m, n))
self._mesh_divisors = np.array(self._mesh_divisors, dtype='intc')
if coarse_mesh_shifts is None:
self._coarse_mesh_shifts = [False, False, False]
else:
self._coarse_mesh_shifts = coarse_mesh_shifts
for i in range(3):
if (self._coarse_mesh_shifts[i] and
(self._mesh_divisors[i] % 2 != 0)):
print("Coarse grid along " +
["first", "second", "third"][i] +
" axis can not be shifted. Set False.")
self._coarse_mesh_shifts[i] = False
self._coarse_mesh = self._mesh // self._mesh_divisors
if self._log_level:
print("Lifetime sampling mesh: [ %d %d %d ]" %
tuple(self._mesh // self._mesh_divisors))
def _get_ir_grid_points(self):
if self._coarse_mesh_shifts is None:
mesh_shifts = [False, False, False]
else:
mesh_shifts = self._coarse_mesh_shifts
(coarse_grid_points,
coarse_grid_weights,
coarse_grid_address, _) = get_ir_grid_points(
self._coarse_mesh,
self._symmetry.get_pointgroup_operations(),
mesh_shifts=mesh_shifts)
grid_points = from_coarse_to_dense_grid_points(
self._mesh,
self._mesh_divisors,
coarse_grid_points,
coarse_grid_address,
coarse_mesh_shifts=self._coarse_mesh_shifts)
grid_weights = coarse_grid_weights
assert grid_weights.sum() == np.prod(self._mesh // self._mesh_divisors)
return grid_points, grid_weights
def _set_isotope(self, mass_variances):
if mass_variances is True:
mv = None
else:
mv = mass_variances
self._isotope = Isotope(
self._mesh,
self._primitive,
mass_variances=mv,
frequency_factor_to_THz=self._frequency_factor_to_THz,
symprec=self._symmetry.get_symmetry_tolerance(),
cutoff_frequency=self._cutoff_frequency,
lapack_zheev_uplo=self._pp.get_lapack_zheev_uplo())
self._mass_variances = self._isotope.get_mass_variances()
def _set_harmonic_properties(self, i_irgp, i_data):
grid_point = self._grid_points[i_irgp]
freqs = self._frequencies[grid_point][self._pp.get_band_indices()]
self._cv[:, i_data, :] = self._get_cv(freqs)
gv = self._get_gv(self._qpoints[i_irgp])
self._gv[i_data] = gv[self._pp.get_band_indices(), :]
# Outer product of group velocities (v x v) [num_k*, num_freqs, 3, 3]
gv_by_gv_tensor, order_kstar = self._get_gv_by_gv(i_irgp, i_data)
self._num_sampling_grid_points += order_kstar
# Sum all vxv at k*
for j, vxv in enumerate(
([0, 0], [1, 1], [2, 2], [1, 2], [0, 2], [0, 1])):
self._gv_sum2[i_data, :, j] = gv_by_gv_tensor[:, vxv[0], vxv[1]]
def _get_gv(self, q):
self._gv_obj.run([q])
return self._gv_obj.get_group_velocity()[0]
def _get_gv_by_gv(self, i_irgp, i_data):
rotation_map = get_grid_points_by_rotations(
self._grid_address[self._grid_points[i_irgp]],
self._point_operations,
self._mesh)
gv = self._gv[i_data]
gv_by_gv = np.zeros((len(gv), 3, 3), dtype='double')
for r in self._rotations_cartesian:
gvs_rot = np.dot(gv, r.T)
gv_by_gv += [np.outer(r_gv, r_gv) for r_gv in gvs_rot]
gv_by_gv /= len(rotation_map) // len(np.unique(rotation_map))
order_kstar = len(np.unique(rotation_map))
if self._grid_weights is not None:
if order_kstar != self._grid_weights[i_irgp]:
if self._log_level:
text = ("Number of elements in k* is unequal "
"to number of equivalent grid-points. "
"This means that the mesh sampling grids break "
"symmetry. Please check carefully "
"the convergence over grid point densities.")
msg = textwrap.fill(text,
initial_indent=" ",
subsequent_indent=" ",
width=70)
print("*" * 30 + "Warning" + "*" * 30)
print(msg)
print("*" * 67)
return gv_by_gv, order_kstar
def _get_cv(self, freqs):
cv = np.zeros((len(self._temperatures), len(freqs)), dtype='double')
# T/freq has to be large enough to avoid divergence.
# Otherwise just set 0.
for i, f in enumerate(freqs):
finite_t = (self._temperatures > f / 100)
if f > self._cutoff_frequency:
cv[:, i] = np.where(
finite_t, get_mode_cv(
np.where(finite_t, self._temperatures, 10000),
f * THzToEv), 0)
return cv
def _get_main_diagonal(self, i, j, k):
num_band = self._primitive.get_number_of_atoms() * 3
main_diagonal = self._gamma[j, k, i].copy()
if self._gamma_iso is not None:
main_diagonal += self._gamma_iso[j, i]
if self._boundary_mfp is not None:
main_diagonal += self._get_boundary_scattering(i)
# if self._boundary_mfp is not None:
# for l in range(num_band):
# # Acoustic modes at Gamma are avoided.
# if i == 0 and l < 3:
# continue
# gv_norm = np.linalg.norm(self._gv[i, l])
# mean_free_path = (gv_norm * Angstrom * 1e6 /
# (4 * np.pi * main_diagonal[l]))
# if mean_free_path > self._boundary_mfp:
# main_diagonal[l] = (
# gv_norm / (4 * np.pi * self._boundary_mfp))
return main_diagonal
def _get_boundary_scattering(self, i):
num_band = self._primitive.get_number_of_atoms() * 3
g_boundary = np.zeros(num_band, dtype='double')
for l in range(num_band):
g_boundary[l] = (np.linalg.norm(self._gv[i, l]) * Angstrom * 1e6 /
(4 * np.pi * self._boundary_mfp))
return g_boundary
def _show_log_header(self, i):
if self._log_level:
gp = self._grid_points[i]
print("======================= Grid point %d (%d/%d) "
"=======================" %
(gp, i + 1, len(self._grid_points)))
print("q-point: (%5.2f %5.2f %5.2f)" % tuple(self._qpoints[i]))
if self._boundary_mfp is not None:
if self._boundary_mfp > 1000:
print("Boundary mean free path (millimetre): %.3f" %
(self._boundary_mfp / 1000.0))
else:
print("Boundary mean free path (micrometre): %.5f" %
self._boundary_mfp)
if self._is_isotope:
print(("Mass variance parameters: " +
"%5.2e " * len(self._mass_variances)) %
tuple(self._mass_variances))
| [
"numpy.outer",
"textwrap.fill",
"numpy.eye",
"phono3py.phonon3.triplets.from_coarse_to_dense_grid_points",
"numpy.zeros",
"numpy.unique",
"phonopy.phonon.group_velocity.GroupVelocity",
"phonopy.harmonic.force_constants.similarity_transformation",
"phono3py.phonon3.triplets.get_grid_points_by_rotatio... | [((1679, 1735), 'phono3py.phonon3.triplets.get_all_triplets', 'get_all_triplets', (['grid_point', 'grid_address', 'bz_map', 'mesh'], {}), '(grid_point, grid_address, bz_map, mesh)\n', (1695, 1735), False, 'from phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((6555, 6689), 'phonopy.phonon.group_velocity.GroupVelocity', 'GroupVelocity', (['self._dm'], {'q_length': 'self._gv_delta_q', 'symmetry': 'self._symmetry', 'frequency_factor_to_THz': 'self._frequency_factor_to_THz'}), '(self._dm, q_length=self._gv_delta_q, symmetry=self._symmetry,\n frequency_factor_to_THz=self._frequency_factor_to_THz)\n', (6568, 6689), False, 'from phonopy.phonon.group_velocity import GroupVelocity\n'), ((12312, 12358), 'numpy.array', 'np.array', (['gamma_iso'], {'dtype': '"""double"""', 'order': '"""C"""'}), "(gamma_iso, dtype='double', order='C')\n", (12320, 12358), True, 'import numpy as np\n'), ((14364, 14524), 'phono3py.phonon3.triplets.from_coarse_to_dense_grid_points', 'from_coarse_to_dense_grid_points', (['self._mesh', 'self._mesh_divisors', 'coarse_grid_points', 'coarse_grid_address'], {'coarse_mesh_shifts': 'self._coarse_mesh_shifts'}), '(self._mesh, self._mesh_divisors,\n coarse_grid_points, coarse_grid_address, coarse_mesh_shifts=self.\n _coarse_mesh_shifts)\n', (14396, 14524), False, 'from phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((16249, 16364), 'phono3py.phonon3.triplets.get_grid_points_by_rotations', 'get_grid_points_by_rotations', (['self._grid_address[self._grid_points[i_irgp]]', 'self._point_operations', 'self._mesh'], {}), '(self._grid_address[self._grid_points[i_irgp]],\n self._point_operations, self._mesh)\n', (16277, 16364), False, 'from 
phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((19260, 19294), 'numpy.zeros', 'np.zeros', (['num_band'], {'dtype': '"""double"""'}), "(num_band, dtype='double')\n", (19268, 19294), True, 'import numpy as np\n'), ((3584, 3622), 'numpy.array', 'np.array', (['temperatures'], {'dtype': '"""double"""'}), "(temperatures, dtype='double')\n", (3592, 3622), True, 'import numpy as np\n'), ((9749, 9870), 'phono3py.phonon3.triplets.reduce_grid_points', 'reduce_grid_points', (['self._mesh_divisors', 'self._grid_address', 'grid_points'], {'coarse_mesh_shifts': 'self._coarse_mesh_shifts'}), '(self._mesh_divisors, self._grid_address, grid_points,\n coarse_mesh_shifts=self._coarse_mesh_shifts)\n', (9767, 9870), False, 'from phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((12556, 12589), 'numpy.array', 'np.array', (['[1, 1, 1]'], {'dtype': '"""intc"""'}), "([1, 1, 1], dtype='intc')\n", (12564, 12589), True, 'import numpy as np\n'), ((13094, 13137), 'numpy.array', 'np.array', (['self._mesh_divisors'], {'dtype': '"""intc"""'}), "(self._mesh_divisors, dtype='intc')\n", (13102, 13137), True, 'import numpy as np\n'), ((14658, 14700), 'numpy.prod', 'np.prod', (['(self._mesh // self._mesh_divisors)'], {}), '(self._mesh // self._mesh_divisors)\n', (14665, 14700), True, 'import numpy as np\n'), ((16556, 16571), 'numpy.dot', 'np.dot', (['gv', 'r.T'], {}), '(gv, r.T)\n', (16562, 16571), True, 'import numpy as np\n'), ((16735, 16758), 'numpy.unique', 'np.unique', (['rotation_map'], {}), '(rotation_map)\n', (16744, 16758), True, 'import numpy as np\n'), ((4441, 4478), 'phonopy.harmonic.force_constants.similarity_transformation', 'similarity_transformation', (['rec_lat', 'r'], {}), '(rec_lat, r)\n', (4466, 4478), False, 'from 
phonopy.harmonic.force_constants import similarity_transformation\n'), ((6319, 6393), 'textwrap.fill', 'textwrap.fill', (['text'], {'initial_indent': '""" """', 'subsequent_indent': '""" """', 'width': '(70)'}), "(text, initial_indent=' ', subsequent_indent=' ', width=70)\n", (6332, 6393), False, 'import textwrap\n'), ((10123, 10158), 'phono3py.phonon3.triplets.get_grid_address', 'get_grid_address', (['self._coarse_mesh'], {}), '(self._coarse_mesh)\n', (10139, 10158), False, 'from phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((10320, 10480), 'phono3py.phonon3.triplets.from_coarse_to_dense_grid_points', 'from_coarse_to_dense_grid_points', (['self._mesh', 'self._mesh_divisors', 'coarse_grid_points', 'coarse_grid_address'], {'coarse_mesh_shifts': 'self._coarse_mesh_shifts'}), '(self._mesh, self._mesh_divisors,\n coarse_grid_points, coarse_grid_address, coarse_mesh_shifts=self.\n _coarse_mesh_shifts)\n', (10352, 10480), False, 'from phono3py.phonon3.triplets import get_grid_address, reduce_grid_points, get_ir_grid_points, from_coarse_to_dense_grid_points, get_grid_points_by_rotations, get_all_triplets\n'), ((16597, 16617), 'numpy.outer', 'np.outer', (['r_gv', 'r_gv'], {}), '(r_gv, r_gv)\n', (16605, 16617), True, 'import numpy as np\n'), ((16684, 16707), 'numpy.unique', 'np.unique', (['rotation_map'], {}), '(rotation_map)\n', (16693, 16707), True, 'import numpy as np\n'), ((4148, 4171), 'numpy.eye', 'np.eye', (['(3)'], {'dtype': '"""intc"""'}), "(3, dtype='intc')\n", (4154, 4171), True, 'import numpy as np\n'), ((10202, 10228), 'numpy.prod', 'np.prod', (['self._coarse_mesh'], {}), '(self._coarse_mesh)\n', (10209, 10228), True, 'import numpy as np\n'), ((17274, 17346), 'textwrap.fill', 'textwrap.fill', (['text'], {'initial_indent': '""" """', 'subsequent_indent': '""" """', 'width': '(70)'}), "(text, initial_indent=' ', subsequent_indent=' 
', width=70)\n", (17287, 17346), False, 'import textwrap\n'), ((1067, 1086), 'numpy.arange', 'np.arange', (['num_band'], {}), '(num_band)\n', (1076, 1086), True, 'import numpy as np\n'), ((18071, 18116), 'numpy.where', 'np.where', (['finite_t', 'self._temperatures', '(10000)'], {}), '(finite_t, self._temperatures, 10000)\n', (18079, 18116), True, 'import numpy as np\n'), ((19358, 19388), 'numpy.linalg.norm', 'np.linalg.norm', (['self._gv[i, l]'], {}), '(self._gv[i, l])\n', (19372, 19388), True, 'import numpy as np\n')] |
import os, json, glob
import torch
import numpy as np
import src.params as params
import argparse, pdb
from src.model import get_model_or_checkpoint
from scipy.io import wavfile
from collections import defaultdict
from src.dataloader import AudioFileWindower
from pathlib import Path
WAV_SR = 44100
# WAV_SR = params.SAMPLE_RATE
# iterate through windows and save audio chunks and prediction candidates
def inference_and_write_chunks(args):
# load dataset object used to iterate windows of audio
chunk_duration=params.INFERENCE_CHUNK_S
wav_file_paths = [ Path(p) for p in glob.glob(args.wavMasterPath+"/*.wav") ][:5]
model_path = Path(args.modelPath)
mean, invstd = model_path/params.MEAN_FILE, model_path/params.INVSTD_FILE
audio_file_windower = AudioFileWindower(wav_file_paths, mean=mean, invstd=invstd)
# initialize model from checkpoint
model, _ = get_model_or_checkpoint(params.MODEL_NAME,model_path,use_cuda=True)
# various output locations
blob_root = "https://podcaststorage.blob.core.windows.net/{}".format(args.relativeBlobPath)
pos_chunk_dir = Path(args.positiveChunkDir)
pos_preds_dir = Path(args.positiveCandidatePredsDir)
neg_chunk_dir = Path(args.negativeChunkDir)
os.makedirs(pos_chunk_dir,exist_ok=True)
os.makedirs(pos_preds_dir,exist_ok=True)
os.makedirs(neg_chunk_dir,exist_ok=True)
# iterate through windows in dataloader, store current chunk windows and length
curr_chunk, curr_chunk_json = [], {}
curr_chunk_duration, curr_chunk_all_negative = 0, 1
chunk_file_name = ""
pos_chunk_path = pos_chunk_dir / chunk_file_name
neg_chunk_path = neg_chunk_dir / chunk_file_name
# file_chunk_counts = defaultdict(int)
file_chunk_counts = {}
pdb.set_trace()
for i in range(len(audio_file_windower)):
# iterate through windows
# decide whether to create new chunk or not
# if making new chunk, write out current json, wav before that
# get preds for chunk, add to things accordingly
# get an audio window
audio_file_windower.get_mode = 'audio_orig_sr'
audio_window, _ = audio_file_windower[i]
_, _, _, af = audio_file_windower.windows[i]
window_s = audio_file_windower.window_s
src_wav_path = af.name
# create new chunk if exceeds length or a new source wavfile has started
is_first_chunk = len(file_chunk_counts)==0
is_new_chunk = (curr_chunk_duration) >= chunk_duration
# is_new_chunk = is_new_chunk or len(file_chunk_counts)==0
# is_new_chunk = is_new_chunk or ( len(file_chunk_counts)>0 and
# src_wav_path not in file_chunk_counts )
if is_new_chunk or is_first_chunk:
if not is_first_chunk:
# write JSON/wav if positive, wav if negative, else skip
if len(curr_chunk_json)!=0: # tests if any positive
print("Writing out positive candidate chunk:", chunk_file_name)
with open(pos_preds_dir/(Path(chunk_file_name).stem+".json"),'w') as fp:
json.dump(curr_chunk_json,fp)
wavfile.write(pos_chunk_path, WAV_SR, np.concatenate(curr_chunk))
elif curr_chunk_all_negative: # test if negative
print("Writing out negative chunk:", chunk_file_name)
wavfile.write(neg_chunk_path, WAV_SR, np.concatenate(curr_chunk))
else:
pass
# used while making a chunk to postfix a random guid
src_wav_name = Path(src_wav_path).stem # NOTE: assumes filename is the absolute time
if src_wav_name in file_chunk_counts:
file_chunk_counts[src_wav_name] += 1
else:
file_chunk_counts[src_wav_name] = 0
postfix = '_'+format(file_chunk_counts[src_wav_name],'04x')
chunk_file_name = src_wav_name+postfix+'.wav'
curr_chunk, curr_chunk_json = [], {}
curr_chunk_duration, curr_chunk_all_negative = 0, 1
pos_chunk_path = pos_chunk_dir / chunk_file_name
neg_chunk_path = neg_chunk_dir / chunk_file_name
blob_uri = blob_root+'/'+chunk_file_name
# add window to current chunk
curr_chunk_duration += window_s
curr_chunk.append(audio_window)
# get a mel spec for the window
audio_file_windower.get_mode = 'mel_spec'
mel_spec_window, _ = audio_file_windower[i]
# run inference on window
input_data = torch.from_numpy(mel_spec_window).float().unsqueeze(0).unsqueeze(0)
pred, embed = model(input_data)
posterior = np.exp(pred.detach().cpu().numpy())
pred_id = torch.argmax(pred, dim=1).item()
confidence = round(float(posterior[0,1]),3)
# trigger and update JSON for current chunk if positive prediction
# chunk is considered negative if there are no positive candidates and
# all windows had confidence < negativeThreshold
if confidence>args.positiveThreshold:
if len(curr_chunk_json)==0: # adding the first prediction
# add the header fields (uri, absolute_time, source_guid, annotations)
curr_chunk_json["uri"] = blob_uri
curr_chunk_json["absolute_time"] = src_wav_name
curr_chunk_json["source_guid"] = "rpi_orcasound_lab"
curr_chunk_json["annotations"] = []
start_s, duration_s = curr_chunk_duration-window_s, window_s
curr_chunk_json["annotations"].append(
{
"start_time_s":start_s,
"duration_s":duration_s,
"confidence":confidence
}
)
print("Positive prediction at {:.2f}, Confidence {:.3f}!".format(start_s,confidence))
curr_chunk_all_negative *= 0
elif confidence>args.negativeThreshold:
curr_chunk_all_negative *= 0
print("Completed writing files:", file_chunk_counts)
if __name__ == "__main__":
"""
Processes unlabelled data using a classifier with two operating points/thresholds.
1. Generating positive annotation candidates for Pod.Cast UI. Use a threshold here that favors high recall (>85% ish) of positive examples over precision (>65% ish).
2. Generating negative examples from this distribution. Use a threshold here that's high precision (>90%) as we don't want positive examples incorrectly labelled negative.
These values above are approximate and will likely evolve as the classifier keeps improving.
NOTE: The wavfile names are assumed to be the "absolute_time" below.
Outputs:
1. For positive candidates: corresponding 60s chunks of wavfiles and JSON with schema:
{
"uri": "https://podcaststorage.blob.core.windows.net/[RELATIVE BLOB PATH]/[WAVCHUNK NAME],
e.g. https://podcaststorage.blob.core.windows.net/orcasoundlabchunked/1562337136_000f.wav
"absolute_time": UNIX time of corresponding Orcasound S3 bucket e.g. 1562337136,
"source_guid": Orcasound lab hydrophone id e.g. rpi_orcasound_lab,
"annotations": [
{"start_time_s","duration_s","confidence"}
]
}
2. For negative examples: corresponding 60s chunks of wavfiles that are labelled as all negative with high confidence.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-wavMasterPath', default=None, type=str, required=True)
parser.add_argument('-sourceGuid', default=None, type=str, required=True)
parser.add_argument('-modelPath', default='AudioSet_fc_all', type=str, required=True)
parser.add_argument('-positiveChunkDir', default=None, type=str, required=True)
parser.add_argument('-positiveCandidatePredsDir', default=None, type=str, required=True)
parser.add_argument('-positiveThreshold', default=None, type=float, required=True)
parser.add_argument('-relativeBlobPath', default=None, type=str, required=True)
parser.add_argument('-negativeChunkDir', default=None, type=str, required=True)
parser.add_argument('-negativeThreshold', default=None, type=float, required=True)
args = parser.parse_args()
inference_and_write_chunks(args)
| [
"json.dump",
"os.makedirs",
"argparse.ArgumentParser",
"torch.argmax",
"src.dataloader.AudioFileWindower",
"pathlib.Path",
"src.model.get_model_or_checkpoint",
"pdb.set_trace",
"glob.glob",
"numpy.concatenate",
"torch.from_numpy"
] | [((652, 672), 'pathlib.Path', 'Path', (['args.modelPath'], {}), '(args.modelPath)\n', (656, 672), False, 'from pathlib import Path\n'), ((778, 837), 'src.dataloader.AudioFileWindower', 'AudioFileWindower', (['wav_file_paths'], {'mean': 'mean', 'invstd': 'invstd'}), '(wav_file_paths, mean=mean, invstd=invstd)\n', (795, 837), False, 'from src.dataloader import AudioFileWindower\n'), ((893, 962), 'src.model.get_model_or_checkpoint', 'get_model_or_checkpoint', (['params.MODEL_NAME', 'model_path'], {'use_cuda': '(True)'}), '(params.MODEL_NAME, model_path, use_cuda=True)\n', (916, 962), False, 'from src.model import get_model_or_checkpoint\n'), ((1109, 1136), 'pathlib.Path', 'Path', (['args.positiveChunkDir'], {}), '(args.positiveChunkDir)\n', (1113, 1136), False, 'from pathlib import Path\n'), ((1157, 1193), 'pathlib.Path', 'Path', (['args.positiveCandidatePredsDir'], {}), '(args.positiveCandidatePredsDir)\n', (1161, 1193), False, 'from pathlib import Path\n'), ((1214, 1241), 'pathlib.Path', 'Path', (['args.negativeChunkDir'], {}), '(args.negativeChunkDir)\n', (1218, 1241), False, 'from pathlib import Path\n'), ((1246, 1287), 'os.makedirs', 'os.makedirs', (['pos_chunk_dir'], {'exist_ok': '(True)'}), '(pos_chunk_dir, exist_ok=True)\n', (1257, 1287), False, 'import os, json, glob\n'), ((1291, 1332), 'os.makedirs', 'os.makedirs', (['pos_preds_dir'], {'exist_ok': '(True)'}), '(pos_preds_dir, exist_ok=True)\n', (1302, 1332), False, 'import os, json, glob\n'), ((1336, 1377), 'os.makedirs', 'os.makedirs', (['neg_chunk_dir'], {'exist_ok': '(True)'}), '(neg_chunk_dir, exist_ok=True)\n', (1347, 1377), False, 'import os, json, glob\n'), ((1767, 1782), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1780, 1782), False, 'import argparse, pdb\n'), ((7532, 7557), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7555, 7557), False, 'import argparse, pdb\n'), ((573, 580), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (577, 580), False, 'from pathlib 
import Path\n'), ((590, 630), 'glob.glob', 'glob.glob', (["(args.wavMasterPath + '/*.wav')"], {}), "(args.wavMasterPath + '/*.wav')\n", (599, 630), False, 'import os, json, glob\n'), ((3636, 3654), 'pathlib.Path', 'Path', (['src_wav_path'], {}), '(src_wav_path)\n', (3640, 3654), False, 'from pathlib import Path\n'), ((4801, 4826), 'torch.argmax', 'torch.argmax', (['pred'], {'dim': '(1)'}), '(pred, dim=1)\n', (4813, 4826), False, 'import torch\n'), ((3152, 3182), 'json.dump', 'json.dump', (['curr_chunk_json', 'fp'], {}), '(curr_chunk_json, fp)\n', (3161, 3182), False, 'import os, json, glob\n'), ((3240, 3266), 'numpy.concatenate', 'np.concatenate', (['curr_chunk'], {}), '(curr_chunk)\n', (3254, 3266), True, 'import numpy as np\n'), ((3465, 3491), 'numpy.concatenate', 'np.concatenate', (['curr_chunk'], {}), '(curr_chunk)\n', (3479, 3491), True, 'import numpy as np\n'), ((4619, 4652), 'torch.from_numpy', 'torch.from_numpy', (['mel_spec_window'], {}), '(mel_spec_window)\n', (4635, 4652), False, 'import torch\n'), ((3080, 3101), 'pathlib.Path', 'Path', (['chunk_file_name'], {}), '(chunk_file_name)\n', (3084, 3101), False, 'from pathlib import Path\n')] |
"""
Benchmarks for the various ways of iterating over the values of an array.
"""
import numpy as np
# This choice of dtype is intentional. It seems to allow better
# vectorization with LLVM 3.7 than either float32 or float64 (at least here
# on SandyBridge), and therefore helps reveal iterator inefficiences.
dtype = np.int32
zero = dtype(0)
N = 1000
arr1 = np.zeros(N * N, dtype=dtype)
arr2c = arr1.reshape((N, N))
arr2f = arr2c.T
arr2a = np.concatenate((arr2c, arr2c))[::2]
# 2d with a very small inner dimension
arr2c2 = arr1.reshape((N * N // 5, 5))
arr2f2 = arr2c2.copy(order='F')
arr2a2 = np.concatenate((arr2c2, arr2c2))[::2]
def setup():
    """Define and JIT-compile every benchmarked iteration kernel.

    The kernels are defined inside this function (so that merely importing
    the module does not trigger numba compilation) and are then published
    to the module namespace via ``globals().update(locals())`` so the
    benchmark classes below can call them as globals.
    """
    from numba import jit
    @jit(nopython=True)
    def array_iter_1d(arr):
        # Direct iteration over a 1d array's values.
        total = zero
        for val in arr:
            total += val
        return total
    @jit(nopython=True)
    def flat_iter(arr):
        # Iteration over arr.flat (dimension-agnostic).
        total = zero
        for val in arr.flat:
            total += val
        return total
    @jit(nopython=True)
    def flat_index(arr):
        # Linear indexing into arr.flat.
        total = zero
        flat = arr.flat
        for i in range(arr.size):
            total += flat[i]
        return total
    @jit(nopython=True)
    def ndindex(arr):
        # Iteration over np.ndindex, yielding index tuples.
        total = zero
        for ind in np.ndindex(arr.shape):
            total += arr[ind]
        return total
    @jit(nopython=True)
    def range1d(arr):
        # Hardcoded 1d indexing with range().
        total = zero
        n, = arr.shape
        for i in range(n):
            total += arr[i]
        return total
    @jit(nopython=True)
    def range2d(arr):
        # Hardcoded 2d indexing with nested range() loops.
        total = zero
        m, n = arr.shape
        for i in range(m):
            for j in range(n):
                total += arr[i, j]
        return total
    @jit(nopython=True)
    def nditer1(a):
        # np.nditer over a single array; items are 0-d arrays, hence .item().
        total = zero
        for u in np.nditer(a):
            total += u.item()
        return total
    @jit(nopython=True)
    def nditer2(a, b):
        # np.nditer over two arrays in lockstep.
        total = zero
        for u, v in np.nditer((a, b)):
            total += u.item() * v.item()
        return total
    @jit(nopython=True)
    def nditer3(a, b, out):
        # np.nditer with an output operand, written via itemset().
        total = zero
        for u, v, res in np.nditer((a, b, out)):
            res.itemset(u.item() * v.item())
        return total
    @jit(nopython=True)
    def zip_iter(a, b):
        # zip() over the arrays themselves.
        total = zero
        for u, v in zip(a, b):
            total += u * v
        return total
    @jit(nopython=True)
    def zip_flat(a, b):
        # zip() over the arrays' flat iterators.
        total = zero
        for u, v in zip(a.flat, b.flat):
            total += u * v
        return total
    # Publish every compiled kernel at module scope for the classes below.
    globals().update(locals())
class MonoArrayIterators:
    """Benchmarks for iterating over the values of a single array.

    Each ``time_*`` method exercises one of the kernels compiled in
    setup() on one of the module-level arrays (1d, C-order, Fortran-order,
    non-contiguous, small inner dimension).
    """
    # These are the dimensions-agnostic iteration methods
    def time_flat_iter_1d(self):
        flat_iter(arr1)
    def time_flat_iter_2d_C(self):
        flat_iter(arr2c)
    def time_flat_iter_2d_fortran(self):
        flat_iter(arr2f)
    def time_flat_iter_2d_non_contiguous(self):
        flat_iter(arr2a)
    def time_flat_index_1d(self):
        flat_index(arr1)
    def time_flat_index_2d_C(self):
        flat_index(arr2c)
    def time_flat_index_2d_fortran(self):
        flat_index(arr2f)
    def time_flat_index_2d_non_contiguous(self):
        flat_index(arr2a)
    def time_ndindex_1d(self):
        ndindex(arr1)
    def time_ndindex_2d(self):
        ndindex(arr2c)
    def time_nditer_iter_1d(self):
        nditer1(arr1)
    def time_nditer_iter_2d_C(self):
        nditer1(arr2c)
    def time_nditer_iter_2d_C_small_inner_dim(self):
        nditer1(arr2c2)
    def time_nditer_iter_2d_fortran(self):
        nditer1(arr2f)
    def time_nditer_iter_2d_non_contiguous(self):
        nditer1(arr2a)
    # When the number of dimensions is known / hardcoded
    def time_array_iter_1d(self):
        array_iter_1d(arr1)
    def time_range_index_1d(self):
        range1d(arr1)
    def time_range_index_2d(self):
        range2d(arr2c)
class MultiArrayIterators:
    """Benchmarks for iterating over several arrays in lockstep.

    Method name suffixes encode the layouts of the operands
    (C = C-contiguous, F = Fortran-contiguous, A = non-contiguous).
    """
    # These are the dimensions-agnostic iteration methods
    def time_nditer_two_1d(self):
        nditer2(arr1, arr1)
    def time_nditer_two_2d_C_C(self):
        nditer2(arr2c, arr2c)
    def time_nditer_two_2d_F_F(self):
        nditer2(arr2f, arr2f)
    def time_nditer_two_2d_F_C(self):
        nditer2(arr2f, arr2c)
    def time_nditer_two_2d_C_A(self):
        nditer2(arr2c, arr2a)
    def time_nditer_two_2d_A_A(self):
        nditer2(arr2a, arr2a)
    def time_nditer_two_2d_C_C_small_inner_dim(self):
        nditer2(arr2c2, arr2c2)
    def time_nditer_two_2d_F_F_small_inner_dim(self):
        nditer2(arr2f2, arr2f2)
    def time_nditer_two_2d_F_C_small_inner_dim(self):
        nditer2(arr2f2, arr2c2)
    def time_nditer_two_2d_C_A_small_inner_dim(self):
        nditer2(arr2c2, arr2a2)
    def time_zip_flat_two_1d(self):
        zip_flat(arr1, arr1)
    def time_zip_flat_two_2d_C_C(self):
        zip_flat(arr2c, arr2c)
    def time_zip_flat_two_2d_C_C_small_inner_dim(self):
        zip_flat(arr2c2, arr2c2)
    def time_zip_flat_two_2d_F_F(self):
        zip_flat(arr2f, arr2f)
    def time_zip_flat_two_2d_A_A(self):
        zip_flat(arr2a, arr2a)
    def time_nditer_three_2d_C_C_C(self):
        nditer3(arr2c, arr2c, arr2c)
    def time_nditer_three_2d_F_F_F(self):
        nditer3(arr2f, arr2f, arr2f)
    def time_nditer_three_2d_A_A_A(self):
        nditer3(arr2a, arr2a, arr2a)
    # When the number of dimensions is known / hardcoded
    def time_zip_iter_two_1d(self):
        zip_iter(arr1, arr1)
| [
"numpy.ndindex",
"numpy.nditer",
"numpy.zeros",
"numba.jit",
"numpy.concatenate"
] | [((365, 393), 'numpy.zeros', 'np.zeros', (['(N * N)'], {'dtype': 'dtype'}), '(N * N, dtype=dtype)\n', (373, 393), True, 'import numpy as np\n'), ((447, 477), 'numpy.concatenate', 'np.concatenate', (['(arr2c, arr2c)'], {}), '((arr2c, arr2c))\n', (461, 477), True, 'import numpy as np\n'), ((602, 634), 'numpy.concatenate', 'np.concatenate', (['(arr2c2, arr2c2)'], {}), '((arr2c2, arr2c2))\n', (616, 634), True, 'import numpy as np\n'), ((688, 706), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (691, 706), False, 'from numba import jit\n'), ((832, 850), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (835, 850), False, 'from numba import jit\n'), ((977, 995), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (980, 995), False, 'from numba import jit\n'), ((1156, 1174), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1159, 1174), False, 'from numba import jit\n'), ((1317, 1335), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1320, 1335), False, 'from numba import jit\n'), ((1484, 1502), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1487, 1502), False, 'from numba import jit\n'), ((1691, 1709), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1694, 1709), False, 'from numba import jit\n'), ((1839, 1857), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1842, 1857), False, 'from numba import jit\n'), ((2009, 2027), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2012, 2027), False, 'from numba import jit\n'), ((2198, 2216), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2201, 2216), False, 'from numba import jit\n'), ((2347, 2365), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2350, 2365), False, 'from numba import jit\n'), ((1237, 1258), 'numpy.ndindex', 'np.ndindex', (['arr.shape'], {}), '(arr.shape)\n', 
(1247, 1258), True, 'import numpy as np\n'), ((1768, 1780), 'numpy.nditer', 'np.nditer', (['a'], {}), '(a)\n', (1777, 1780), True, 'import numpy as np\n'), ((1922, 1939), 'numpy.nditer', 'np.nditer', (['(a, b)'], {}), '((a, b))\n', (1931, 1939), True, 'import numpy as np\n'), ((2102, 2124), 'numpy.nditer', 'np.nditer', (['(a, b, out)'], {}), '((a, b, out))\n', (2111, 2124), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.ensemble import RandomForestClassifier
import os
import cv2

# Width of the erosion kernel used to isolate a band of edge pixels.
ed = 21

im_train = np.load(os.path.join('..', 'Data', 'Classification', 'im_train.npy'))
im_test = np.load(os.path.join('..', 'Data', 'Classification', 'im_test.npy'))
label_train = np.load(os.path.join('..', 'Data', 'Classification', 'feature_train.npy'))
label_test = np.load(os.path.join('..', 'Data', 'Classification', 'feature_test.npy'))

Nfeature = 7


def _compute_features(images, n_rows):
    """Build an (n_rows, Nfeature) matrix of hand-crafted shape features.

    Features per image (zero pixels are treated as background):
      0: mean of the non-zero pixel intensities
      1: variance of the non-zero pixel intensities
      2: mean intensity over an edge band (erosion with an ed x ed kernel)
      3: variance over the edge band
      4: volume / perimeter (rough sphericity measure)
      5: bounding-box aspect ratio
      6: eccentricity-like roundness from a fitted ellipse

    NOTE(review): feature 2/3 only NaN-out pixels where the *image* is
    zero, so interior non-edge pixels contribute zeros to the edge-band
    statistics -- behavior preserved from the original, verify intent.
    """
    feats = np.zeros((n_rows, Nfeature))
    for k in range(images.shape[0]):
        img = images[k]
        # Mean and variance of the image.
        tmp = img.copy()
        tmp[img == 0] = np.nan
        feats[k, 0] = np.nanmean(tmp)
        feats[k, 1] = np.nansum((feats[k, 0] - tmp) ** 2) / float(np.nansum(tmp > 1e-7))
        volume = float(np.nansum(tmp > 1e-7))
        # Mean and variance over the edge band.
        mask = np.array(img > 1e-7, dtype=np.uint8)
        edge = np.array((mask - cv2.erode(mask, np.ones((ed, ed)))) > 0, dtype=np.float32)
        tmp = edge * img.copy()
        tmp[img == 0] = np.nan
        feats[k, 2] = np.nanmean(tmp)
        feats[k, 3] = np.nansum((feats[k, 2] - tmp) ** 2) / float(np.nansum(tmp > 1e-7))
        # Volume/perimeter: should measure some sphericity.
        perim = np.array((mask - cv2.erode(mask, np.ones((3, 3)))) > 0, dtype=np.float32)
        perim = float(np.sum(perim))
        feats[k, 4] = volume / perim
        # Aspect ratio of the bounding box.
        bx, by, w, h = cv2.boundingRect(mask)
        feats[k, 5] = float(w) / h
        # Roundness: eccentricity of a fitted ellipse, normalized by the
        # ellipse center's x coordinate (as in the original code).
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        (cx, cy), (MA, ma), angle = cv2.fitEllipse(contours[0])
        eccentricity = np.sqrt((ma / 2.) ** 2 - (MA / 2.) ** 2)
        feats[k, 6] = np.round(eccentricity / (cx / 2), 2)
    return feats


feature_train = _compute_features(im_train, label_train.shape[0])

nclass = 4
clf = RandomForestClassifier(n_estimators=1000, max_depth=10, random_state=0, verbose=1)
clf.fit(feature_train, label_train)

## Prediction
# BUG FIX: the original test-set loop computed the mask/edge features from
# im_train instead of im_test; the shared helper now uses the passed array
# consistently.
feature_test = _compute_features(im_test, label_test.shape[0])
predict_test = clf.predict(feature_test)

import pylab
pylab.ion()
proba_test_predict = np.argmax(predict_test, 1)
proba_test = np.argmax(label_test, 1)
pylab.plot(proba_test_predict, 'r')
pylab.plot(proba_test, 'k')
pylab.plot(proba_test,'k') | [
"sklearn.ensemble.RandomForestClassifier",
"numpy.nansum",
"pylab.ion",
"numpy.sum",
"numpy.argmax",
"numpy.zeros",
"numpy.sqrt",
"numpy.ones",
"cv2.fitEllipse",
"numpy.array",
"numpy.round",
"cv2.boundingRect",
"os.path.join",
"pylab.plot",
"cv2.findContours",
"numpy.nanmean"
] | [((459, 501), 'numpy.zeros', 'np.zeros', (['(label_train.shape[0], Nfeature)'], {}), '((label_train.shape[0], Nfeature))\n', (467, 501), True, 'import numpy as np\n'), ((1823, 1909), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(1000)', 'max_depth': '(10)', 'random_state': '(0)', 'verbose': '(1)'}), '(n_estimators=1000, max_depth=10, random_state=0,\n verbose=1)\n', (1845, 1909), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1971, 2012), 'numpy.zeros', 'np.zeros', (['(label_test.shape[0], Nfeature)'], {}), '((label_test.shape[0], Nfeature))\n', (1979, 2012), True, 'import numpy as np\n'), ((3360, 3371), 'pylab.ion', 'pylab.ion', ([], {}), '()\n', (3369, 3371), False, 'import pylab\n'), ((3393, 3419), 'numpy.argmax', 'np.argmax', (['predict_test', '(1)'], {}), '(predict_test, 1)\n', (3402, 3419), True, 'import numpy as np\n'), ((3432, 3456), 'numpy.argmax', 'np.argmax', (['label_test', '(1)'], {}), '(label_test, 1)\n', (3441, 3456), True, 'import numpy as np\n'), ((3456, 3491), 'pylab.plot', 'pylab.plot', (['proba_test_predict', '"""r"""'], {}), "(proba_test_predict, 'r')\n", (3466, 3491), False, 'import pylab\n'), ((3491, 3518), 'pylab.plot', 'pylab.plot', (['proba_test', '"""k"""'], {}), "(proba_test, 'k')\n", (3501, 3518), False, 'import pylab\n'), ((122, 182), 'os.path.join', 'os.path.join', (['""".."""', '"""Data"""', '"""Classification"""', '"""im_train.npy"""'], {}), "('..', 'Data', 'Classification', 'im_train.npy')\n", (134, 182), False, 'import os\n'), ((199, 258), 'os.path.join', 'os.path.join', (['""".."""', '"""Data"""', '"""Classification"""', '"""im_test.npy"""'], {}), "('..', 'Data', 'Classification', 'im_test.npy')\n", (211, 258), False, 'import os\n'), ((279, 344), 'os.path.join', 'os.path.join', (['""".."""', '"""Data"""', '"""Classification"""', '"""feature_train.npy"""'], {}), "('..', 'Data', 'Classification', 'feature_train.npy')\n", (291, 344), False, 'import os\n'), ((364, 
428), 'os.path.join', 'os.path.join', (['""".."""', '"""Data"""', '"""Classification"""', '"""feature_test.npy"""'], {}), "('..', 'Data', 'Classification', 'feature_test.npy')\n", (376, 428), False, 'import os\n'), ((657, 672), 'numpy.nanmean', 'np.nanmean', (['tmp'], {}), '(tmp)\n', (667, 672), True, 'import numpy as np\n'), ((850, 895), 'numpy.array', 'np.array', (['(im_train[k] > 1e-07)'], {'dtype': 'np.uint8'}), '(im_train[k] > 1e-07, dtype=np.uint8)\n', (858, 895), True, 'import numpy as np\n'), ((1066, 1081), 'numpy.nanmean', 'np.nanmean', (['tmp'], {}), '(tmp)\n', (1076, 1081), True, 'import numpy as np\n'), ((1417, 1439), 'cv2.boundingRect', 'cv2.boundingRect', (['mask'], {}), '(mask)\n', (1433, 1439), False, 'import cv2\n'), ((1551, 1613), 'cv2.findContours', 'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (1567, 1613), False, 'import cv2\n'), ((1640, 1667), 'cv2.fitEllipse', 'cv2.fitEllipse', (['contours[0]'], {}), '(contours[0])\n', (1654, 1667), False, 'import cv2\n'), ((1687, 1729), 'numpy.sqrt', 'np.sqrt', (['((ma / 2.0) ** 2 - (MA / 2.0) ** 2)'], {}), '((ma / 2.0) ** 2 - (MA / 2.0) ** 2)\n', (1694, 1729), True, 'import numpy as np\n'), ((1737, 1772), 'numpy.round', 'np.round', (['(eccentricity / (x / 2))', '(2)'], {}), '(eccentricity / (x / 2), 2)\n', (1745, 1772), True, 'import numpy as np\n'), ((2163, 2178), 'numpy.nanmean', 'np.nanmean', (['tmp'], {}), '(tmp)\n', (2173, 2178), True, 'import numpy as np\n'), ((2354, 2399), 'numpy.array', 'np.array', (['(im_train[k] > 1e-07)'], {'dtype': 'np.uint8'}), '(im_train[k] > 1e-07, dtype=np.uint8)\n', (2362, 2399), True, 'import numpy as np\n'), ((2569, 2584), 'numpy.nanmean', 'np.nanmean', (['tmp'], {}), '(tmp)\n', (2579, 2584), True, 'import numpy as np\n'), ((2917, 2939), 'cv2.boundingRect', 'cv2.boundingRect', (['mask'], {}), '(mask)\n', (2933, 2939), False, 'import cv2\n'), ((3050, 3112), 'cv2.findContours', 
'cv2.findContours', (['mask', 'cv2.RETR_TREE', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n', (3066, 3112), False, 'import cv2\n'), ((3139, 3166), 'cv2.fitEllipse', 'cv2.fitEllipse', (['contours[0]'], {}), '(contours[0])\n', (3153, 3166), False, 'import cv2\n'), ((3186, 3224), 'numpy.sqrt', 'np.sqrt', (['((ma / 2) ** 2 - (MA / 2) ** 2)'], {}), '((ma / 2) ** 2 - (MA / 2) ** 2)\n', (3193, 3224), True, 'import numpy as np\n'), ((3234, 3269), 'numpy.round', 'np.round', (['(eccentricity / (x / 2))', '(2)'], {}), '(eccentricity / (x / 2), 2)\n', (3242, 3269), True, 'import numpy as np\n'), ((698, 741), 'numpy.nansum', 'np.nansum', (['((feature_train[k, 0] - tmp) ** 2)'], {}), '((feature_train[k, 0] - tmp) ** 2)\n', (707, 741), True, 'import numpy as np\n'), ((784, 806), 'numpy.nansum', 'np.nansum', (['(tmp > 1e-07)'], {}), '(tmp > 1e-07)\n', (793, 806), True, 'import numpy as np\n'), ((1107, 1150), 'numpy.nansum', 'np.nansum', (['((feature_train[k, 2] - tmp) ** 2)'], {}), '((feature_train[k, 2] - tmp) ** 2)\n', (1116, 1150), True, 'import numpy as np\n'), ((1327, 1340), 'numpy.sum', 'np.sum', (['perim'], {}), '(perim)\n', (1333, 1340), True, 'import numpy as np\n'), ((2203, 2245), 'numpy.nansum', 'np.nansum', (['((feature_test[k, 0] - tmp) ** 2)'], {}), '((feature_test[k, 0] - tmp) ** 2)\n', (2212, 2245), True, 'import numpy as np\n'), ((2288, 2310), 'numpy.nansum', 'np.nansum', (['(tmp > 1e-07)'], {}), '(tmp > 1e-07)\n', (2297, 2310), True, 'import numpy as np\n'), ((2609, 2651), 'numpy.nansum', 'np.nansum', (['((feature_test[k, 2] - tmp) ** 2)'], {}), '((feature_test[k, 2] - tmp) ** 2)\n', (2618, 2651), True, 'import numpy as np\n'), ((2828, 2841), 'numpy.sum', 'np.sum', (['perim'], {}), '(perim)\n', (2834, 2841), True, 'import numpy as np\n'), ((743, 765), 'numpy.nansum', 'np.nansum', (['(tmp > 1e-07)'], {}), '(tmp > 1e-07)\n', (752, 765), True, 'import numpy as np\n'), ((1152, 1174), 'numpy.nansum', 'np.nansum', (['(tmp > 
1e-07)'], {}), '(tmp > 1e-07)\n', (1161, 1174), True, 'import numpy as np\n'), ((2247, 2269), 'numpy.nansum', 'np.nansum', (['(tmp > 1e-07)'], {}), '(tmp > 1e-07)\n', (2256, 2269), True, 'import numpy as np\n'), ((2653, 2675), 'numpy.nansum', 'np.nansum', (['(tmp > 1e-07)'], {}), '(tmp > 1e-07)\n', (2662, 2675), True, 'import numpy as np\n'), ((934, 951), 'numpy.ones', 'np.ones', (['(ed, ed)'], {}), '((ed, ed))\n', (941, 951), True, 'import numpy as np\n'), ((1271, 1286), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (1278, 1286), True, 'import numpy as np\n'), ((2438, 2455), 'numpy.ones', 'np.ones', (['(ed, ed)'], {}), '((ed, ed))\n', (2445, 2455), True, 'import numpy as np\n'), ((2772, 2787), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (2779, 2787), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
import matplotlib.pyplot as plt
import nice
import numpy as np
import torch
import torchvision as tv
import utils
from torch.autograd import Variable
kMNISTInputDim = 784
kMNISTInputSize = 28
kMNISTNumExamples = 100
def ShowImagesInGrid(data, rows, cols, save_path="original_images.png"):
    """Render `rows * cols` flattened 28x28 images as a grid.

    Args:
        data: indexable of flattened images; data[i] must reshape to
            (kMNISTInputSize, kMNISTInputSize).
        rows, cols: grid dimensions (also used as the figure size).
        save_path: file to write the figure to, or None to skip saving.
    """
    # BUG FIX: the original called plt.axis('off') before plt.figure(),
    # which only created a stray empty figure and never hid the axes of
    # the grid's subplots; hide the axes per-subplot instead.
    fig = plt.figure(figsize=(rows, cols))
    for i in range(1, cols * rows + 1):
        img = data[i - 1].reshape((kMNISTInputSize, kMNISTInputSize))
        ax = fig.add_subplot(rows, cols, i)
        ax.axis('off')
        ax.imshow(img)
    if save_path is not None:
        plt.savefig(save_path)
def PrepareMNISTData(dataset):
    """Select 100 MNIST digits and build a distinct occlusion mask per row.

    Picks 10 examples of each digit; digit i occupies column i of the
    10x10 grid and each row j uses a different masking pattern.

    Args:
        dataset: torchvision-style MNIST dataset exposing `train_labels`
            and `train_data`.

    Returns:
        (m_val, x): m_val is a (100, 784) binary mask (0 = occluded pixel)
        and x is the (100, 784) array of flattened images.
    """
    # dataset = mnist_dataset(root='./data')
    x = np.zeros((kMNISTNumExamples, kMNISTInputDim))
    for i in range(10):
        idx = dataset.train_labels==i
        # Loop on the digits
        for j in range(10):
            # Loop on the different shadowing methods
            x[j * 10 + i] = dataset.train_data[idx][j].reshape((-1,))
    m_val = np.ones((kMNISTNumExamples, kMNISTInputDim))
    # Get masks: each band of 10 rows gets its own occlusion pattern.
    # Rows 0-9: zero out the first 392 pixels (first half of 784).
    m_val[:10, :392] = 0
    # Rows 10-19: zero out the second half.
    m_val[10:20, 392:] = 0
    # Rows 20-29 / 30-39: zero out even / odd pixel positions.
    m_val[20:30, ::2] = 0
    m_val[30:40, 1::2] = 0
    # Rows 40-49 and 50-59: per-row half-width stripe patterns.
    for i in range(28):
        m_val[40:50, (i*28):((2*i+1)*14)] = 0
    for i in range(28):
        m_val[50:60, ((2*i+1)*14):((i+1)*28)] = 0
    # Rows 60-69: zero out the middle band of pixels (196:588).
    m_val[60:70, 196:588] = 0
    # Rows 70-79: narrow vertical stripes.
    for i in range(28):
        m_val[70:80, ((4*i+1)*7):((4*i+3)*7)] = 0
    # Rows 80-89 / 90-99: random masks keeping ~25% / ~10% of pixels.
    m_val[80:90] = np.random.binomial(n=1, p=.25, size=(10,784))
    m_val[90:] = np.random.binomial(n=1, p=.1, size=(10,784))
    ShowImagesInGrid(x, 10, 10)
    after = np.multiply(m_val, x)
    ShowImagesInGrid(after, 10, 10, save_path="after.png")
    return m_val, x
def Reconstruct(mask, mask_val, x, flow, iters=300, lr=0.001, save_path=None):
    """Gradient-based inpainting of the masked-out pixels.

    Starts from the known pixels of `x` (where mask == 1), fills the rest
    with `mask_val`, and takes `iters` gradient steps on the flow model's
    mean output with respect to the input images.

    Args:
        mask: (num_examples, 784) binary array; 1 marks known pixels.
        mask_val: initial values for the unknown pixels.
        x: ground-truth images, flattened to (num_examples, 784).
        flow: trained NICE flow model.
        iters: number of gradient steps.
        lr: step size.
        save_path: if given, a grid image of the result is written to
            save_path + str(iters) + ".png".

    Returns:
        The final images as a numpy array.
    """
    device = torch.device("cuda:0")
    # Keep the known pixels of x; fill unknown ones with mask_val.
    x_mixed = np.where(mask==1, x, mask_val)
    i = 0
    mean = torch.load('./statistics/mnist_mean.pt')
    x_mixed = np.reshape(x_mixed, (kMNISTNumExamples, 1, kMNISTInputSize, kMNISTInputSize))
    x_mixed_var = Variable(torch.Tensor(x_mixed), requires_grad=True)
    x_mixed_tensor = utils.prepare_data(
        x_mixed_var, 'mnist', zca=None, mean=mean).to(device)
    inputs = Variable(x_mixed_tensor, requires_grad=True)
    # inputs = Variable(torch.Tensor(x_mixed).cuda(), requires_grad=True)
    lr_ = np.float64(lr)
    while i < iters:
        # print("iter: ", i)
        loss = flow(inputs).mean()
        # NOTE(review): gradients accumulate across iterations (no grad
        # zeroing between steps) -- confirm this is intentional.
        loss.backward()
        inc = lr_ * inputs.grad
        # print(inputs.grad.data)
        # NOTE(review): boolean advanced indexing returns a copy, so this
        # in-place update may not modify `inputs`; also mask is shaped
        # (N, 784) while inputs is (N, 1, 28, 28) -- verify this line.
        inputs[mask!=1].data += inc[mask!=1]
        i += 1
    if save_path is not None:
        result = inputs.detach().reshape((-1, 1, kMNISTInputSize, kMNISTInputSize))
        tv.utils.save_image(tv.utils.make_grid(result.cpu()), save_path+str(iters)+".png")
    return inputs.detach().cpu().numpy()
def main(args):
    """Run the MNIST NICE inpainting experiment.

    Loads MNIST, builds the NICE flow, restores weights from
    args.model_path, then reconstructs the masked images for args.iters
    gradient steps and writes the original/reconstructed grids to disk.
    """
    # NOTE(review): MNIST images are already single-channel; the Grayscale
    # transform is presumably a no-op here -- confirm.
    transform = tv.transforms.Compose([tv.transforms.Grayscale(num_output_channels=1),
                                       tv.transforms.ToTensor()])
    trainset = tv.datasets.MNIST(root='~/torch/data/MNIST',
                                 train=True, download=True, transform=transform)
    device = torch.device("cuda:0")
    # (Removed an unused local `prior`: the flow already constructs its
    # own StandardLogistic prior below.)
    flow = nice.NICE(prior=utils.StandardLogistic(),
                coupling=4,
                in_out_dim=kMNISTInputDim,
                mid_dim=1000,
                hidden=5,
                mask_config=1).to(device)
    mask, x = PrepareMNISTData(trainset)
    ShowImagesInGrid(x, 10, 10, save_path="original.png")
    flow.load_state_dict(torch.load(args.model_path)['model_state_dict'])
    # Unknown pixels start from uniform noise scaled by the true image.
    mask_val = np.random.uniform(size=(kMNISTNumExamples, kMNISTInputDim))
    mask_val = np.multiply(mask_val, x)
    iters = args.iters
    result = Reconstruct(mask, mask_val, x, flow, iters=iters, save_path="./inpainting/mnist_")
    ShowImagesInGrid(result, 10, 10, save_path="reconstructed_" + str(iters) + ".png")
if __name__ == "__main__":
    # BUG FIX: the first positional argument of ArgumentParser is `prog`,
    # not the description; pass the text as description= so that it is
    # shown correctly in --help output.
    parser = argparse.ArgumentParser(
        description='MNIST NICE PyTorch inpainting experiment.')
    parser.add_argument('--model_path',
                        help='Saved model path.',
                        type=str,
                        default='./models/mnist/mnist_bs200_logistic_cp4_md1000_hd5_iter25000.tar')
    parser.add_argument('--iters',
                        help='Number of iterations.',
                        type=int,
                        default=300)
    args = parser.parse_args()
    main(args)
main(args)
| [
"utils.prepare_data",
"argparse.ArgumentParser",
"numpy.ones",
"matplotlib.pyplot.figure",
"torch.device",
"numpy.float64",
"numpy.multiply",
"matplotlib.pyplot.imshow",
"torch.load",
"torch.Tensor",
"numpy.reshape",
"utils.StandardLogistic",
"numpy.random.binomial",
"torch.autograd.Variab... | [((336, 351), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (344, 351), True, 'import matplotlib.pyplot as plt\n'), ((362, 394), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(rows, cols)'}), '(figsize=(rows, cols))\n', (372, 394), True, 'import matplotlib.pyplot as plt\n'), ((739, 784), 'numpy.zeros', 'np.zeros', (['(kMNISTNumExamples, kMNISTInputDim)'], {}), '((kMNISTNumExamples, kMNISTInputDim))\n', (747, 784), True, 'import numpy as np\n'), ((1041, 1085), 'numpy.ones', 'np.ones', (['(kMNISTNumExamples, kMNISTInputDim)'], {}), '((kMNISTNumExamples, kMNISTInputDim))\n', (1048, 1085), True, 'import numpy as np\n'), ((1474, 1521), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.25)', 'size': '(10, 784)'}), '(n=1, p=0.25, size=(10, 784))\n', (1492, 1521), True, 'import numpy as np\n'), ((1537, 1583), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': '(0.1)', 'size': '(10, 784)'}), '(n=1, p=0.1, size=(10, 784))\n', (1555, 1583), True, 'import numpy as np\n'), ((1627, 1648), 'numpy.multiply', 'np.multiply', (['m_val', 'x'], {}), '(m_val, x)\n', (1638, 1648), True, 'import numpy as np\n'), ((1821, 1843), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1833, 1843), False, 'import torch\n'), ((1858, 1890), 'numpy.where', 'np.where', (['(mask == 1)', 'x', 'mask_val'], {}), '(mask == 1, x, mask_val)\n', (1866, 1890), True, 'import numpy as np\n'), ((1910, 1950), 'torch.load', 'torch.load', (['"""./statistics/mnist_mean.pt"""'], {}), "('./statistics/mnist_mean.pt')\n", (1920, 1950), False, 'import torch\n'), ((1965, 2042), 'numpy.reshape', 'np.reshape', (['x_mixed', '(kMNISTNumExamples, 1, kMNISTInputSize, kMNISTInputSize)'], {}), '(x_mixed, (kMNISTNumExamples, 1, kMNISTInputSize, kMNISTInputSize))\n', (1975, 2042), True, 'import numpy as np\n'), ((2229, 2273), 'torch.autograd.Variable', 'Variable', (['x_mixed_tensor'], {'requires_grad': 
'(True)'}), '(x_mixed_tensor, requires_grad=True)\n', (2237, 2273), False, 'from torch.autograd import Variable\n'), ((2358, 2372), 'numpy.float64', 'np.float64', (['lr'], {}), '(lr)\n', (2368, 2372), True, 'import numpy as np\n'), ((3041, 3137), 'torchvision.datasets.MNIST', 'tv.datasets.MNIST', ([], {'root': '"""~/torch/data/MNIST"""', 'train': '(True)', 'download': '(True)', 'transform': 'transform'}), "(root='~/torch/data/MNIST', train=True, download=True,\n transform=transform)\n", (3058, 3137), True, 'import torchvision as tv\n'), ((3179, 3203), 'utils.StandardLogistic', 'utils.StandardLogistic', ([], {}), '()\n', (3201, 3203), False, 'import utils\n'), ((3217, 3239), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (3229, 3239), False, 'import torch\n'), ((3652, 3711), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(kMNISTNumExamples, kMNISTInputDim)'}), '(size=(kMNISTNumExamples, kMNISTInputDim))\n', (3669, 3711), True, 'import numpy as np\n'), ((3727, 3751), 'numpy.multiply', 'np.multiply', (['mask_val', 'x'], {}), '(mask_val, x)\n', (3738, 3751), True, 'import numpy as np\n'), ((3998, 4066), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""MNIST NICE PyTorch inpainting experiment."""'], {}), "('MNIST NICE PyTorch inpainting experiment.')\n", (4021, 4066), False, 'import argparse\n'), ((550, 565), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (560, 565), True, 'import matplotlib.pyplot as plt\n'), ((631, 653), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (642, 653), True, 'import matplotlib.pyplot as plt\n'), ((2070, 2091), 'torch.Tensor', 'torch.Tensor', (['x_mixed'], {}), '(x_mixed)\n', (2082, 2091), False, 'import torch\n'), ((2134, 2195), 'utils.prepare_data', 'utils.prepare_data', (['x_mixed_var', '"""mnist"""'], {'zca': 'None', 'mean': 'mean'}), "(x_mixed_var, 'mnist', zca=None, mean=mean)\n", (2152, 2195), False, 'import utils\n'), ((2911, 
2957), 'torchvision.transforms.Grayscale', 'tv.transforms.Grayscale', ([], {'num_output_channels': '(1)'}), '(num_output_channels=1)\n', (2934, 2957), True, 'import torchvision as tv\n'), ((2999, 3023), 'torchvision.transforms.ToTensor', 'tv.transforms.ToTensor', ([], {}), '()\n', (3021, 3023), True, 'import torchvision as tv\n'), ((3587, 3614), 'torch.load', 'torch.load', (['args.model_path'], {}), '(args.model_path)\n', (3597, 3614), False, 'import torch\n'), ((3267, 3291), 'utils.StandardLogistic', 'utils.StandardLogistic', ([], {}), '()\n', (3289, 3291), False, 'import utils\n')] |
#!/usr/bin/env python
#file parse.py: parsers for map file, distance matrix file, env file
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>",
"<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD"
__version__ = "1.7.0-dev"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
from numpy import asarray
class QiimeParseError(Exception):
    """Raised when a QIIME input file cannot be parsed."""
    pass
def parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):
    """Parser for map file that relates samples to metadata.

    Format: header line with fields
            optionally other comment lines starting with #
            tab-delimited fields

    Parameters
    ----------
    lines : iterable of str, or str
        The lines of the mapping file, or a filepath to open.
    strip_quotes : bool
        If True, remove double-quote characters from every field.
    suppress_stripping : bool
        If True, keep surrounding whitespace on fields.

    Returns
    -------
    (mapping_data, header, comments) where mapping_data is a list of
    row field-lists (short rows padded with ''), header is the list of
    column names, and comments is the list of extra '#' lines.

    Raises
    ------
    QiimeParseError
        If the filepath cannot be opened, or if no header / no data rows
        are found.
    """
    if hasattr(lines, "upper"):
        # A string was passed: treat it as a filepath.
        # BUG FIX: mode 'U' was removed in Python 3.11; universal
        # newlines are the default for text mode, so plain open() is the
        # modern equivalent.
        try:
            lines = open(lines)
        except IOError:
            raise QiimeParseError("A string was passed that doesn't refer "
                                  "to an accessible filepath.")

    if strip_quotes:
        if suppress_stripping:
            # remove quotes but not spaces
            strip_f = lambda x: x.replace('"', '')
        else:
            # remove quotes and spaces
            strip_f = lambda x: x.replace('"', '').strip()
    else:
        if suppress_stripping:
            # don't remove quotes or spaces
            strip_f = lambda x: x
        else:
            # remove spaces but not quotes
            strip_f = lambda x: x.strip()

    # Create lists to store the results
    mapping_data = []
    header = []
    comments = []

    # Begin iterating over lines
    for line in lines:
        line = strip_f(line)
        if not line or (suppress_stripping and not line.strip()):
            # skip blank lines when not stripping lines
            continue

        if line.startswith('#'):
            line = line[1:]
            if not header:
                # First '#' line is the header; later ones are comments.
                header = line.strip().split('\t')
            else:
                comments.append(line)
        else:
            # Will add empty string to empty fields
            tmp_line = list(map(strip_f, line.split('\t')))
            if len(tmp_line) < len(header):
                # Pad short rows so every row matches the header width.
                tmp_line.extend([''] * (len(header) - len(tmp_line)))
            mapping_data.append(tmp_line)
    if not header:
        raise QiimeParseError("No header line was found in mapping file.")
    if not mapping_data:
        raise QiimeParseError("No data found in mapping file.")

    return mapping_data, header, comments
def mapping_file_to_dict(mapping_data, header):
    """Convert mapping rows (list of lists) into a two-level dict.

    The first field of each row is taken as the sample ID; the remaining
    fields are keyed by the corresponding header name.
    """
    result = {}
    for row in mapping_data:
        # Column 0 holds the sample ID itself, so start at column 1.
        result[row[0]] = {header[col]: row[col]
                          for col in range(1, len(header))}
    return result
def parse_metadata_state_descriptions(state_string):
    """Parse 'col1:good1,good2;col2:good1' into {column: set of values}."""
    result = {}
    trimmed = state_string.strip()
    if not trimmed:
        return result
    for clause in trimmed.split(';'):
        # Split on the FIRST colon only, so category names may themselves
        # contain colons.
        colname, vals = [piece.strip() for piece in clause.strip().split(':', 1)]
        result[colname] = {v.strip() for v in vals.split(',')}
    return result
def parse_mapping_file_to_dict(*args, **kwargs):
    """Parse a mapping file into ({sample_id: {field: value}}, comments).

    Thin wrapper around parse_mapping_file(): same arguments, but the
    tabular result is reshaped with mapping_file_to_dict(), assuming the
    first column holds the sample ID.
    """
    data, header, comments = parse_mapping_file(*args, **kwargs)
    as_dict = mapping_file_to_dict(data, header)
    return as_dict, comments
def process_otu_table_sample_ids(sample_id_fields):
    """Process the sample-ID line of an OTU table.

    Returns (sample_ids, has_metadata): has_metadata is True when the
    final column is an OTU metadata/taxonomy column (matched on a
    case- and space-insensitive header name), in which case it is
    excluded from sample_ids.

    Raises ValueError if no fields are given.
    """
    if not sample_id_fields:
        raise ValueError('Error parsing sample ID line in OTU table. '
                         'Fields are %s' % ' '.join(sample_id_fields))

    # Normalize the last header so the comparison ignores case and spaces.
    normalized = sample_id_fields[-1].strip().replace(' ', '').lower()
    has_metadata = normalized in ('consensuslineage', 'otumetadata', 'taxonomy')
    if has_metadata:
        sample_ids = sample_id_fields[:-1]
    else:
        sample_ids = sample_id_fields

    return sample_ids, has_metadata
def parse_classic_otu_table(lines,count_map_f=int, remove_empty_rows=False):
    """parses a classic otu table (sample ID x OTU ID map)

    Returns tuple: sample_ids, otu_ids, matrix of OTUs(rows) x samples(cols),
    and lineages from infile.

    Parameters
    ----------
    lines : iterable of str
        The lines of the OTU table.
    count_map_f : callable
        dtype used to parse counts; falls back to float when parsing
        with count_map_f raises ValueError.
    remove_empty_rows : bool
        If True, OTU rows whose counts are all non-negative and sum to
        zero are skipped entirely.
    """
    otu_table = []
    otu_ids = []
    metadata = []
    sample_ids = []
    # iterate over lines in the OTU table -- keep track of line number
    # to support legacy (Qiime 1.2.0 and earlier) OTU tables
    for i, line in enumerate(lines):
        line = line.strip()
        if line:
            if (i==1 or i==0) and line.startswith('#OTU ID') and not sample_ids:
                # we've got a legacy OTU table
                try:
                    sample_ids, has_metadata = process_otu_table_sample_ids(
                        line.strip().split('\t')[1:])
                except ValueError:
                    raise ValueError("Error parsing sample IDs in OTU table. "
                                     "Appears to be a legacy OTU table. Sample"
                                     " ID line:\n %s" % line)
            elif not line.startswith('#'):
                if not sample_ids:
                    # current line is the first non-space, non-comment line
                    # in OTU table, so contains the sample IDs
                    try:
                        sample_ids, has_metadata = process_otu_table_sample_ids(
                            line.strip().split('\t')[1:])
                    except ValueError:
                        raise ValueError("Error parsing sample IDs in OTU "
                                         "table. Sample ID line:\n %s" % line)
                else:
                    # current line is OTU line in OTU table
                    fields = line.split('\t')

                    if has_metadata:
                        # if there is OTU metadata the last column gets appended
                        # to the metadata list
                        # added in a try/except to handle OTU tables containing
                        # floating numbers
                        try:
                            valid_fields = asarray(fields[1:-1], dtype=count_map_f)
                        except ValueError:
                            valid_fields = asarray(fields[1:-1], dtype=float)
                        # validate that there are no empty rows
                        if remove_empty_rows and (valid_fields>=0).all() and \
                           sum(valid_fields)==0.0:
                            continue
                        metadata.append([f.strip() for f in fields[-1].split(';')])
                    else:
                        # otherwise all columns are appended to otu_table
                        # added in a try/except to handle OTU tables containing
                        # floating numbers
                        try:
                            valid_fields = asarray(fields[1:], dtype=count_map_f)
                        except ValueError:
                            valid_fields = asarray(fields[1:], dtype=float)
                        # validate that there are no empty rows
                        if remove_empty_rows and (valid_fields>=0.0).all() and \
                           sum(valid_fields)==0.0:
                            continue
                    otu_table.append(valid_fields)
                    # grab the OTU ID
                    otu_id = fields[0].strip()
                    otu_ids.append(otu_id)

    return sample_ids, otu_ids, asarray(otu_table), metadata
parse_otu_table = parse_classic_otu_table
def parse_coords(lines):
    """Parse a unifrac principal-coordinates file.

    Returns a 4-tuple:
      - list of sample labels, in file order
      - array of coords (rows = samples, cols = axes in descending order)
      - array of eigenvalues
      - array of percent variance explained

    Raises QiimeParseError when the required marker lines ('pc vector
    number', 'eigvals', '% variation') are missing.
    """
    all_lines = list(lines)
    # The leading label line is what differentiates coordinates files
    # from distance matrix files.
    if not all_lines[0].startswith('pc vector number'):
        raise QiimeParseError("The line with the vector number was not found"
                              ", this information is required in coordinates files")
    # Drop the label line, strip everything, and discard blank lines.
    body = [raw.strip() for raw in all_lines[1:]]
    body = [stripped for stripped in body if stripped]
    # Check the trailing marker lines after blank-line removal.
    if not body[-2].startswith('eigvals'):
        raise QiimeParseError("The line containing the eigenvalues was not "
                              "found, this information is required in coordinates files")
    if not body[-1].startswith('% variation'):
        raise QiimeParseError("The line with the percent of variation explained"
                              " was not found, this information is required in coordinates files")
    # The last two lines hold the eigenvalues and % variation explained.
    eigvals = asarray(body[-2].split('\t')[1:], dtype=float)
    pct_var = asarray(body[-1].split('\t')[1:], dtype=float)
    # Everything before them is the per-sample coordinate table.
    labels = []
    rows = []
    for data_line in body[:-2]:
        cells = [cell.strip() for cell in data_line.split('\t')]
        labels.append(cells[0])
        rows.append([float(cell) for cell in cells[1:]])
    return labels, asarray(rows), eigvals, pct_var
| [
"numpy.asarray"
] | [((8666, 8684), 'numpy.asarray', 'asarray', (['otu_table'], {}), '(otu_table)\n', (8673, 8684), False, 'from numpy import asarray\n'), ((10750, 10765), 'numpy.asarray', 'asarray', (['result'], {}), '(result)\n', (10757, 10765), False, 'from numpy import asarray\n'), ((7263, 7303), 'numpy.asarray', 'asarray', (['fields[1:-1]'], {'dtype': 'count_map_f'}), '(fields[1:-1], dtype=count_map_f)\n', (7270, 7303), False, 'from numpy import asarray\n'), ((8035, 8073), 'numpy.asarray', 'asarray', (['fields[1:]'], {'dtype': 'count_map_f'}), '(fields[1:], dtype=count_map_f)\n', (8042, 8073), False, 'from numpy import asarray\n'), ((7390, 7424), 'numpy.asarray', 'asarray', (['fields[1:-1]'], {'dtype': 'float'}), '(fields[1:-1], dtype=float)\n', (7397, 7424), False, 'from numpy import asarray\n'), ((8160, 8192), 'numpy.asarray', 'asarray', (['fields[1:]'], {'dtype': 'float'}), '(fields[1:], dtype=float)\n', (8167, 8192), False, 'from numpy import asarray\n')] |
#
# Copyright (C) 2000-2008 <NAME>
#
""" Contains the class _NetNode_ which is used to represent nodes in neural nets
**Network Architecture:**
A tacit assumption in all of this stuff is that we're dealing with
feedforward networks.
The network itself is stored as a list of _NetNode_ objects. The list
is ordered in the sense that nodes in earlier/later layers than a
given node are guaranteed to come before/after that node in the list.
This way we can easily generate the values of each node by moving
sequentially through the list, we're guaranteed that every input for a
node has already been filled in.
Each node stores a list (_inputNodes_) of indices of its inputs in the
main node list.
"""
import numpy
from . import ActFuncs
# FIX: this class has not been updated to new-style classes
# (RD Issue380) because that would break all of our legacy pickled
# data. Until a solution is found for this breakage, an update is
# impossible.
class NetNode:
  """ A single node in a feedforward neural network.

  Each node keeps the indices of its inputs into the shared, ordered
  node list (_inputNodes_), a matching weight vector (_weights_), and an
  activation-function instance.
  """

  def Eval(self, valVect):
    """Given a set of inputs (valVect), returns the output of this node

    **Arguments**

      - valVect: a list of inputs

    **Returns**

      the result of running the values in valVect through this node
    """
    if not self.inputNodes:
      # no inputs (e.g. a bias/input-layer node): output is fixed at 1
      res = 1
    else:
      # grab our weighted inputs from the shared value vector
      weighted = self.weights * numpy.take(valVect, self.inputNodes)
      # run the summed input through the activation function
      res = self.actFunc(sum(weighted))
    # cache our value in the vector and return it (just in case)
    valVect[self.nodeIndex] = res
    return res

  def SetInputs(self, inputNodes):
    """ Sets the input list

    **Arguments**

      - inputNodes: a list of _NetNode_s which are to be used as inputs

    **Note**

      If this _NetNode_ already has weights set and _inputNodes_ is a different length,
      this will bomb out with an assertion.
    """
    if self.weights is not None:
      assert len(self.weights) == len(inputNodes), \
        'lengths of weights and nodes do not match'
    self.inputNodes = inputNodes[:]

  def GetInputs(self):
    """ returns the input list
    """
    return self.inputNodes

  def SetWeights(self, weights):
    """ Sets the weight list

    **Arguments**

      - weights: a list of values which are to be used as weights

    **Note**

      If this _NetNode_ already has _inputNodes_ and _weights_ is a different length,
      this will bomb out with an assertion.
    """
    if self.inputNodes:
      assert len(weights) == len(self.inputNodes), \
        'lengths of weights and nodes do not match'
    self.weights = numpy.array(weights)

  def GetWeights(self):
    """ returns the weight list
    """
    return self.weights

  def __init__(self, nodeIndex, nodeList, inputNodes=None, weights=None, actFunc=ActFuncs.Sigmoid,
               actFuncParms=()):
    """ Constructor

    **Arguments**

      - nodeIndex: the integer index of this node in _nodeList_
      - nodeList: the list of other _NetNodes_ already in the network
      - inputNodes: a list of this node's inputs
      - weights: a list of this node's weights
      - actFunc: the activation function to be used here.  Must support the API
          of _ActFuncs.ActFunc_.
      - actFuncParms: a tuple of extra arguments to be passed to the activation function
          constructor.

    **Note**

      There should be only one copy of _inputNodes_; every _NetNode_ just has a pointer
      to it so that changes made at one node propagate automatically to the others.
    """
    if inputNodes and weights:
      assert len(weights) == len(inputNodes)
    self.weights = numpy.array(weights) if weights else None
    self.inputNodes = inputNodes[:] if inputNodes else None
    self.nodeIndex = nodeIndex
    # there's only one of these; everybody shares a pointer to it
    self.nodeList = nodeList
    self.actFunc = actFunc(*actFuncParms)
| [
"numpy.take",
"numpy.array"
] | [((2764, 2784), 'numpy.array', 'numpy.array', (['weights'], {}), '(weights)\n', (2775, 2784), False, 'import numpy\n'), ((1391, 1427), 'numpy.take', 'numpy.take', (['valVect', 'self.inputNodes'], {}), '(valVect, self.inputNodes)\n', (1401, 1427), False, 'import numpy\n'), ((3842, 3862), 'numpy.array', 'numpy.array', (['weights'], {}), '(weights)\n', (3853, 3862), False, 'import numpy\n')] |
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
'''
Created on 7/7/2019
@author: <NAME>
Utility functions for dealing with netcdf data
'''
import netCDF4
import numpy as np
from garjmcmctdem_utils import spatial_functions
from garjmcmctdem_utils.misc_utils import pickle2xarray
import pandas as pd
def object2array(variable, dtype):
    """Helper function for wrapping a bare value in a list.

    Parameters
    ----------
    variable : object
        Python object
    dtype : python datatype
        the type that marks *variable* as a single (non-list) value

    Returns
    -------
    list
        [variable] when variable is exactly of type dtype, otherwise the
        object unchanged (assumed to already be a sequence)
    """
    # exact type comparison on purpose: subclasses (e.g. bool for int)
    # are passed through untouched, matching the original behaviour
    if type(variable) == dtype:
        return [variable]
    return variable
def get_lines(dataset, line_numbers, variables):
    """
    Generator yielding (line_number, {variable: array}) for each requested
    AEM line.

    @param dataset: netcdf dataset
    @param line_numbers: integer AEM line number or list thereof
    @param variables: variable name (string) or list thereof
    """
    # allow single values to be passed for either argument
    variables = object2array(variables, str)
    line_numbers = object2array(line_numbers, int)
    # check the input really is a netCDF dataset
    if not dataset.__class__ == netCDF4._netCDF4.Dataset:
        raise ValueError("Input datafile is not netCDF4 format")
    for line in line_numbers:
        # boolean mask of the points belonging to this line
        line_idx = np.where(dataset['line'][:] == line)[0]
        point_mask = dataset['line_index'][:] == line_idx
        # collect the masked arrays for every requested variable
        line_dict = {}
        for var in variables:
            leading_dim = dataset[var].dimensions[0]
            if leading_dim == 'point':
                line_dict[var] = dataset[var][point_mask]
            elif leading_dim == 'depth':
                # depth-indexed variables are repeated for every point
                line_dict[var] = np.tile(dataset[var][:], [point_mask.sum(), 1])
        yield line, line_dict
def extract_rj_sounding(rj, det, point_index=0):
    """Extract rj-MCMC posterior summaries plus the nearest deterministic
    model for a single AEM sounding.

    @param rj: object wrapping an rj-MCMC netcdf dataset (uses rj.data)
    @param det: object wrapping a deterministic inversion (uses det.data,
        det.coords, det.section_path)
    @param point_index: integer index of the sounding to extract
    @return: dictionary with the posterior pdfs, percentile conductivity
        models, nearest deterministic model and ancillary metadata

    TODO: clean up this function or consider removing!!!
    """
    rj_dat = rj.data
    det_dat = det.data
    # np.float was deprecated in numpy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement
    freq = rj_dat['log10conductivity_histogram'][point_index].data.astype(float)
    easting = float(rj_dat['easting'][point_index].data)
    northing = float(rj_dat['northing'][point_index].data)
    # normalise histogram counts into probability densities; zero cells are
    # set to NaN so they plot as transparent
    cond_pdf = freq / freq.sum(axis=1)[0]
    cond_pdf[cond_pdf == 0] = np.nan
    cp_freq = rj_dat["interface_depth_histogram"][point_index].data.astype(float)
    cp_pdf = cp_freq / freq.sum(axis=1)[0]
    laybins = rj_dat['nlayers_histogram'][point_index].data
    lay_prob = laybins / freq.sum(axis=1)[0]
    # conductivity bin centres (log10 S/m) and plotting extent
    condmin, condmax = rj_dat.min_log10_conductivity, rj_dat.max_log10_conductivity
    ncond_cells = rj_dat.dimensions['conductivity_cells'].size
    cond_cells = np.linspace(condmin, condmax, ncond_cells)
    depth_cells = rj_dat['layer_centre_depth'][:]
    extent = [cond_cells.min(), cond_cells.max(), depth_cells.max(), depth_cells.min()]
    # percentile and mean models are stored as log10; convert to linear
    mean = np.power(10, rj_dat['conductivity_mean'][point_index].data)
    p10 = np.power(10, rj_dat['conductivity_p10'][point_index].data)
    p50 = np.power(10, rj_dat['conductivity_p50'][point_index].data)
    p90 = np.power(10, rj_dat['conductivity_p90'][point_index].data)
    # nearest deterministic sounding within 100 m
    distances, indices = spatial_functions.nearest_neighbours([easting, northing],
                                                              det.coords,
                                                              max_distance=100.)
    point_ind_det = indices[0]
    det_cond = det_dat['conductivity'][point_ind_det].data
    det_depth_top = det_dat['layer_top_depth'][point_ind_det].data
    det_doi = det_dat['depth_of_investigation'][point_ind_det].data
    # convergence diagnostics are not present in every file
    try:
        misfit = rj_dat['misfit'][point_index].data
    except IndexError:
        misfit = None
    try:
        sample_no = np.arange(1, rj_dat.dimensions['convergence_sample'].size + 1)
    except KeyError:
        sample_no = None
    burnin = rj_dat.nburnin
    nsamples = rj_dat.nsamples
    nchains = rj_dat.nchains
    # get line under new schema
    line_index = int(rj_dat['line_index'][point_index])
    line = int(rj_dat['line'][line_index])
    fiducial = float(rj_dat['fiducial'][point_index])
    elevation = rj_dat['elevation'][point_index]
    # section data are cached as pickled xarrays
    xarr = pickle2xarray(det.section_path[line])
    dist = spatial_functions.xy_2_var(xarr,
                                      np.array([[easting, northing]]),
                                      'grid_distances')
    return {'conductivity_pdf': cond_pdf, "change_point_pdf": cp_pdf, "conductivity_extent": extent,
            'cond_p10': p10, 'cond_p50': p50, 'cond_p90': p90, 'cond_mean': mean, 'depth_cells': depth_cells,
            'nlayer_bins': laybins, 'nlayer_prob': lay_prob, 'nsamples': nsamples, 'ndata': rj_dat.dimensions['data'].size,
            "nchains": nchains, 'burnin': burnin, 'misfit': misfit, 'sample_no': sample_no, 'cond_cells': cond_cells, 'det_cond': det_cond,
            'det_depth_top': det_depth_top, 'det_doi': det_doi, 'line': line, 'northing': northing, 'easting': easting, 'fiducial': fiducial,
            'elevation': elevation, 'det_dist': dist, 'det_line': xarr}
def testNetCDFDataset(netCDF_dataset):
    """Check whether the supplied object is a netCDF4 Dataset.

    TODO add a check of necessary parameters

    Parameters
    ----------
    netCDF_dataset : object
        candidate netcdf AEM dataset.

    Returns
    -------
    boolean
    """
    # exact class comparison, matching the original behaviour
    dataset_cls = netCDF4._netCDF4.Dataset
    return netCDF_dataset.__class__ == dataset_cls
def get_lookup_mask(lines, netCDF_dataset):
    """Return a boolean point mask selecting one or more AEM lines.

    Parameters
    ----------
    lines : array like
        line number or array of line numbers
    netCDF_dataset :
        netcdf dataset with variables 'line' and 'line_index'

    Returns
    -------
    boolean array
        True for every point whose line is in *lines*
    """
    # allow a single line number to be given
    lines = object2array(lines, int)
    # indices (into the 'line' variable) of the requested lines
    matching_inds = np.where(np.isin(netCDF_dataset['line'][:], lines))[0]
    # points reference lines through 'line_index'
    return np.isin(netCDF_dataset['line_index'], matching_inds)
def write_inversion_ready_file(dataset, outpath, nc_variables,
                               nc_formats, other_variables=None,
                               mask=None):
    """A function for writing an inversion ready .dat file. This file can be
    inverted using GA-AEM inversion algorithms.  A matching .hdr file with
    the column layout is written alongside it.

    Parameters
    ----------
    dataset : object
        Netcdf dataset
    outpath : string
        Path of inversion ready file.
    nc_variables : list
        List of variables from dataset.
        eg ["ga_project", "utc_date", "flight", "line", "fiducial",
            "easting", "northing", "tx_height_measured", "elevation",
            "gps_height", "roll", "pitch", "yaw", "TxRx_dx", "TxRx_dy",
            "TxRx_dz", "low_moment_Z-component_EM_data",
            "high_moment_Z-component_EM_data"]
    nc_formats : list
        List of formats for variables, one per entry in nc_variables.
        eg ['{:5d}','{:9.0F}','{:12.2F}','{:8.0F}','{:12.2F}','{:10.2F}',
            '{:11.2F}','{:8.1F}','{:9.2F}', '{:9.2F}','{:7.2F}','{:7.2F}',
            '{:7.2F}','{:7.2F}','{:7.2F}','{:7.2F}', '{:15.6E}', '{:15.6E}']
    other_variables : dictionary
        dictionary of additional variables with the name of the variable as
        the key, e.g.
        {'rel_uncertainty_low_moment_Z-component':
            {'array': numpy array, 'format': '{:15.6E}'}}
    mask : boolean array
        point mask; when None all points are written
    """
    # Create a keep-everything mask if none was supplied.
    # np.bool was removed from numpy (>=1.24); the builtin bool is equivalent.
    if mask is None:
        mask = np.ones(shape=(dataset.dimensions['point'].size), dtype=bool)
    # Empty dataframe with one row per retained point
    df = pd.DataFrame(index=range(mask.sum()))
    # Dictionary of {variable: {'array': ..., 'format': ...}}
    data = {}
    for i, var in enumerate(nc_variables):
        if var == 'line':
            # lines are stored via a lookup index
            line_inds = dataset['line_index'][mask]
            arr = dataset[var][line_inds].data
        elif var == 'flight':
            flight_inds = dataset['flight_index'][mask]
            arr = dataset[var][flight_inds].data
        elif len(dataset[var].shape) == 0:
            # scalar variables are broadcast to every retained point
            arr = np.repeat(dataset[var][:].data, mask.sum())
        else:
            arr = dataset[var][mask].data
        data[var] = {'array': arr,
                     'format': nc_formats[i]}
    # Add any additional (non-netcdf) columns
    if other_variables is not None:
        for item in other_variables.keys():
            # apply the same point mask
            data[item] = {'array': other_variables[item]['array'][mask],
                          'format': other_variables[item]['format']}
    # Build the dataframe of pre-formatted strings
    for item in data:
        arr = data[item]['array']
        if len(arr.shape) < 2:
            df[item] = [data[item]['format'].format(x) for x in arr]
        else:
            # for 2D variables (e.g. multi-gate EM data) write one column
            # per gate
            for i in range(arr.shape[1]):
                df[item + '_' + str(i + 1)] = [data[item]['format'].format(x) for x in arr[:, i]]
    # Concatenate the fixed-width columns row-wise and dump to disk
    df.apply(lambda row: ''.join(map(str, row)), axis=1).to_csv(outpath, sep=',', index=False, header=False)
    # Write the .hdr file describing the column layout.
    # Bug fix: this previously referenced the undefined name `outfile`,
    # raising NameError before the header could be written.
    header_file = '.'.join(outpath.split('.')[:-1]) + '.hdr'
    counter = 1
    with open(header_file, 'w') as f:
        for item in data.keys():
            shape = data[item]['array'].shape
            if len(shape) == 1:
                f.write(''.join([item, ' ', str(counter), '\n']))
                counter += 1
            else:
                f.write(''.join([item, ' ', str(counter), '-', str(counter + shape[1] - 1), '\n']))
                counter += shape[1]
def get_sorted_line_inds(dataset, line, how="east-west", subset=1):
    """Return the point indices of an AEM line sorted along a direction.

    Parameters
    ----------
    dataset : netcdf dataset
        dataset with 'line', 'line_index', 'fiducial', 'easting' and
        'northing' variables
    line : integer
        the AEM line number
    how : string
        one of 'east-west', 'west-east', 'north-south', 'south-north'
    subset : integer
        stride with which to subset the points

    Returns
    -------
    array
        Array of point indices
    """
    err_message = 'Please specify one of \'east-west\', \'west-east\',\'north-south\', or \'south-north\''
    assert how in ['east-west', 'west-east', 'north-south', 'south-north'], err_message
    # indices of this line's points, ordered by fiducial
    line_inds = np.where(get_lookup_mask([line], dataset))[0]
    sort_mask = line_inds[np.argsort(dataset['fiducial'][line_inds])]
    # pick the coordinate that defines the direction, then flip the
    # fiducial ordering when it runs the wrong way
    coord = 'easting' if how in ('east-west', 'west-east') else 'northing'
    first = dataset[coord][sort_mask][0]
    last = dataset[coord][sort_mask][-1]
    descending = how in ('east-west', 'north-south')
    if (descending and first < last) or (not descending and first > last):
        sort_mask = sort_mask[::-1]
    return sort_mask[::subset]
| [
"numpy.isin",
"numpy.power",
"garjmcmctdem_utils.spatial_functions.nearest_neighbours",
"numpy.float",
"numpy.ones",
"garjmcmctdem_utils.misc_utils.pickle2xarray",
"numpy.argsort",
"numpy.where",
"numpy.arange",
"numpy.array",
"numpy.linspace"
] | [((2936, 2981), 'numpy.float', 'np.float', (["rj_dat['easting'][point_index].data"], {}), "(rj_dat['easting'][point_index].data)\n", (2944, 2981), True, 'import numpy as np\n'), ((2997, 3043), 'numpy.float', 'np.float', (["rj_dat['northing'][point_index].data"], {}), "(rj_dat['northing'][point_index].data)\n", (3005, 3043), True, 'import numpy as np\n'), ((3532, 3574), 'numpy.linspace', 'np.linspace', (['condmin', 'condmax', 'ncond_cells'], {}), '(condmin, condmax, ncond_cells)\n', (3543, 3574), True, 'import numpy as np\n'), ((3780, 3839), 'numpy.power', 'np.power', (['(10)', "rj_dat['conductivity_mean'][point_index].data"], {}), "(10, rj_dat['conductivity_mean'][point_index].data)\n", (3788, 3839), True, 'import numpy as np\n'), ((3849, 3907), 'numpy.power', 'np.power', (['(10)', "rj_dat['conductivity_p10'][point_index].data"], {}), "(10, rj_dat['conductivity_p10'][point_index].data)\n", (3857, 3907), True, 'import numpy as np\n'), ((3917, 3975), 'numpy.power', 'np.power', (['(10)', "rj_dat['conductivity_p50'][point_index].data"], {}), "(10, rj_dat['conductivity_p50'][point_index].data)\n", (3925, 3975), True, 'import numpy as np\n'), ((3985, 4043), 'numpy.power', 'np.power', (['(10)', "rj_dat['conductivity_p90'][point_index].data"], {}), "(10, rj_dat['conductivity_p90'][point_index].data)\n", (3993, 4043), True, 'import numpy as np\n'), ((4069, 4162), 'garjmcmctdem_utils.spatial_functions.nearest_neighbours', 'spatial_functions.nearest_neighbours', (['[easting, northing]', 'det.coords'], {'max_distance': '(100.0)'}), '([easting, northing], det.coords,\n max_distance=100.0)\n', (4105, 4162), False, 'from garjmcmctdem_utils import spatial_functions\n'), ((5206, 5243), 'garjmcmctdem_utils.misc_utils.pickle2xarray', 'pickle2xarray', (['det.section_path[line]'], {}), '(det.section_path[line])\n', (5219, 5243), False, 'from garjmcmctdem_utils.misc_utils import pickle2xarray\n'), ((6898, 6946), 'numpy.isin', 'np.isin', (["netCDF_dataset['line_index']", 
'line_inds'], {}), "(netCDF_dataset['line_index'], line_inds)\n", (6905, 6946), True, 'import numpy as np\n'), ((4647, 4709), 'numpy.arange', 'np.arange', (['(1)', "(rj_dat.dimensions['convergence_sample'].size + 1)"], {}), "(1, rj_dat.dimensions['convergence_sample'].size + 1)\n", (4656, 4709), True, 'import numpy as np\n'), ((5327, 5358), 'numpy.array', 'np.array', (['[[easting, northing]]'], {}), '([[easting, northing]])\n', (5335, 5358), True, 'import numpy as np\n'), ((8414, 8476), 'numpy.ones', 'np.ones', ([], {'shape': "dataset.dimensions['point'].size", 'dtype': 'np.bool'}), "(shape=dataset.dimensions['point'].size, dtype=np.bool)\n", (8421, 8476), True, 'import numpy as np\n'), ((11526, 11568), 'numpy.argsort', 'np.argsort', (["dataset['fiducial'][line_inds]"], {}), "(dataset['fiducial'][line_inds])\n", (11536, 11568), True, 'import numpy as np\n'), ((6840, 6881), 'numpy.isin', 'np.isin', (["netCDF_dataset['line'][:]", 'lines'], {}), "(netCDF_dataset['line'][:], lines)\n", (6847, 6881), True, 'import numpy as np\n'), ((2211, 2247), 'numpy.where', 'np.where', (["(dataset['line'][:] == line)"], {}), "(dataset['line'][:] == line)\n", (2219, 2247), True, 'import numpy as np\n')] |
from numpy import sqrt, exp, pi, power, tanh, vectorize
# time constants for model: Postnova et al. 2018 - Table 1 (all in seconds)
tau_v = 50.0 #s  # VLPO mean-voltage time constant
tau_m = tau_v  # MA population shares the VLPO time constant
tau_H = 59.0*3600.0 #s  # homeostatic drive (59 h)
tau_X = (24.0*3600.0) / (2.0*pi) #s  # circadian oscillator time scaling
tau_Y = tau_X
tau_C = 24.2*3600.0 #s  # intrinsic circadian period (24.2 h)
tau_A = 1.5*3600.0 #s # 1.5 hours # Tekieh et al. 2020 - Section 2.3.2, after Equation 9
tau_L = 24.0*60.0 #s # 24 min # Tekieh et al. 2020 - Section 3.3
# coupling strengths constants: Postnova et al. 2018 - Table 1
nu_vm = -2.1 #mV  # MA firing -> VLPO voltage
nu_mv = -1.8 #mV  # VLPO firing -> MA voltage
nu_Hm = 4.57 #s  # MA firing -> homeostatic drive
nu_Xp = 37.0*60.0 #s  # photic coupling into X
nu_Xn = 0.032  # nonphotic coupling into X
nu_YY = (1.0/3.0)*nu_Xp
nu_YX = 0.55*nu_Xp
nu_vH = 1.0  # homeostatic weight in the total sleep drive
nu_vC = -0.5 #mV  # circadian weight in the total sleep drive
nu_LA = -0.11 # Tekieh et al. 2020 - Section 3.3
# nu_LA = -0.4 # testing: good results with -0.4
# circadian constants: Postnova et al. 2018 - Table 1
gamma = 0.13
delta = 24.0*3600.0/0.99729 #s
beta = 0.007/60.0 #s^-1
# external neuronal drives constants: Postnova et al. 2018 - Table 1
D_m = 1.3 #mV  # constant wake-promoting drive to the MA population
def wake_effort(Q_v, forced=0):
    """Wake effort drive.

    Inputs:
        Q_v: mean population firing rate of the VLPO
        forced: 1 if wake is being forced, 0 (default) otherwise
    Outputs:
        W: wake effort
    """
    V_WE = -0.07  # mV, wake effort constant: Postnova et al. 2018 - Table 1
    # Postnova et al. 2018 - Table 1, Equation 8: effort is only exerted
    # during forced wake and is never negative
    return forced * max(0, V_WE - nu_mv*Q_v - D_m)
wake_effort_v = vectorize(wake_effort)
def total_sleep_drive(H, C):
    """Total sleep drive to the VLPO.

    Inputs:
        H: homeostatic drive
        C: circadian drive, sleep propensity model
    Outputs:
        D_v: total sleep drive
    """
    A_v = -10.3  # mV, constant drive: Postnova et al. 2018 - Table 1
    # Postnova et al. 2018 - Table 1, Equation 9
    return nu_vH*H + nu_vC*C + A_v
total_sleep_drive_v = vectorize(total_sleep_drive)
def nonphotic_drive(X, S):
    """Nonphotic drive to the circadian oscillator.

    Inputs:
        X: circadian variable
        S: Wake = 1 or Sleep = 0 state
    Outputs:
        D_n: nonphotic drive to the circadian
    """
    r = 10.0  # nonphotic drive constant: Postnova et al. 2018 - Table 1
    # Postnova et al. 2018 - Table 1, Equation 11
    return (S - 2.0/3.0) * (1 - tanh(r*X))
nonphotic_drive_v = vectorize(nonphotic_drive)
def photoreceptor_conversion_rate(IE, S, version='2020'):
    """Photoreceptor conversion rate.

    Inputs:
        IE: illuminance in lux ('2018' version) or melanopic irradiance
            ('2020' version)
        S: Wake = 1 or Sleep = 0 state
        version: '2018' uses illuminance, '2020' uses melanopic irradiance
    Output:
        alpha: the photoreceptor conversion rate
    """
    # no light reaches the photoreceptors during sleep:
    # Postnova et al. 2018 - Table 1, Equation 14
    IE = IE * S
    # photic drive constants: Postnova et al. 2018 - Table 1
    I_0 = 100  # lx
    I_1 = 9500  # lx
    alpha_0 = 0.1 / 60.0  # s^-1
    if version == '2018':
        # Postnova et al. 2018 - Table 1, Equation 13
        half_sat = I_1
        norm = I_0
    if version == '2020':
        # Tekieh et al. 2020 - Equations 5 and 7: the 2018 parameters are
        # rescaled by the melanopic conversion factor of a 4100 K source
        F_4100K = 8.19e-4
        half_sat = I_1 * F_4100K
        norm = I_0 * F_4100K
    alpha = ((alpha_0 * IE) / (IE + half_sat)) * sqrt(IE / norm)
    return alpha
photoreceptor_conversion_rate_v = vectorize(photoreceptor_conversion_rate)
def photic_drive(X, Y, P, alpha):
    """Photic drive to the circadian oscillator.

    Inputs:
        X, Y: circadian variables
        P: photoreceptor activity
        alpha: photoreceptor conversion rate
    Outputs:
        D_p: photic drive to the circadian
    """
    epsilon = 0.4  # photic drive constant: Postnova et al. 2018 - Table 1
    # Postnova et al. 2018 - Table 1, Equation 12
    return alpha * (1 - P) * (1 - epsilon*X) * (1 - epsilon*Y)
photic_drive_v = vectorize(photic_drive)
def mean_population_firing_rate(V_i):
    """Mean population firing rate.

    Inputs:
        V_i: mean voltage of a population (V_v for the VLPO, V_m for the MA)
    Output:
        Q: mean population firing rate
    """
    # firing rate constants: Postnova et al. 2018 - Table 1
    Q_max = 100.0  # s^-1
    theta = 10.0  # mV
    sigma_prime = 3.0  # mV
    # sigmoidal voltage-to-rate conversion:
    # Postnova et al. 2018 - Table 1, Equation 7
    return Q_max / (1 + exp((theta - V_i) / sigma_prime))
mean_population_firing_rate_v = vectorize(mean_population_firing_rate)
def state(V_m):
    """Wake/sleep state.
    Postnova et al. 2018 - Table 1, Equation 15

    Input:
        V_m: mean voltage of the monoaminergic (MA) wake-active populations
    Output:
        S: sleep state, 1 is awake, 0 is asleep
    """
    V_th = -2.0  # mV, threshold: Postnova et al. 2018 - Table 1
    return 1 if V_m > V_th else 0
state_v = vectorize(state)
def sigmoid(E_emel):
    """Alerting-effect sigmoid of melanopic irradiance, in [0, 1].

    Inputs:
        E_emel: melanopic irradiance
    Outputs:
        S: sigmoid value in [0, 1]
    """
    # irradiance at half-maximal alerting effect and curve steepness:
    # Tekieh et al. 2020 - Section 2.3.3 (the paper lists S_c^-1 = 223.5 m^2/W)
    S_b = 0.05  # W/m^2
    S_c = 1 / 223.5  # m^2/W
    # although the sigmoid was first defined for illuminance, the parameters
    # were computed for irradiance, so irradiance is used directly
    # Tekieh et al. 2020 - Equation 14
    return 1 / (1 + exp((S_b - E_emel) / S_c))
sigmoid_v = vectorize(sigmoid)
def alertness_measure(C, H, Theta_L=0):
    """Alertness measure on the Karolinska Sleepiness Scale (KSS).

    The KSS ranges from 1 = "Extremely alert" to
    9 = "Extremely sleepy, fighting sleep."

    Inputs:
        C: circadian drive, sleep propensity model
        H: homeostatic drive
        Theta_L: light-dependent modulation of the homeostatic weight
    Outputs:
        AM: alertness measure on the KSS
    """
    # KSS default parameters: Postnova et al. 2018 - Table 3
    Theta_0 = -24.34
    Theta_H = 2.28
    Theta_C = -1.74
    # Postnova et al. 2018 - Equation 23; Tekieh et al. 2020 - Equation 12
    return Theta_0 + (Theta_H + Theta_L)*H + Theta_C*C
alertness_measure_v = vectorize(alertness_measure)
def circadian_drive(X, Y):
    """Circadian drive, sleep propensity model.

    Inputs:
        X, Y: circadian variables
    Outputs:
        C: circadian drive
    """
    # Postnova et al. 2016 - Equations 1, 2, and 3
    linear_term = 0.1 * ((1.0 + X) / 2.0)
    ratio = (3.1*X - 2.5*Y + 4.2) / (3.7*(X + 2))
    return linear_term + power(ratio, 2)
circadian_drive_v = vectorize(circadian_drive)
def melatonin_suppression(E_emel):
    """Melatonin suppression factor.

    Inputs:
        E_emel: melanopic irradiance
    Outputs:
        r: melatonin suppression
    """
    # sigmoid parameters: Tekieh et al. 2020 - Section 2.3.2, after Equation 9
    r_a = 1
    r_b = 0.031  # W/m^2
    r_c = 0.82
    # Tekieh et al. 2020 - Equation 9
    return 1 - (r_a / (1 + power(E_emel/r_b, -r_c)))
melatonin_suppression_v = vectorize(melatonin_suppression)
def model(y, t, input_function, forced_wake, minE, maxE, version = '2020'):
    """Right-hand side of the coupled sleep-wake / circadian ODE system.

    State vector y unpacks to (V_v, V_m, H, X, Y, P, Theta_L):
      V_v, V_m: mean voltages of the VLPO and MA populations;
      H: homeostatic drive; X, Y: circadian variables;
      P: photoreceptor activity;
      Theta_L: light-dependent modulation of the homeostatic weight.
    t: time; input_function(t): light input (illuminance or melanopic
    irradiance, matching `version`); forced_wake(t): forced-wake flag;
    minE, maxE: light-input bounds used to rescale the alerting sigmoid
    to [0, 1]; version: '2018' or '2020' photoreceptor model.
    Returns the list of the seven state derivatives.
    """
    V_v, V_m, H, X, Y, P, Theta_L = y
    IE = input_function(t)
    S = state(V_m)
    # so many things can go wrong with this sigmoid definition
    # what's the threshold irradiance that creates a locally measurable impact on the KSS?
    Sigmoid = ( sigmoid(IE) - sigmoid(minE) ) / ( sigmoid(maxE) - sigmoid(minE) ) # Tekieh et al. 2020 - Section 2.3.3: scaling to [0,1]
    alpha = photoreceptor_conversion_rate(IE, S, version)
    Q_m = mean_population_firing_rate(V_m)
    Q_v = mean_population_firing_rate(V_v)
    C = circadian_drive(X,Y)
    D_v = total_sleep_drive(H,C)
    D_n = nonphotic_drive(X, S)
    D_p = photic_drive(X, Y, P, alpha)
    F_w = forced_wake(t)
    W = wake_effort(Q_v, F_w)
    gradient_y = [(nu_vm*Q_m - V_v + D_v)/tau_v, # V_v, Postnova et al. 2018 - Table 1, Equation 1
                  (nu_mv*Q_v - V_m + D_m + W)/tau_m, # V_m, Postnova et al. 2018 - Table 1, Equation 2
                  (nu_Hm*Q_m - H)/tau_H, # H, Postnova et al. 2018 - Table 1, Equation 3
                  (Y + gamma*(X/3.0 + power(X,3)*4.0/3.0 - power(X,7)*256.0/105.0) + nu_Xp*D_p + nu_Xn*D_n)/tau_X, # X, Postnova et al. 2018 - Table 1, Equation 4
                  (D_p*(nu_YY*Y - nu_YX*X) - power((delta/tau_C),2)*X)/tau_Y, # Y, Postnova et al. 2018 - Table 1, Equation 5
                  alpha*(1-P)-(beta*P), # P, Postnova et al. 2018 - Table 1, Equation 6, revised
                  (-Theta_L + nu_LA*Sigmoid)/tau_L # Tekieh et al. 2020 - Equation 13
                  ]
return gradient_y | [
"numpy.vectorize",
"numpy.tanh",
"numpy.power",
"numpy.exp",
"numpy.sqrt"
] | [((1333, 1355), 'numpy.vectorize', 'vectorize', (['wake_effort'], {}), '(wake_effort)\n', (1342, 1355), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((1767, 1795), 'numpy.vectorize', 'vectorize', (['total_sleep_drive'], {}), '(total_sleep_drive)\n', (1776, 1795), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((2212, 2238), 'numpy.vectorize', 'vectorize', (['nonphotic_drive'], {}), '(nonphotic_drive)\n', (2221, 2238), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((3224, 3264), 'numpy.vectorize', 'vectorize', (['photoreceptor_conversion_rate'], {}), '(photoreceptor_conversion_rate)\n', (3233, 3264), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((3736, 3759), 'numpy.vectorize', 'vectorize', (['photic_drive'], {}), '(photic_drive)\n', (3745, 3759), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((4250, 4288), 'numpy.vectorize', 'vectorize', (['mean_population_firing_rate'], {}), '(mean_population_firing_rate)\n', (4259, 4288), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((4711, 4727), 'numpy.vectorize', 'vectorize', (['state'], {}), '(state)\n', (4720, 4727), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((5506, 5524), 'numpy.vectorize', 'vectorize', (['sigmoid'], {}), '(sigmoid)\n', (5515, 5524), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((6230, 6258), 'numpy.vectorize', 'vectorize', (['alertness_measure'], {}), '(alertness_measure)\n', (6239, 6258), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((6584, 6610), 'numpy.vectorize', 'vectorize', (['circadian_drive'], {}), '(circadian_drive)\n', (6593, 6610), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((7053, 7085), 'numpy.vectorize', 'vectorize', (['melatonin_suppression'], {}), '(melatonin_suppression)\n', (7062, 7085), False, 'from numpy import sqrt, exp, pi, power, tanh, 
vectorize\n'), ((6458, 6511), 'numpy.power', 'power', (['((3.1 * X - 2.5 * Y + 4.2) / (3.7 * (X + 2)))', '(2)'], {}), '((3.1 * X - 2.5 * Y + 4.2) / (3.7 * (X + 2)), 2)\n', (6463, 6511), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((2119, 2130), 'numpy.tanh', 'tanh', (['(r * X)'], {}), '(r * X)\n', (2123, 2130), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((2914, 2928), 'numpy.sqrt', 'sqrt', (['(IE / I_0)'], {}), '(IE / I_0)\n', (2918, 2928), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((3114, 3140), 'numpy.sqrt', 'sqrt', (['(IE / (I_0 * F_4100K))'], {}), '(IE / (I_0 * F_4100K))\n', (3118, 3140), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((4129, 4161), 'numpy.exp', 'exp', (['((theta - V_i) / sigma_prime)'], {}), '((theta - V_i) / sigma_prime)\n', (4132, 4161), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((5421, 5446), 'numpy.exp', 'exp', (['((S_b - E_emel) / S_c)'], {}), '((S_b - E_emel) / S_c)\n', (5424, 5446), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((6954, 6979), 'numpy.power', 'power', (['(E_emel / r_b)', '(-r_c)'], {}), '(E_emel / r_b, -r_c)\n', (6959, 6979), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((8403, 8426), 'numpy.power', 'power', (['(delta / tau_C)', '(2)'], {}), '(delta / tau_C, 2)\n', (8408, 8426), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((8254, 8265), 'numpy.power', 'power', (['X', '(7)'], {}), '(X, 7)\n', (8259, 8265), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n'), ((8233, 8244), 'numpy.power', 'power', (['X', '(3)'], {}), '(X, 3)\n', (8238, 8244), False, 'from numpy import sqrt, exp, pi, power, tanh, vectorize\n')] |
#!/usr/bin/env python2
#
# This files can be used to benchmark different classifiers
# on lfw dataset with known and unknown dataset.
# More info at: https://github.com/cmusatyalab/openface/issues/144
# <NAME> & <NAME>
# 2016/06/28
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import shutil # For copy images
import errno
import sys
import operator
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from nolearn.dbn import DBN
import multiprocessing
# Resolve model directories relative to this script's real location.
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
sys.path.append('./util/')
# __import__ is required because the module name contains a dash and
# cannot be imported with a plain `import` statement.
align_dlib = __import__('align-dlib')

# The list of available classifiers. The list is used in train() and
# inferFromTest() functions.
clfChoices = [
    'LinearSvm',
    'GMM',
    'RadialSvm',
    'DecisionTree',
    'GaussianNB',
    'DBN']
def train(args):
    """Train one classifier per entry of ``clfChoices`` on the embeddings
    in ``args.workDir`` and pickle each (LabelEncoder, classifier) pair
    to ``<workDir>/<clfChoice>.pkl``.

    Expects ``labels.csv`` and ``reps.csv`` (produced by batch-represent)
    inside ``args.workDir``.
    """
    start = time.time()
    for clfChoice in clfChoices:
        print("Loading embeddings.")
        fname = "{}/labels.csv".format(args.workDir)
        labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
        # Reduce each image path to its parent directory name (the person).
        labels = map(itemgetter(1),
                     map(os.path.split,
                         map(os.path.dirname, labels)))  # Get the directory.
        fname = "{}/reps.csv".format(args.workDir)
        embeddings = pd.read_csv(fname, header=None).as_matrix()
        le = LabelEncoder().fit(labels)
        labelsNum = le.transform(labels)
        nClasses = len(le.classes_)
        print("Training for {} classes.".format(nClasses))

        if clfChoice == 'LinearSvm':
            clf = SVC(C=1, kernel='linear', probability=True)
        elif clfChoice == 'GMM':  # Doesn't work best
            clf = GMM(n_components=nClasses)
        # ref:
        # http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
        elif clfChoice == 'RadialSvm':  # Radial Basis Function kernel
            # works better with C = 1 and gamma = 2
            clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
        elif clfChoice == 'DecisionTree':  # Doesn't work best
            clf = DecisionTreeClassifier(max_depth=20)
        elif clfChoice == 'GaussianNB':
            clf = GaussianNB()
        # ref: https://jessesw.com/Deep-Learning/
        elif clfChoice == 'DBN':
            if args.verbose:
                verbose = 1
            else:
                verbose = 0
            clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1],  # i/p nodes, hidden nodes, o/p nodes
                      learn_rates=0.3,
                      # Smaller steps mean a possibly more accurate result, but the
                      # training will take longer
                      learn_rate_decays=0.9,
                      # a factor the initial learning rate will be multiplied by
                      # after each iteration of the training
                      epochs=300,  # no of iternation
                      # dropouts = 0.25, # Express the percentage of nodes that
                      # will be randomly dropped as a decimal.
                      verbose=verbose)

        if args.ldaDim > 0:
            # Optionally reduce dimensionality with LDA before the classifier.
            clf_final = clf
            clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
                            ('clf', clf_final)])
        clf.fit(embeddings, labelsNum)

        fName = os.path.join(args.workDir, clfChoice + ".pkl")
        print("Saving classifier to '{}'".format(fName))
        # Pickle data is binary: 'wb' is required on Python 3 and safer on
        # Python 2/Windows (the original text-mode 'w' corrupts the stream).
        with open(fName, 'wb') as f:
            pickle.dump((le, clf), f)
    if args.verbose:
        print(
            "Training and saving the classifiers took {} seconds.".format(
                time.time() - start))
def getRep(imgPath):
    """Return the OpenFace embedding for the largest face in ``imgPath``.

    The image is read with OpenCV, converted to RGB, the largest face is
    detected and aligned, and the aligned crop is run through the global
    ``net``.  Raises Exception on a missing image, missing face, or a
    failed alignment.  Stage timings are printed when ``args.verbose``.
    """
    def report(message, since):
        # Emit one timing line (verbose only) and restart the stopwatch.
        if args.verbose:
            print(message.format(time.time() - since))
        return time.time()

    t0 = time.time()
    img_bgr = cv2.imread(imgPath)
    if img_bgr is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    # OpenCV loads images as BGR; the network expects RGB.
    img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
    if args.verbose:
        print(" + Original size: {}".format(img_rgb.shape))
    t0 = report("Loading the image took {} seconds.", t0)

    box = align.getLargestFaceBoundingBox(img_rgb)
    if box is None:
        raise Exception("Unable to find a face: {}".format(imgPath))
    t0 = report("Face detection took {} seconds.", t0)

    alignedFace = align.align(
        args.imgDim,
        img_rgb,
        box,
        landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if alignedFace is None:
        raise Exception("Unable to align image: {}".format(imgPath))
    t0 = report("Alignment took {} seconds.", t0)

    rep = net.forward(alignedFace)
    report("Neural network forward pass took {} seconds.", t0)
    return rep
def inferFromTest(args):
    """Evaluate every pickled classifier in ``args.featureFolder[0]``
    against the labelled test images under ``args.testFolder[0]`` and
    print per-classifier accuracy and average confidence.

    When ``--unknown`` is given, a prediction with confidence at or
    below ``args.threshold`` is remapped to the synthetic "_unknown"
    class before scoring.
    """
    for clfChoice in clfChoices:
        print ("===============")
        print ("Using the classifier: " + clfChoice)

        # Pickles are binary; 'rb' is required on Python 3 (the original
        # text-mode 'r' breaks pickle.load there) and harmless on Python 2.
        with open(os.path.join(args.featureFolder[0], clfChoice + ".pkl"), 'rb') as f_clf:
            if sys.version_info[0] < 3:
                (le, clf) = pickle.load(f_clf)
            else:
                (le, clf) = pickle.load(f_clf, encoding='latin1')

        correctPrediction = 0
        inCorrectPrediction = 0
        sumConfidence = 0.0

        # One sub-directory per person; ignore macOS .DS_Store entries.
        testSet = [
            os.path.join(
                args.testFolder[0], f) for f in os.listdir(
                args.testFolder[0]) if not f.endswith('.DS_Store')]
        for personSet in testSet:
            personImages = [os.path.join(personSet, f) for f in os.listdir(
                personSet) if not f.endswith('.DS_Store')]
            for img in personImages:
                if args.verbose:
                    print("\n=== {} ===".format(img.split('/')[-1:][0]))
                try:
                    rep = getRep(img).reshape(1, -1)
                except Exception as e:
                    # Best-effort: skip unreadable/undetectable images.
                    print (e)
                    continue
                start = time.time()
                predictions = clf.predict_proba(rep).ravel()
                maxI = np.argmax(predictions)
                person = le.inverse_transform(maxI)
                confidence = predictions[maxI]
                if args.verbose:
                    print(
                        "Prediction took {} seconds.".format(
                            time.time() - start))
                if args.verbose:
                    print(
                        "Predict {} with {:.2f} confidence.".format(
                            person.decode('utf-8'), confidence))
                sumConfidence += confidence
                if confidence <= args.threshold and args.unknown:
                    person = "_unknown"
                # Correct when the file-name prefix matches the predicted
                # person (known mode) or the prediction is "_unknown"
                # (unknown mode).
                if (img.split('/')[-1:][0].split('.')[0][:-5] == person and not args.unknown) or (person == "_unknown" and args.unknown):
                    correctPrediction += 1
                else:
                    inCorrectPrediction += 1
                if isinstance(clf, GMM) and args.verbose:
                    dist = np.linalg.norm(rep - clf.means_[maxI])
                    print(" + Distance from the mean: {}".format(dist))

        total = correctPrediction + inCorrectPrediction
        print ("Results for the classifier: " + clfChoice)
        print ("Correct Prediction :" + str(correctPrediction))
        print ("In-correct Prediction: " + str(inCorrectPrediction))
        if total == 0:
            # Guard against ZeroDivisionError when no image could be scored.
            print ("No predictions were made; accuracy is undefined.")
        else:
            print ("Accuracy :" + str(float(correctPrediction) / total))
            print ("Avg Confidence: " + str(float(sumConfidence) / total))
def preprocess(args):
    """Split the LFW dataset into known/unknown train/test folders, align
    the known training faces, and extract their features.

    Steps (each timed when ``args.verbose``):
      1. Sort people by number of images (descending).
      2. Copy the ``args.rangeOfPeople`` slice into train_known_raw.
      3. Move ~10% of each known person's images into test_known_raw.
      4. Copy everyone after the range into test_unknown_raw.
      5. Align train_known_raw via ./util/align-dlib.py (8 processes).
      6. Run batch-represent to extract features.
    """
    start = time.time()
    lfwPath = args.lfwDir
    destPath = args.featuresDir

    # Every person-directory under the LFW root.
    fullFaceDirectory = [os.path.join(lfwPath, f) for f in os.listdir(
        lfwPath) if not f.endswith('.DS_Store')]  # .DS_Store for the OS X

    noOfImages = []
    folderName = []

    # Count images per person; best-effort — unreadable entries are skipped.
    for folder in fullFaceDirectory:
        try:
            noOfImages.append(len(os.listdir(folder)))
            folderName.append(folder.split('/')[-1:][0])
            # print (folder.split('/')[-1:][0] +": " +
            # str(len(os.listdir(folder))))
        except:
            pass

    # Sorting
    # People sorted by image count, most images first.
    noOfImages_sorted, folderName_sorted = zip(
        *sorted(zip(noOfImages, folderName), key=operator.itemgetter(0), reverse=True))

    with open(os.path.join(destPath, "List_of_folders_and_number_of_images.txt"), "w") as text_file:
        for f, n in zip(folderName_sorted, noOfImages_sorted):
            text_file.write("{} : {} \n".format(f, n))

    if args.verbose:
        print ("Sorting lfw dataset took {} seconds.".format(time.time() - start))
        start = time.time()

    # Copy known train dataset
    # The range "a:b" selects people a..b-1 (sorted order) as "known".
    for i in range(int(args.rangeOfPeople.split(':')[0]), int(
            args.rangeOfPeople.split(':')[1])):
        src = os.path.join(lfwPath, folderName_sorted[i])
        try:
            destFolder = os.path.join(
                destPath, 'train_known_raw', folderName_sorted[i])
            shutil.copytree(src, destFolder)
        except OSError as e:
            # If the error was caused because the source wasn't a directory
            if e.errno == errno.ENOTDIR:
                shutil.copy(src, destFolder)
            else:
                if args.verbose:
                    print('Directory not copied. Error: %s' % e)
    if args.verbose:
        print ("Copying train dataset from lfw took {} seconds.".format(time.time() - start))
        start = time.time()

    # Take 10% images from train dataset as test dataset for known
    train_known_raw = [
        os.path.join(
            os.path.join(
                destPath,
                'train_known_raw'),
            f) for f in os.listdir(
            os.path.join(
                destPath,
                'train_known_raw')) if not f.endswith('.DS_Store')]  # .DS_Store for the OS X

    for folder in train_known_raw:
        images = [os.path.join(folder, f) for f in os.listdir(
            folder) if not f.endswith('.DS_Store')]
        if not os.path.exists(os.path.join(
                destPath, 'test_known_raw', folder.split('/')[-1:][0])):
            os.makedirs(os.path.join(destPath, 'test_known_raw',
                                     folder.split('/')[-1:][0]))
            # print ("Created {}".format(os.path.join(destPath,
            # 'test_known_raw', folder.split('/')[-1:][0])))
        # Move the last ~10% of each person's images into the test split.
        for i in range(int(0.9 * len(images)), len(images)):
            destFile = os.path.join(destPath, 'test_known_raw', folder.split(
                '/')[-1:][0], images[i].split('/')[-1:][0])
            try:
                shutil.move(images[i], destFile)
            except:
                pass
    if args.verbose:
        print ("Spliting lfw dataset took {} seconds.".format(time.time() - start))
        start = time.time()

    # Copy unknown test dataset
    # Everyone after the known range becomes the "unknown" test group.
    for i in range(int(args.rangeOfPeople.split(':')
                       [1]), len(folderName_sorted)):
        src = os.path.join(lfwPath, folderName_sorted[i])
        try:
            destFolder = os.path.join(
                destPath, 'test_unknown_raw', folderName_sorted[i])
            shutil.copytree(src, destFolder)
        except OSError as e:
            # If the error was caused because the source wasn't a directory
            if e.errno == errno.ENOTDIR:
                shutil.copy(src, destFolder)
            else:
                if args.verbose:
                    print('Directory not copied. Error: %s' % e)
    if args.verbose:
        print ("Copying test dataset from lfw took {} seconds.".format(time.time() - start))
        start = time.time()

    class Args():
        """
        This class is created to pass arguments to ./util/align-dlib.py
        """

        def __init__(self, inputDir, outputDir, verbose):
            self.inputDir = inputDir
            self.dlibFacePredictor = os.path.join(
                dlibModelDir, "shape_predictor_68_face_landmarks.dat")
            self.mode = 'align'
            self.landmarks = 'outerEyesAndNose'
            self.size = 96
            self.outputDir = outputDir
            self.skipMulti = True
            self.verbose = verbose
            self.fallbackLfw = False

    argsForAlign = Args(
        os.path.join(
            destPath,
            'train_known_raw'),
        os.path.join(
            destPath,
            'train_known_aligned'),
        args.verbose)
    jobs = []
    # Launch 8 alignment workers over the same directory; alignMain
    # presumably skips already-processed images — TODO confirm.
    for i in range(8):
        p = multiprocessing.Process(
            target=align_dlib.alignMain, args=(
                argsForAlign,))
        jobs.append(p)
        p.start()
    for p in jobs:
        p.join()

    if args.verbose:
        print ("Aligning the raw train data took {} seconds.".format(time.time() - start))
        start = time.time()

    # Extract embeddings for the aligned training faces with Torch.
    os.system(
        './batch-represent/main.lua -outDir ' +
        os.path.join(
            destPath,
            'train_known_features') +
        ' -data ' +
        os.path.join(
            destPath,
            'train_known_aligned'))
    if args.verbose:
        print ("Extracting features from aligned train data took {} seconds.".format(time.time() - start))
        start = time.time()
if __name__ == '__main__':

    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--dlibFacePredictor',
        type=str,
        help="Path to dlib's face predictor.",
        default=os.path.join(
            dlibModelDir,
            "shape_predictor_68_face_landmarks.dat"))
    parser.add_argument(
        '--networkModel',
        type=str,
        help="Path to Torch network model.",
        default=os.path.join(
            openfaceModelDir,
            'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim', type=int,
                        help="Default image dimension.", default=96)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')

    subparsers = parser.add_subparsers(dest='mode', help="Mode")
    trainParser = subparsers.add_parser('train',
                                        help="Train a new classifier.")
    trainParser.add_argument('--ldaDim', type=int, default=-1)
    trainParser.add_argument(
        '--classifier',
        type=str,
        choices=[
            'LinearSvm',
            'GMM',
            'RadialSvm',
            'DecisionTree'],
        help='The type of classifier to use.',
        default='LinearSvm')
    trainParser.add_argument(
        'workDir',
        type=str,
        help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")

    inferParser = subparsers.add_parser(
        'infer', help='Predict who an image contains from a trained classifier.')
    inferParser.add_argument(
        'classifierModel',
        type=str,
        help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
    inferParser.add_argument('imgs', type=str, nargs='+',
                             help="Input image.")

    inferFromTestParser = subparsers.add_parser(
        'inferFromTest',
        help='Predict who an image contains from a trained classifier.')
    # inferFromTestParser.add_argument('--classifierModel', type=str,
    # help='The Python pickle representing the classifier. This is NOT the
    # Torch network model, which can be set with --networkModel.')
    inferFromTestParser.add_argument(
        'featureFolder',
        type=str,
        nargs='+',
        help="Input the fratures folder which has the classifiers.")
    inferFromTestParser.add_argument(
        'testFolder',
        type=str,
        nargs='+',
        help="Input the test folder. It can be either known test dataset or unknown test dataset.")
    # FIX: --threshold is compared directly against a scalar confidence in
    # inferFromTest() and defaults to the scalar 0.0; the original
    # nargs='+' made argparse return a *list*, breaking that comparison.
    inferFromTestParser.add_argument(
        '--threshold',
        type=float,
        help="Threshold of the confidence to classify a prediction as unknown person. <threshold will be predicted as unknown person.",
        default=0.0)
    inferFromTestParser.add_argument(
        '--unknown',
        action='store_true',
        help="Use this flag if you are testing on unknown dataset. Make sure you set thresold value")

    preprocessParser = subparsers.add_parser(
        'preprocess',
        help='Before Benchmarking preprocess divides the dataset into train and test pairs. Also it will align the train dataset and extract the features from it.')
    preprocessParser.add_argument('--lfwDir', type=str,
                                  help="Enter the lfw face directory")
    preprocessParser.add_argument(
        '--rangeOfPeople',
        type=str,
        help="Range of the people you would like to take as known person group. Not that the input is a list starts with 0 and the people are sorted in decending order of number of images. Eg: 0:10 ")
    preprocessParser.add_argument(
        '--featuresDir',
        type=str,
        help="Enter the directory location where the aligned images, features, and classifer model will be saved.")

    args = parser.parse_args()
    if args.verbose:
        print("Argument parsing and import libraries took {} seconds.".format(
            time.time() - start))
        start = time.time()

    # Global dlib aligner and Torch network used by getRep().
    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
                                  cuda=args.cuda)

    if args.verbose:
        print("Loading the dlib and OpenFace models took {} seconds.".format(
            time.time() - start))
        start = time.time()

    if args.mode == 'train':
        train(args)
    elif args.mode == 'infer':
        # infer(args)
        raise Exception("Use ./demo/classifier.py")
    elif args.mode == 'inferFromTest':
        inferFromTest(args)
    elif args.mode == 'preprocess':
        preprocess(args)
| [
"openface.TorchNeuralNet",
"pickle.dump",
"argparse.ArgumentParser",
"numpy.argmax",
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"pickle.load",
"numpy.linalg.norm",
"sklearn.svm.SVC",
"os.path.join",
"shutil.copy",
"sys.path.append",
"numpy.set_printoptions",
"cv2.cvtColor",
... | [((850, 861), 'time.time', 'time.time', ([], {}), '()\n', (859, 861), False, 'import time\n'), ((1041, 1073), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (1060, 1073), True, 'import numpy as np\n'), ((1494, 1531), 'os.path.join', 'os.path.join', (['fileDir', '""".."""', '"""models"""'], {}), "(fileDir, '..', 'models')\n", (1506, 1531), False, 'import os\n'), ((1547, 1577), 'os.path.join', 'os.path.join', (['modelDir', '"""dlib"""'], {}), "(modelDir, 'dlib')\n", (1559, 1577), False, 'import os\n'), ((1597, 1631), 'os.path.join', 'os.path.join', (['modelDir', '"""openface"""'], {}), "(modelDir, 'openface')\n", (1609, 1631), False, 'import os\n'), ((1633, 1659), 'sys.path.append', 'sys.path.append', (['"""./util/"""'], {}), "('./util/')\n", (1648, 1659), False, 'import sys\n'), ((1455, 1481), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1471, 1481), False, 'import os\n'), ((1938, 1949), 'time.time', 'time.time', ([], {}), '()\n', (1947, 1949), False, 'import time\n'), ((4823, 4834), 'time.time', 'time.time', ([], {}), '()\n', (4832, 4834), False, 'import time\n'), ((4848, 4867), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (4858, 4867), False, 'import cv2\n'), ((4973, 5012), 'cv2.cvtColor', 'cv2.cvtColor', (['bgrImg', 'cv2.COLOR_BGR2RGB'], {}), '(bgrImg, cv2.COLOR_BGR2RGB)\n', (4985, 5012), False, 'import cv2\n'), ((5209, 5220), 'time.time', 'time.time', ([], {}), '()\n', (5218, 5220), False, 'import time\n'), ((5472, 5483), 'time.time', 'time.time', ([], {}), '()\n', (5481, 5483), False, 'import time\n'), ((5831, 5842), 'time.time', 'time.time', ([], {}), '()\n', (5840, 5842), False, 'import time\n'), ((8841, 8852), 'time.time', 'time.time', ([], {}), '()\n', (8850, 8852), False, 'import time\n'), ((14484, 14509), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14507, 14509), False, 'import argparse\n'), ((18577, 18588), 'time.time', 
'time.time', ([], {}), '()\n', (18586, 18588), False, 'import time\n'), ((18602, 18644), 'openface.AlignDlib', 'openface.AlignDlib', (['args.dlibFacePredictor'], {}), '(args.dlibFacePredictor)\n', (18620, 18644), False, 'import openface\n'), ((18655, 18733), 'openface.TorchNeuralNet', 'openface.TorchNeuralNet', (['args.networkModel'], {'imgDim': 'args.imgDim', 'cuda': 'args.cuda'}), '(args.networkModel, imgDim=args.imgDim, cuda=args.cuda)\n', (18678, 18733), False, 'import openface\n'), ((4461, 4507), 'os.path.join', 'os.path.join', (['args.workDir', "(clfChoice + '.pkl')"], {}), "(args.workDir, clfChoice + '.pkl')\n", (4473, 4507), False, 'import os\n'), ((8937, 8961), 'os.path.join', 'os.path.join', (['lfwPath', 'f'], {}), '(lfwPath, f)\n', (8949, 8961), False, 'import os\n'), ((9885, 9896), 'time.time', 'time.time', ([], {}), '()\n', (9894, 9896), False, 'import time\n'), ((10054, 10097), 'os.path.join', 'os.path.join', (['lfwPath', 'folderName_sorted[i]'], {}), '(lfwPath, folderName_sorted[i])\n', (10066, 10097), False, 'import os\n'), ((10701, 10712), 'time.time', 'time.time', ([], {}), '()\n', (10710, 10712), False, 'import time\n'), ((12046, 12057), 'time.time', 'time.time', ([], {}), '()\n', (12055, 12057), False, 'import time\n'), ((12212, 12255), 'os.path.join', 'os.path.join', (['lfwPath', 'folderName_sorted[i]'], {}), '(lfwPath, folderName_sorted[i])\n', (12224, 12255), False, 'import os\n'), ((12859, 12870), 'time.time', 'time.time', ([], {}), '()\n', (12868, 12870), False, 'import time\n'), ((13494, 13535), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_raw"""'], {}), "(destPath, 'train_known_raw')\n", (13506, 13535), False, 'import os\n'), ((13570, 13615), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_aligned"""'], {}), "(destPath, 'train_known_aligned')\n", (13582, 13615), False, 'import os\n'), ((13714, 13788), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'align_dlib.alignMain', 'args': 
'(argsForAlign,)'}), '(target=align_dlib.alignMain, args=(argsForAlign,))\n', (13737, 13788), False, 'import multiprocessing\n'), ((14026, 14037), 'time.time', 'time.time', ([], {}), '()\n', (14035, 14037), False, 'import time\n'), ((14429, 14440), 'time.time', 'time.time', ([], {}), '()\n', (14438, 14440), False, 'import time\n'), ((18918, 18929), 'time.time', 'time.time', ([], {}), '()\n', (18927, 18929), False, 'import time\n'), ((2161, 2174), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (2171, 2174), False, 'from operator import itemgetter\n'), ((2642, 2685), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1)', 'kernel': '"""linear"""', 'probability': '(True)'}), "(C=1, kernel='linear', probability=True)\n", (2645, 2685), False, 'from sklearn.svm import SVC\n'), ((4613, 4638), 'pickle.dump', 'pickle.dump', (['(le, clf)', 'f'], {}), '((le, clf), f)\n', (4624, 4638), False, 'import pickle\n'), ((6566, 6601), 'os.path.join', 'os.path.join', (['args.testFolder[0]', 'f'], {}), '(args.testFolder[0], f)\n', (6578, 6601), False, 'import os\n'), ((8971, 8990), 'os.listdir', 'os.listdir', (['lfwPath'], {}), '(lfwPath)\n', (8981, 8990), False, 'import os\n'), ((9560, 9626), 'os.path.join', 'os.path.join', (['destPath', '"""List_of_folders_and_number_of_images.txt"""'], {}), "(destPath, 'List_of_folders_and_number_of_images.txt')\n", (9572, 9626), False, 'import os\n'), ((10136, 10199), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_raw"""', 'folderName_sorted[i]'], {}), "(destPath, 'train_known_raw', folderName_sorted[i])\n", (10148, 10199), False, 'import os\n'), ((10229, 10261), 'shutil.copytree', 'shutil.copytree', (['src', 'destFolder'], {}), '(src, destFolder)\n', (10244, 10261), False, 'import shutil\n'), ((10839, 10880), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_raw"""'], {}), "(destPath, 'train_known_raw')\n", (10851, 10880), False, 'import os\n'), ((11150, 11173), 'os.path.join', 'os.path.join', (['folder', 'f'], {}), 
'(folder, f)\n', (11162, 11173), False, 'import os\n'), ((12294, 12358), 'os.path.join', 'os.path.join', (['destPath', '"""test_unknown_raw"""', 'folderName_sorted[i]'], {}), "(destPath, 'test_unknown_raw', folderName_sorted[i])\n", (12306, 12358), False, 'import os\n'), ((12388, 12420), 'shutil.copytree', 'shutil.copytree', (['src', 'destFolder'], {}), '(src, destFolder)\n', (12403, 12420), False, 'import shutil\n'), ((13123, 13190), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (13135, 13190), False, 'import os\n'), ((14212, 14257), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_aligned"""'], {}), "(destPath, 'train_known_aligned')\n", (14224, 14257), False, 'import os\n'), ((14648, 14715), 'os.path.join', 'os.path.join', (['dlibModelDir', '"""shape_predictor_68_face_landmarks.dat"""'], {}), "(dlibModelDir, 'shape_predictor_68_face_landmarks.dat')\n", (14660, 14715), False, 'import os\n'), ((14872, 14922), 'os.path.join', 'os.path.join', (['openfaceModelDir', '"""nn4.small2.v1.t7"""'], {}), "(openfaceModelDir, 'nn4.small2.v1.t7')\n", (14884, 14922), False, 'import os\n'), ((2366, 2397), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'header': 'None'}), '(fname, header=None)\n', (2377, 2397), True, 'import pandas as pd\n'), ((2423, 2437), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2435, 2437), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2758, 2784), 'sklearn.mixture.GMM', 'GMM', ([], {'n_components': 'nClasses'}), '(n_components=nClasses)\n', (2761, 2784), False, 'from sklearn.mixture import GMM\n'), ((6199, 6254), 'os.path.join', 'os.path.join', (['args.featureFolder[0]', "(clfChoice + '.pkl')"], {}), "(args.featureFolder[0], clfChoice + '.pkl')\n", (6211, 6254), False, 'import os\n'), ((6339, 6357), 'pickle.load', 'pickle.load', (['f_clf'], {}), '(f_clf)\n', (6350, 6357), False, 
'import pickle\n'), ((6404, 6441), 'pickle.load', 'pickle.load', (['f_clf'], {'encoding': '"""latin1"""'}), "(f_clf, encoding='latin1')\n", (6415, 6441), False, 'import pickle\n'), ((6628, 6658), 'os.listdir', 'os.listdir', (['args.testFolder[0]'], {}), '(args.testFolder[0])\n', (6638, 6658), False, 'import os\n'), ((6771, 6797), 'os.path.join', 'os.path.join', (['personSet', 'f'], {}), '(personSet, f)\n', (6783, 6797), False, 'import os\n'), ((7217, 7228), 'time.time', 'time.time', ([], {}), '()\n', (7226, 7228), False, 'import time\n'), ((7313, 7335), 'numpy.argmax', 'np.argmax', (['predictions'], {}), '(predictions)\n', (7322, 7335), True, 'import numpy as np\n'), ((10963, 11004), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_raw"""'], {}), "(destPath, 'train_known_raw')\n", (10975, 11004), False, 'import os\n'), ((11183, 11201), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (11193, 11201), False, 'import os\n'), ((11851, 11883), 'shutil.move', 'shutil.move', (['images[i]', 'destFile'], {}), '(images[i], destFile)\n', (11862, 11883), False, 'import shutil\n'), ((2090, 2121), 'pandas.read_csv', 'pd.read_csv', (['fname'], {'header': 'None'}), '(fname, header=None)\n', (2101, 2121), True, 'import pandas as pd\n'), ((3097, 3146), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(1)', 'kernel': '"""rbf"""', 'probability': '(True)', 'gamma': '(2)'}), "(C=1, kernel='rbf', probability=True, gamma=2)\n", (3100, 3146), False, 'from sklearn.svm import SVC\n'), ((4766, 4777), 'time.time', 'time.time', ([], {}), '()\n', (4775, 4777), False, 'import time\n'), ((5174, 5185), 'time.time', 'time.time', ([], {}), '()\n', (5183, 5185), False, 'import time\n'), ((5437, 5448), 'time.time', 'time.time', ([], {}), '()\n', (5446, 5448), False, 'import time\n'), ((5796, 5807), 'time.time', 'time.time', ([], {}), '()\n', (5805, 5807), False, 'import time\n'), ((5997, 6008), 'time.time', 'time.time', ([], {}), '()\n', (6006, 6008), False, 'import time\n'), ((6807, 
6828), 'os.listdir', 'os.listdir', (['personSet'], {}), '(personSet)\n', (6817, 6828), False, 'import os\n'), ((8288, 8326), 'numpy.linalg.norm', 'np.linalg.norm', (['(rep - clf.means_[maxI])'], {}), '(rep - clf.means_[maxI])\n', (8302, 8326), True, 'import numpy as np\n'), ((9184, 9202), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (9194, 9202), False, 'import os\n'), ((9506, 9528), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (9525, 9528), False, 'import operator\n'), ((9847, 9858), 'time.time', 'time.time', ([], {}), '()\n', (9856, 9858), False, 'import time\n'), ((10424, 10452), 'shutil.copy', 'shutil.copy', (['src', 'destFolder'], {}), '(src, destFolder)\n', (10435, 10452), False, 'import shutil\n'), ((10663, 10674), 'time.time', 'time.time', ([], {}), '()\n', (10672, 10674), False, 'import time\n'), ((12008, 12019), 'time.time', 'time.time', ([], {}), '()\n', (12017, 12019), False, 'import time\n'), ((12583, 12611), 'shutil.copy', 'shutil.copy', (['src', 'destFolder'], {}), '(src, destFolder)\n', (12594, 12611), False, 'import shutil\n'), ((12821, 12832), 'time.time', 'time.time', ([], {}), '()\n', (12830, 12832), False, 'import time\n'), ((13988, 13999), 'time.time', 'time.time', ([], {}), '()\n', (13997, 13999), False, 'import time\n'), ((14110, 14156), 'os.path.join', 'os.path.join', (['destPath', '"""train_known_features"""'], {}), "(destPath, 'train_known_features')\n", (14122, 14156), False, 'import os\n'), ((14391, 14402), 'time.time', 'time.time', ([], {}), '()\n', (14400, 14402), False, 'import time\n'), ((18542, 18553), 'time.time', 'time.time', ([], {}), '()\n', (18551, 18553), False, 'import time\n'), ((18880, 18891), 'time.time', 'time.time', ([], {}), '()\n', (18889, 18891), False, 'import time\n'), ((3228, 3264), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'max_depth': '(20)'}), '(max_depth=20)\n', (3250, 3264), False, 'from sklearn.tree import DecisionTreeClassifier\n'), 
((4323, 4352), 'sklearn.lda.LDA', 'LDA', ([], {'n_components': 'args.ldaDim'}), '(n_components=args.ldaDim)\n', (4326, 4352), False, 'from sklearn.lda import LDA\n'), ((3323, 3335), 'sklearn.naive_bayes.GaussianNB', 'GaussianNB', ([], {}), '()\n', (3333, 3335), False, 'from sklearn.naive_bayes import GaussianNB\n'), ((3541, 3668), 'nolearn.dbn.DBN', 'DBN', (['[embeddings.shape[1], 500, labelsNum[-1:][0] + 1]'], {'learn_rates': '(0.3)', 'learn_rate_decays': '(0.9)', 'epochs': '(300)', 'verbose': 'verbose'}), '([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], learn_rates=0.3,\n learn_rate_decays=0.9, epochs=300, verbose=verbose)\n', (3544, 3668), False, 'from nolearn.dbn import DBN\n'), ((7585, 7596), 'time.time', 'time.time', ([], {}), '()\n', (7594, 7596), False, 'import time\n')] |
'''
Created on Aug 8, 2014
@author: <EMAIL>
'''
import numpy as np
import logging
class F(object):
    """Product function: the pointwise product of several one-argument
    functions, one per positional argument.

    ``F((g0, g1, ...))(x0, x1, ...)`` returns ``g0(x0) * g1(x1) * ...``;
    with an empty combo the product is 1.
    """

    def __init__(self, combo):
        # Sequence of callables, applied one per argument of __call__.
        self.combo = combo

    def __call__(self, *args):
        # The original carried a dead `isinstance(g, Basis): pass` branch
        # here; it did nothing and forced a forward dependency on Basis.
        out = 1
        for g, arg in zip(self.combo, args):
            out *= g(arg)
        return out

    def __repr__(self):
        return "product of functions: %s" % ' * '.join(['(' + str(g) + ')' for g in self.combo])
def product(maxOrder, *bases):
    """Generate a Cartesian product basis from the given bases.

    If maxOrder : int is given, f.order will be checked for each element in the
    basis, to check that the sum of orders for any given combination from the
    Cartesian product is <= maxOrder. Otherwise, all combinations are used.

    >>> from pbPce.basisFit.polynomials import Basis, Polynomial1D, showBasis, StdBasis
    >>> basis = product(3, StdBasis(2), StdBasis(1))
    >>> print showBasis(basis, 2)
    1
    x1
    x0
    x0*x1
    x0**2
    x0**2*x1

    >>> b1 = Basis([Polynomial1D([1]), Polynomial1D([0, 1])]); b1
    Basis of 2 functions:
     1D Polynomial: 1x**0
     1D Polynomial: 1x**1 + 0x**0
    >>> b2 = Basis([Polynomial1D([3]), Polynomial1D([5, 3, 9, 4]), Polynomial1D([5])]); b2
    Basis of 3 functions:
     1D Polynomial: 3x**0
     1D Polynomial: 4x**3 + 9x**2 + 3x**1 + 5x**0
     1D Polynomial: 5x**0
    >>> basis = product(3, b1, b2)
    >>> print showBasis(basis, 2)
    3
    4*x1**3 + 9*x1**2 + 3*x1 + 5
    5
    3*x0
    5*x0
    """
    from itertools import product
    combos = []
    log = logging.getLogger(__name__)
    debugging = log.isEnabledFor(logging.DEBUG)
    # Lazy %s formatting: maxOrder may legitimately be None, which the
    # original eager '%d' interpolation crashed on.
    log.debug('maxOrder = %s', maxOrder)
    if debugging:
        # Materializing the whole Cartesian product (and importing sympy
        # below) is expensive, so only do it when DEBUG is actually on.
        log.debug('%d combinations:',
                  len(list(product(*[list(b) for b in bases]))))
    for combo in product(*[list(b) for b in bases]):
        totOrder = sum([f.order for f in combo])
        f = F(combo)
        if debugging:
            # sympy is only needed to render a human-readable form of the
            # candidate function for the debug log.
            from sympy import symbols
            xl = symbols(['x%d' % i for i in range(len(bases))])
            fstr = str(f(*xl)) + " (order %d)" % totOrder
        if maxOrder is None or totOrder <= maxOrder:
            for g in combo:
                assert callable(g)
            if debugging:
                log.debug('accepting %s', fstr)
            combos.append(combo)
        else:
            if debugging:
                log.debug("rejecting %s", fstr)
    return [F(combo) for combo in combos]
class Polynomial1D(object):
    """A 1D polynomial constructed from the standard monomial basis.

    >>> p = Polynomial1D((1, 2, 3))
    >>> p
    1D Polynomial: 3x**2 + 2x**1 + 1x**0
    >>> p(5) == 1 + 2*5 + 3*5**2
    True
    >>> p.order
    2
    """
    # TODO : basisFit.polynomials.discrete.discretePolynomial needs to be merged into here.

    def __init__(self, stdCoeffs):
        self._setCoeffs(stdCoeffs)

    def _setCoeffs(self, stdCoeffs):
        # Keep order, p, and the monomial basis consistent with the
        # coefficient vector (coeffs are ordered low power -> high power).
        self.order = len(stdCoeffs) - 1
        assert self.order >= 0
        self._stdCoeffs = stdCoeffs
        self.p = len(stdCoeffs) - 1
        self._stdBasis = StdBasis(self.p)

    @property
    def coeffs(self):
        return self._stdCoeffs

    @coeffs.setter
    def coeffs(self, coeffs):
        # FIX: the original setter only replaced _stdCoeffs, leaving
        # order/p/_stdBasis stale when the new vector had a different
        # length, so later evaluations silently used the wrong degree.
        self._setCoeffs(coeffs)

    def __call__(self, arg):
        # Evaluate as sum(c_i * x**i) over the standard monomial basis.
        return sum([c * f(arg) for c, f in zip(self._stdCoeffs, self._stdBasis)])

    def __repr__(self):
        return "%s: %s" % ('1D Polynomial',
                           ' + '.join(['%sx**%d' % (c, self.p - i)
                                       for (i, c) in enumerate(self._stdCoeffs[::-1])]
                                      )
                           )
def polynomialsFromMoments(X, maxOrd):
    """Build 1D polynomials of orders 0..maxOrd orthogonal with respect to
    the sample moments of the data ``X`` (arbitrary polynomial chaos).

    <NAME>. & <NAME>. Data-driven uncertainty quantification using the
    arbitrary polynomial chaos expansion. Reliab. Eng. Syst. Saf. 106, (2012).

    >>> N = 100000; X = np.random.normal(loc=0, scale=1, size=(N,))
    >>> polys = polynomialsFromMoments(X, 4)

    Compare to a Hermite basis:

    >>> from hermite import HndBasis; hpolys = HndBasis(4, 1)
    >>> from sympy.abc import x; from sympy import expr
    >>> for p in hpolys: print expr.Expr(p(x)).expand()
    Expr(1)
    Expr(x)
    Expr(x**2 - 1)
    Expr(x**3 - 3*x)
    Expr(x**4 - 6*x**2 + 3)

    notebook 20130318, pp 39-40
    """
    p00 = 1.0
    coeffs = [[p00]]
    momentMatrix = np.empty((maxOrd + 1, maxOrd + 1))
    mean = np.mean(X)
    N = np.size(X)

    def m(d):
        # Central sample moment of order d (with n-1 normalization).
        return np.sum((X - mean) ** d) / (N - 1)
    for i in range(maxOrd + 1):
        for j in range(maxOrd + 1):
            momentMatrix[i, j] = m(i + j)
    # For each degree k, solve the moment-matrix system with the trailing
    # normalization row A[k, k] = 1 pinning the leading coefficient.
    for k in range(1, maxOrd + 1):
        A = np.vstack((momentMatrix[0:k, 0:k + 1], np.zeros((1, k + 1))))
        b = np.zeros((k + 1, 1))
        A[k, k] = b[k, 0] = 1
        # rcond=None: use NumPy's modern machine-precision cutoff and
        # silence the FutureWarning about the deprecated default.
        u, residuals, rank, singularValues = np.linalg.lstsq(A, b, rcond=None)
        coeffs.append(u.ravel().tolist())
    return [Polynomial1D(c) for c in coeffs]
def generate(X, maxOrd, method="moments"):
    """Generate 1D polynomials orthonormal WRT sampled real abscissae.

    Parameters
    ==========
    X : array_like
        Samples drawn from the weighting distribution.
    maxOrd : int
        Highest polynomial order to generate.
    method : str
        Generation strategy (case-insensitive); only "moments" is
        implemented.

    Raises
    ======
    NotImplementedError
        If ``method`` names an unknown strategy.
    """
    method = method.lower()
    if method == "moments":
        return polynomialsFromMoments(X, maxOrd)
    else:
        # Name the offending method instead of raising a bare error.
        raise NotImplementedError("unknown generation method: %r" % method)
class Basis(object):
    """A collection of callable Functions, which may or may not have other nice features.

    >>> b = Basis([Polynomial1D((1,)), Polynomial1D((-3, 0, 0, 3))])
    >>> b
    Basis of 2 functions:
     1D Polynomial: 1x**0
     1D Polynomial: 3x**3 + 0x**2 + 0x**1 + -3x**0
    """

    def __init__(self, basisList, pdf=None):
        # Underlying list of callables, plus an optional weighting pdf.
        self._basisList = basisList
        self.pdf = pdf

    def recursionFormula(self):
        # Subclasses with a three-term recurrence may override this.
        raise NotImplementedError

    def __getitem__(self, index):
        return self._basisList[index]

    def __len__(self):
        return len(self._basisList)

    def __setitem__(self, index, func):
        # TODO: Actually, why would I ever want this?
        self._basisList[index] = func

    def __repr__(self):
        header = "Basis of %d functions" % len(self._basisList)
        body = '\n '.join(repr(func) for func in self._basisList)
        return "%s:\n %s" % (header, body)
def showBasis(basis, arity):
    """A human-readable depiction of a basis."""
    # TODO : This should probably be the __str__ for the Basis class.
    from sympy import symbols
    variables = symbols(['x%d' % i for i in range(arity)])
    rendered = []
    for func in basis:
        assert callable(func)
        rendered.append(str(func(*variables)))
    return '\n'.join(rendered)
class Monomial:
    """A single power function: calling an instance returns x**order."""

    def __init__(self, order):
        assert order >= 0
        self.order = order

    def __call__(self, x):
        return pow(x, self.order)

    def __repr__(self):
        return "monomial x^%d" % self.order
class StdBasis(Basis):
    def __init__(self, maxOrder):
        """A lightweight (?) basis of standard monomials
        (that know their own orders).
        >>> basis = StdBasis(3)
        >>> from sympy import Symbol; x = Symbol('x')
        >>> [f(x) for f in basis]
        [1, x, x**2, x**3]
        >>> basis[-1].order
        3
        """
        super(StdBasis, self).__init__([Monomial(i) for i in range(maxOrder + 1)])

    def __str__(self):
        # BUG FIX: this was misspelled `_str__` (single leading underscore),
        # so str(basis) never reached it and fell back to the default.
        return showBasis(self, 1)
class ContinuousBasis(Basis):
    """Generate basis functions from data.
    Parameters
    ==========
    X : array_like (n, m)
        Array of abscissae, sampled from the weighted space in which the
        generated polynomials should be orthonormal.
        The method used to generate the polynomials will depend on whether the
        dtype of the array (or type of the first element) is int-like or
        float-like.
    maxOrd : int
        The maximum total order allowed in the generated polynomials.
    Returns
    =======
    basis : basis object
    Demo:
    >>> X = np.random.normal(size=(1000,)).reshape((1000,1))
    >>> b = ContinuousBasis(X, 4)
    """
    def __init__(self, X, maxOrd, assumeIndependent=True):
        self.maxOrd = maxOrd
        # X must be 2-D: rows are samples, columns are the dimensions.
        assert len(X.shape) == 2, X.shape
        ndims = min(X.shape)
        assert ndims == X.shape[1], "The shape of the data must start with ndims (%d)." % ndims
        # Build a 1-D moments-based orthonormal basis for a single column.
        def oneDeeBasis(y): return generate(y, maxOrd, method="moments")
        if ndims > 1:
            if assumeIndependent:
                # One 1-D basis per column, then combined via `product`.
                # NOTE(review): if `product` is itertools.product (as the
                # module's import list suggests), `product(maxOrd, *lists)`
                # would try to iterate the int maxOrd and fail -- confirm
                # which `product` is actually in scope here.
                listOfFunctions = [oneDeeBasis(X[:, i]) for i in range(ndims)]
                listOfFunctions = product(maxOrd, *listOfFunctions)
            else:
                # Correlated dimensions are not supported.
                raise NotImplementedError
        else:
            # NOTE(review): the whole 2-D (n, 1) array is passed here rather
            # than the column X[:, 0]; downstream moment code appears to
            # tolerate it -- confirm.
            listOfFunctions = oneDeeBasis(X)
        super(ContinuousBasis, self).__init__(listOfFunctions)
def vandermonde(basis, explanMat):
    """Construct a pseudo-Vandermonde matrix in the given basis.

    Parameters
    ----------
    basis : list of callable basis functions of arity nvars
    explanMat : (ndata, nvars) array_like
        The explanatory data. Rows are observance instances; columns are the
        multiple variables. A 1-D input is treated as a single column.

    Returns
    -------
    V : numpy.ndarray, shape=(ndata, len(basis))
        V[i, j] is basis[j] evaluated at the i-th data row.
    """
    # TODO: Make this into a class version of this, with __getitem__ etc. methods, to make larger problems possible without using all RAM.
    if len(explanMat.shape) == 1:
        explanMat = explanMat.reshape(explanMat.size, 1)
    numDatums = explanMat.shape[0]
    V = np.empty((numDatums, len(basis)))
    for row in range(numDatums):
        coords = np.array(explanMat[row, :]).ravel()
        for col, func in enumerate(basis):
            V[row, col] = func(*coords)
    return V
class BasisFit(object):
    """An expansion in a basis with the given coefficients.
    >>> fit = BasisFit([4, 7], [lambda x: 3*x, lambda x: 15*x**2])
    >>> fit.coeffs
    [4, 7]
    >>> fit.coeffs = [6, 13]
    >>> fit._coeffs
    [6, 13]
    >>> fit(11) == 6*(3*11) + 13*(15*11**2)
    True
    And tested with an actual Basis:
    >>> from pbPce.basisFit.polynomials import Basis, Polynomial1D
    >>> fit = BasisFit([6, 13], Basis([Polynomial1D((0,3)), Polynomial1D((0,0,15))]))
    >>> fit(11) == 6*(3*11) + 13*(15*11**2)
    True
    """

    def __init__(self, coeffs, basis):
        self._coeffs = coeffs
        self.basis = basis

    def _getCoeffs(self):
        return self._coeffs

    def _setCoeffs(self, coeffs):
        self._coeffs = coeffs

    # Expose the coefficients through a read/write property.
    coeffs = property(_getCoeffs, _setCoeffs)

    def __call__(self, *args):
        # Weighted sum of the basis functions evaluated at *args.
        total = 0
        for weight, func in zip(self._coeffs, self.basis):
            total += weight * func(*args)
        return total
class BasisFitter(object):
    """Save the work of re-evaluating the Vandermonde matrix when fitting a basis.
    Since we're usually interested in cases where the abscissae are fixed (and
    therefore so is the appropriate orthonormal basis), while only the ordinates
    change (and therefore so do the fitting coefficients), we can evaluate the
    basis at the absicssae once and store that.
    """
    def __init__(self, X, maxOrd=None, basis=None, fitMethod='lsq'):
        """
        Parameters
        ==========
        X : (ndata, nvars) array_like
        maxOrd : int, optional
        basis : collection of basis functions
        fitMethod : str in ['lsq']
        """
        # TODO : Abscissae should be passed in and stored here, and basis should be optional (normally generated from the abscissae).
        if len(X.shape) == 1:
            # Ensure X is 2D, with second index across heterogeneities.
            X = X.reshape(X.size, 1)
        self.X = X
        if basis is None:
            if maxOrd is None:
                # If basis is not None, then presumably it handles maxOrd itself.
                maxOrd = 3
            # Generate a data-driven orthonormal basis from the abscissae.
            basis = ContinuousBasis(X, maxOrd)
        assert isinstance(basis, Basis), type(basis)
        self.basis = basis
        self._fitMethod = fitMethod
        # Evaluate the basis at the abscissae once; reused across fit() calls.
        self.V = vandermonde(self.basis, self.X)
        # Euclidean norm of each column of V; used to normalize columns so the
        # least-squares problem below is better conditioned.
        self.norms = np.sqrt(np.square(self.V.T).sum(1))
        self.Vnorms = self.V / self.norms
    def fit(self, y):
        # TODO : This needs to go to the fitting module.
        # NOTE(review): np.linalg.lstsq without an explicit rcond emits a
        # FutureWarning on modern NumPy -- confirm desired rcond behavior.
        if self._fitMethod.lower() == 'lsq':
            co, resid, rank, sing = np.linalg.lstsq(self.Vnorms, y)
            # Undo the column normalization so `co` applies to the raw basis.
            co = (co.T / self.norms).T # Normalizing is important to keep the inversion here
            # well-conditioned if, for example, p and X.max()/X.min()
        else:
            raise NotImplementedError
        return BasisFit(co, self.basis)
    def evaluate(self, coeffs):
        # Evaluate an expansion with the given coefficients at the stored X.
        return np.dot(self.V, coeffs)
| [
"numpy.size",
"numpy.sum",
"numpy.linalg.lstsq",
"numpy.empty",
"numpy.square",
"numpy.zeros",
"numpy.mean",
"numpy.array",
"itertools.product",
"numpy.dot",
"logging.getLogger"
] | [((4312, 4346), 'numpy.empty', 'np.empty', (['(maxOrd + 1, maxOrd + 1)'], {}), '((maxOrd + 1, maxOrd + 1))\n', (4320, 4346), True, 'import numpy as np\n'), ((4355, 4365), 'numpy.mean', 'np.mean', (['X'], {}), '(X)\n', (4362, 4365), True, 'import numpy as np\n'), ((4374, 4384), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (4381, 4384), True, 'import numpy as np\n'), ((9479, 9511), 'numpy.empty', 'np.empty', (['(numDatums, basisSize)'], {}), '((numDatums, basisSize))\n', (9487, 9511), True, 'import numpy as np\n'), ((4668, 4688), 'numpy.zeros', 'np.zeros', (['(k + 1, 1)'], {}), '((k + 1, 1))\n', (4676, 4688), True, 'import numpy as np\n'), ((4762, 4783), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'b'], {}), '(A, b)\n', (4777, 4783), True, 'import numpy as np\n'), ((12534, 12556), 'numpy.dot', 'np.dot', (['self.V', 'coeffs'], {}), '(self.V, coeffs)\n', (12540, 12556), True, 'import numpy as np\n'), ((1663, 1690), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1680, 1690), False, 'import logging\n'), ((1729, 1756), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1746, 1756), False, 'import logging\n'), ((4415, 4438), 'numpy.sum', 'np.sum', (['((X - mean) ** d)'], {}), '((X - mean) ** d)\n', (4421, 4438), True, 'import numpy as np\n'), ((12197, 12228), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['self.Vnorms', 'y'], {}), '(self.Vnorms, y)\n', (12212, 12228), True, 'import numpy as np\n'), ((4635, 4655), 'numpy.zeros', 'np.zeros', (['(1, k + 1)'], {}), '((1, k + 1))\n', (4643, 4655), True, 'import numpy as np\n'), ((8344, 8377), 'itertools.product', 'product', (['maxOrd', '*listOfFunctions'], {}), '(maxOrd, *listOfFunctions)\n', (8351, 8377), False, 'from itertools import product\n'), ((2269, 2296), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2286, 2296), False, 'import logging\n'), ((2385, 2412), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), 
'(__name__)\n', (2402, 2412), False, 'import logging\n'), ((11965, 11984), 'numpy.square', 'np.square', (['self.V.T'], {}), '(self.V.T)\n', (11974, 11984), True, 'import numpy as np\n'), ((9606, 9631), 'numpy.array', 'np.array', (['explanMat[i, :]'], {}), '(explanMat[i, :])\n', (9614, 9631), True, 'import numpy as np\n')] |
import numpy as np
from matplotlib import pyplot
def plotData(X, y, grid=False):
    """Scatter-plot labelled 2-D data into the current figure.

    Positive examples (y == 1) are drawn as black-edged `X` markers and
    negative examples (y == 0) as yellow `o` markers.

    Parameters
    ----------
    X : numpy ndarray
        Assumed to be an Mx2 matrix of coordinates.
    y : numpy ndarray
        The data labels, expected to be 1 or 0.
    grid : bool (Optional)
        Whether to show the grid in the plot. False by default.
    """
    positive = y == 1
    negative = y == 0
    # Draw each class with its own marker style.
    pyplot.plot(X[positive, 0], X[positive, 1], 'X', mew=1, ms=10, mec='k')
    pyplot.plot(X[negative, 0], X[negative, 1], 'o', mew=1, mfc='y', ms=10, mec='k')
    pyplot.grid(grid)
"""
Plots a non-linear decision boundary learned by the SVM and overlays the data on it.
Parameters
----------
X : array_like
(m x 2) The training data with two features (to plot in a 2-D plane).
y : array_like
(m, ) The data labels.
model : dict
Dictionary of model variables learned by SVM.
"""
plotData(X, y)
# make classification predictions over a grid of values
x1plot = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
x2plot = np.linspace(min(X[:, 1]), max(X[:, 1]), 100)
X1, X2 = np.meshgrid(x1plot, x2plot)
vals = np.zeros(X1.shape)
for i in range(X1.shape[1]):
this_X = np.stack((X1[:, i], X2[:, i]), axis=1)
vals[:, i] = svmPredict(model, this_X)
pyplot.contour(X1, X2, vals, colors='y', linewidths=2)
pyplot.pcolormesh(X1, X2, vals, cmap='YlGnBu', alpha=0.25, edgecolors='None', lw=0)
pyplot.grid(False)
"""
------------------------------------------------------------------------------------------------------------------------------------------------------
PorterStemmer
------------------------------------------------------------------------------------------------------------------------------------------------------
"""
class PorterStemmer:
    """
    Porter Stemming Algorithm
    This is the Porter stemming algorithm, ported to Python from the
    version coded up in ANSI C by the author. It may be be regarded
    as canonical, in that it follows the algorithm presented in
    Porter, 1980, An algorithm for suffix stripping, Program, Vol. 14,
    no. 3, pp 130-137,
    only differing from it at the points maked --DEPARTURE-- below.
    See also http://www.tartarus.org/~martin/PorterStemmer
    The algorithm as described in the paper could be exactly replicated
    by adjusting the points of DEPARTURE, but this is barely necessary,
    because (a) the points of DEPARTURE are definitely improvements, and
    (b) no encoding of the Porter stemmer I have seen is anything like
    as exact as this version, even with the points of DEPARTURE!
    <NAME> (<EMAIL>)
    Release 1: January 2001
    Further adjustments by <NAME> (<EMAIL>)
    to allow word input not restricted to one word per line, leading
    to:
    release 2: July 2008
    """
    def __init__(self):
        """
        The main part of the stemming algorithm starts here.
        b is a buffer holding a word to be stemmed. The letters are in b[k0],
        b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
        readjusted downwards as the stemming progresses. Zero termination is
        not in fact used in the algorithm.
        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """
        self.b = ""  # buffer for word to be stemmed
        self.k = 0
        self.k0 = 0
        self.j = 0   # j is a general offset into the string
    def cons(self, i):
        """cons(i) is TRUE <=> b[i] is a consonant."""
        if self.b[i] in 'aeiou':
            return 0
        if self.b[i] == 'y':
            # 'y' at the start of the stem is a consonant; elsewhere it is a
            # consonant exactly when the previous letter is a vowel.
            if i == self.k0:
                return 1
            else:
                return not self.cons(i - 1)
        return 1
    def m(self):
        """
        m() measures the number of consonant sequences between k0 and j.
        if c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,
           <c><v>       gives 0
           <c>vc<v>     gives 1
           <c>vcvc<v>   gives 2
           <c>vcvcvc<v> gives 3
           ....
        """
        n = 0
        i = self.k0
        # Skip the optional leading consonant sequence.
        while 1:
            if i > self.j:
                return n
            if not self.cons(i):
                break
            i = i + 1
        i = i + 1
        # Count each subsequent vowel-sequence/consonant-sequence pair.
        while 1:
            while 1:
                if i > self.j:
                    return n
                if self.cons(i):
                    break
                i = i + 1
            i = i + 1
            n = n + 1
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1
    def vowelinstem(self):
        """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
        for i in range(self.k0, self.j + 1):
            if not self.cons(i):
                return 1
        return 0
    def doublec(self, j):
        """ doublec(j) is TRUE <=> j,(j-1) contain a double consonant. """
        if j < (self.k0 + 1):
            return 0
        if self.b[j] != self.b[j-1]:
            return 0
        return self.cons(j)
    def cvc(self, i):
        """
        cvc(i) is TRUE <=> i-2,i-1,i has the form consonant - vowel - consonant
        and also if the second c is not w,x or y. this is used when trying to
        restore an e at the end of a short e.g.
           cav(e), lov(e), hop(e), crim(e), but
           snow, box, tray.
        """
        if i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):
            return 0
        ch = self.b[i]
        if ch in 'wxy':
            return 0
        return 1
    def ends(self, s):
        """ends(s) is TRUE <=> k0,...k ends with the string s."""
        length = len(s)
        if s[length - 1] != self.b[self.k]:  # tiny speed-up
            return 0
        if length > (self.k - self.k0 + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        # Side effect: record where the suffix begins, for m()/setto().
        self.j = self.k - length
        return 1
    def setto(self, s):
        """setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
        length = len(s)
        self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
        self.k = self.j + length
    def r(self, s):
        """r(s) is used further down."""
        if self.m() > 0:
            self.setto(s)
    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing. e.g.
           caresses  ->  caress
           ponies    ->  poni
           ties      ->  ti
           caress    ->  caress
           cats      ->  cat
           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable
           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess
           meetings  ->  meet
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                self.setto("i")
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1
        if self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"):
                self.setto("ate")
            elif self.ends("bl"):
                self.setto("ble")
            elif self.ends("iz"):
                self.setto("ize")
            elif self.doublec(self.k):
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch in 'lsz':
                    self.k += 1
            elif self.m() == 1 and self.cvc(self.k):
                self.setto("e")
    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in the stem."""
        if self.ends("y") and self.vowelinstem():
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]
    def step2(self):
        """step2() maps double suffices to single ones.
        so -ization ( = -ize plus -ation) maps to -ize etc. note that the
        string before the suffix must give m() > 0.
        """
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"): self.r("ate")
            elif self.ends("tional"): self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"): self.r("ence")
            elif self.ends("anci"): self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"): self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"): self.r("ble")  # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            # if self.ends("abli"): self.r("able")
            elif self.ends("alli"): self.r("al")
            elif self.ends("entli"): self.r("ent")
            elif self.ends("eli"): self.r("e")
            elif self.ends("ousli"): self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"): self.r("ize")
            elif self.ends("ation"): self.r("ate")
            elif self.ends("ator"): self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"): self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"): self.r("al")
            elif self.ends("iviti"): self.r("ive")
            elif self.ends("biliti"): self.r("ble")
        elif self.b[self.k - 1] == 'g':  # --DEPARTURE--
            if self.ends("logi"): self.r("log")
        # To match the published algorithm, delete this phrase
    def step3(self):
        """step3() dels with -ic-, -full, -ness etc. similar strategy to step2."""
        if self.b[self.k] == 'e':
            if self.ends("icate"): self.r("ic")
            elif self.ends("ative"): self.r("")
            elif self.ends("alize"): self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"): self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"): self.r("ic")
            elif self.ends("ful"): self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"): self.r("")
    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        if self.m() > 1:
            self.k = self.j
    def step5(self):
        """step5() removes a final -e if m() > 1, and changes -ll to -l if
        m() > 1.
        """
        self.j = self.k
        if self.b[self.k] == 'e':
            a = self.m()
            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                self.k = self.k - 1
        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
            self.k = self.k - 1
    def stem(self, p, i=0, j=None):
        """In stem(p,i,j), p is a char pointer, and the string to be stemmed
        is from p[i] to p[j] inclusive. Typically i is zero and j is the
        offset to the last character of a string, (p[j+1] == '\0'). The
        stemmer adjusts the characters p[i] ... p[j] and returns the new
        end-point of the string, k. Stemming never increases word length, so
        i <= k <= j. To turn the stemmer into a module, declare 'stem' as
        extern, and delete the remainder of this file.
        """
        # copy the parameters into statics
        self.b = p
        # BUG FIX: this was `self.k = j or len(p) - 1`, which wrongly treated
        # an explicit j == 0 (a one-character word) as "not given" and stemmed
        # the whole buffer instead.
        self.k = len(p) - 1 if j is None else j
        self.k0 = i
        if self.k <= self.k0 + 1:
            return self.b  # --DEPARTURE--
        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.
        self.step1ab()
        self.step1c()
        self.step2()
        self.step3()
        self.step4()
        self.step5()
        return self.b[self.k0:self.k+1]
| [
"numpy.stack",
"numpy.meshgrid",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.contour",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.grid"
] | [((705, 766), 'matplotlib.pyplot.plot', 'pyplot.plot', (['X[pos, 0]', 'X[pos, 1]', '"""X"""'], {'mew': '(1)', 'ms': '(10)', 'mec': '"""k"""'}), "(X[pos, 0], X[pos, 1], 'X', mew=1, ms=10, mec='k')\n", (716, 766), False, 'from matplotlib import pyplot\n'), ((771, 841), 'matplotlib.pyplot.plot', 'pyplot.plot', (['X[neg, 0]', 'X[neg, 1]', '"""o"""'], {'mew': '(1)', 'mfc': '"""y"""', 'ms': '(10)', 'mec': '"""k"""'}), "(X[neg, 0], X[neg, 1], 'o', mew=1, mfc='y', ms=10, mec='k')\n", (782, 841), False, 'from matplotlib import pyplot\n'), ((846, 863), 'matplotlib.pyplot.grid', 'pyplot.grid', (['grid'], {}), '(grid)\n', (857, 863), False, 'from matplotlib import pyplot\n'), ((1430, 1457), 'numpy.meshgrid', 'np.meshgrid', (['x1plot', 'x2plot'], {}), '(x1plot, x2plot)\n', (1441, 1457), True, 'import numpy as np\n'), ((1470, 1488), 'numpy.zeros', 'np.zeros', (['X1.shape'], {}), '(X1.shape)\n', (1478, 1488), True, 'import numpy as np\n'), ((1630, 1684), 'matplotlib.pyplot.contour', 'pyplot.contour', (['X1', 'X2', 'vals'], {'colors': '"""y"""', 'linewidths': '(2)'}), "(X1, X2, vals, colors='y', linewidths=2)\n", (1644, 1684), False, 'from matplotlib import pyplot\n'), ((1689, 1777), 'matplotlib.pyplot.pcolormesh', 'pyplot.pcolormesh', (['X1', 'X2', 'vals'], {'cmap': '"""YlGnBu"""', 'alpha': '(0.25)', 'edgecolors': '"""None"""', 'lw': '(0)'}), "(X1, X2, vals, cmap='YlGnBu', alpha=0.25, edgecolors=\n 'None', lw=0)\n", (1706, 1777), False, 'from matplotlib import pyplot\n'), ((1777, 1795), 'matplotlib.pyplot.grid', 'pyplot.grid', (['(False)'], {}), '(False)\n', (1788, 1795), False, 'from matplotlib import pyplot\n'), ((1539, 1577), 'numpy.stack', 'np.stack', (['(X1[:, i], X2[:, i])'], {'axis': '(1)'}), '((X1[:, i], X2[:, i]), axis=1)\n', (1547, 1577), True, 'import numpy as np\n')] |
import numpy as np
import argparse, sys, os, time
import progressbar
import tensorflow as tf
# Silence TF deprecation chatter on TF >= 1.14, where the logging API moved
# under tf.compat.v1. NOTE(review): this parses only the MINOR version, so
# the comparison misbehaves for TF 2.x ("2.4.1" -> 4 < 14) -- confirm intent.
if int(tf.__version__.split('.')[1]) >= 14:
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from keras.metrics import categorical_accuracy
import keras.backend as K
from datasets import UT, SBU, NTU
from datasets.data_generator import DataGeneratorSeq
from misc.utils import read_config
from models.temporal_rn import get_model, get_fusion_model
def run_predict(model, verbose, val_generator):
    """Run `model` over every batch of `val_generator` and report accuracy.

    Parameters
    ----------
    model : object with a predict_on_batch(batch_x) method.
    verbose : int
        When > 0, print progress and the final validation accuracy.
    val_generator : indexable of (batch_x, batch_y) pairs
        batch_y is expected to be a one-hot (to_categorical) array.

    Returns
    -------
    (Y_pred, Y_val) : two lists of int class indices (one-hot decoded).
    """
    show_progress = verbose > 0
    if show_progress:
        print("Starting predicting...")
        progbar = progressbar.ProgressBar(max_value=len(val_generator))
    predictions, targets = [], []
    for idx in range(len(val_generator)):
        if show_progress:
            progbar.update(idx)
        batch_x, batch_y = val_generator[idx]
        predictions.extend(model.predict_on_batch(batch_x))
        targets.extend(batch_y.tolist())
    if show_progress:
        progbar.finish()
    true_arr = np.array(targets)
    pred_arr = np.array(predictions)
    acc = np.sum(true_arr.argmax(axis=-1) == pred_arr.argmax(axis=-1)) / true_arr.shape[0]
    if show_progress:
        print("Validation acc: {:.2%}".format(acc))
    # Convert back from to_categorical: return class indices, not one-hot.
    return np.argmax(predictions, axis=1).tolist(), np.argmax(targets, axis=1).tolist()
#%% Functions
def predict_temp_rn(weights_path, dataset_name, model_kwargs, data_kwargs,
        dataset_fold=None, batch_size=32, verbose=2):
    """Build a single temporal-RN model, load `weights_path`, and run it over
    the validation split of `dataset_name`/`dataset_fold`.

    Returns (Y_pred, Y_val): predicted and true class indices (see run_predict).
    """
    if verbose > 0:
        print("***** Predicting parameters *****")
        print("\t weights_path:", weights_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Skeleton info")
        for key, value in data_kwargs.items():
            print("\t > {}: {}".format(key, value))
        print("\t Model info")
        for key, value in model_kwargs.items():
            print("\t > {}: {}".format(key, value))
        print("\t Predicting options")
        print("\t > Batch Size:", batch_size)
    if verbose > 0:
        print("Initializing Data Generator...")
    val_generator = DataGeneratorSeq(dataset_name, dataset_fold, 'validation',
        batch_size=batch_size, reshuffle=False, shuffle_indiv_order=False,
        pad_sequences=True, **data_kwargs)
    # Probe the first batch to infer the input/output shapes for the model.
    X_val, Y_val = val_generator[0]
    timesteps = data_kwargs['timesteps']
    add_joint_idx = data_kwargs['add_joint_idx']
    add_body_part = data_kwargs['add_body_part']
    _, seq_len, num_joints, *object_shape = np.array(X_val).shape
    # NOTE(review): halving suggests the joints of two subjects are stacked
    # along this axis -- confirm against DataGeneratorSeq.
    num_joints = num_joints//2
    object_shape = tuple(object_shape)
    output_size = len(Y_val[0])
    overhead = add_joint_idx + add_body_part # True/False = 1/0
    # Per-timestep coordinate dimensionality, excluding the overhead features.
    num_dim = (object_shape[0]-overhead)//timesteps
    if verbose > 0:
        print("Creating model...")
    model = get_model(num_objs=num_joints, object_shape=object_shape,
        output_size=output_size, num_dim=num_dim, overhead=overhead,
        seq_len=seq_len, **model_kwargs)
    if verbose > 0:
        print("Loading weights...")
    model.load_weights(weights_path)
    Y_pred, Y_val = run_predict(model, verbose, val_generator)
    return Y_pred, Y_val
def predict_fused_temp_rn(fusion_weights_path, dataset_name, dataset_fold,
        config_filepaths, freeze_g_theta=False, fuse_at_fc1=False,
        batch_size=32, verbose=2, gpus=1):
    """Build a fusion of temporal-RN streams (one per config file), load the
    fused weights, and run it over the validation split.

    Parameters
    ----------
    fusion_weights_path : path to the fused model weights.
    dataset_name, dataset_fold : identify the validation split to use.
    config_filepaths : list of per-stream config files (read_config format);
        the first one also supplies the data-generator kwargs.
    freeze_g_theta, fuse_at_fc1 : fusion-architecture options forwarded to
        get_fusion_model.
    batch_size, verbose : prediction options.
    gpus : kept for interface compatibility (unused here).

    Returns (Y_pred, Y_val): predicted and true class indices (see run_predict).
    """
    if verbose > 0:
        print("***** Predicting parameters *****")
        print("\t fusion_weights_path:", fusion_weights_path)
        print("\t Dataset:", dataset_name)
        print("\t Dataset fold:", dataset_fold)
        print("\t Fusion info")
        print("\t > config_filepaths:", config_filepaths)
        print("\t > freeze_g_theta:", freeze_g_theta)
        print("\t > fuse_at_fc1:", fuse_at_fc1)
        print("\t Predicting options")
        print("\t > Batch Size:", batch_size)
    ####
    data_kwargs, _, _ = read_config(config_filepaths[0])
    if verbose > 0:
        print("Initializing Data Generator...")
    val_generator = DataGeneratorSeq(dataset_name, dataset_fold, 'validation',
        batch_size=batch_size, reshuffle=False, shuffle_indiv_order=False,
        pad_sequences=True, **data_kwargs)
    # Probe the first batch to infer the input/output shapes for the model.
    X_val, Y_val = val_generator[0]
    _, seq_len, num_joints, *object_shape = np.array(X_val).shape
    num_joints = num_joints//2
    object_shape = tuple(object_shape)
    output_size = len(Y_val[0])
    models_kwargs = []
    for config_filepath in config_filepaths:
        data_kwargs, model_kwargs, train_kwargs = read_config(config_filepath)
        timesteps = data_kwargs['timesteps']
        add_joint_idx = data_kwargs['add_joint_idx']
        add_body_part = data_kwargs['add_body_part']
        overhead = add_joint_idx + add_body_part # True/False = 1/0
        num_dim = (object_shape[0]-overhead)//timesteps
        model_kwargs['num_dim'] = num_dim
        model_kwargs['overhead'] = overhead
        models_kwargs.append(model_kwargs)
    # Disable dropout at inference time (uses the last stream's train_kwargs).
    train_kwargs['drop_rate'] = 0
    weights_filepaths = [ [] for _ in config_filepaths ]
    if verbose > 0:
        print("Creating model...")
    model = get_fusion_model(num_joints, object_shape, output_size, seq_len,
        train_kwargs, models_kwargs, weights_filepaths,
        freeze_g_theta=freeze_g_theta, fuse_at_fc1=fuse_at_fc1)
    if verbose > 0:
        print("Loading weights...")
    model.load_weights(fusion_weights_path)
    # The prediction loop was a verbatim copy of run_predict(); reuse it
    # instead of maintaining two identical copies.
    return run_predict(model, verbose, val_generator)
#%% Main
if __name__ == '__main__':
    # NOTE(review): `load_args` is not defined or imported anywhere in this
    # module -- presumably an argparse helper lost in a refactor; confirm.
    args = vars(load_args())
    print('> Starting Predict RN - ', time.asctime( time.localtime(time.time()) ))
    # Pop the flag so it is not forwarded as a keyword argument below.
    print_args = args.pop('print_args')
    if print_args:
        print("Program arguments and values:")
        for argument, value in args.items():
            print('\t', argument, ":", value)
    # NOTE(review): `predict_fused_rn` does not exist in this module; the
    # defined entry points are predict_temp_rn / predict_fused_temp_rn --
    # this looks like a stale name from a refactor, verify.
    predict_fused_rn(**args)
    print('\n> Finished Predict RN -', time.asctime( time.localtime(time.time()) ))
| [
"misc.utils.read_config",
"models.temporal_rn.get_fusion_model",
"numpy.argmax",
"tensorflow.__version__.split",
"models.temporal_rn.get_model",
"datasets.data_generator.DataGeneratorSeq",
"time.time",
"numpy.array",
"tensorflow.compat.v1.logging.set_verbosity"
] | [((142, 204), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (176, 204), True, 'import tensorflow as tf\n'), ((989, 1004), 'numpy.array', 'np.array', (['Y_val'], {}), '(Y_val)\n', (997, 1004), True, 'import numpy as np\n'), ((1018, 1034), 'numpy.array', 'np.array', (['Y_pred'], {}), '(Y_pred)\n', (1026, 1034), True, 'import numpy as np\n'), ((2184, 2354), 'datasets.data_generator.DataGeneratorSeq', 'DataGeneratorSeq', (['dataset_name', 'dataset_fold', '"""validation"""'], {'batch_size': 'batch_size', 'reshuffle': '(False)', 'shuffle_indiv_order': '(False)', 'pad_sequences': '(True)'}), "(dataset_name, dataset_fold, 'validation', batch_size=\n batch_size, reshuffle=False, shuffle_indiv_order=False, pad_sequences=\n True, **data_kwargs)\n", (2200, 2354), False, 'from datasets.data_generator import DataGeneratorSeq\n'), ((2910, 3071), 'models.temporal_rn.get_model', 'get_model', ([], {'num_objs': 'num_joints', 'object_shape': 'object_shape', 'output_size': 'output_size', 'num_dim': 'num_dim', 'overhead': 'overhead', 'seq_len': 'seq_len'}), '(num_objs=num_joints, object_shape=object_shape, output_size=\n output_size, num_dim=num_dim, overhead=overhead, seq_len=seq_len, **\n model_kwargs)\n', (2919, 3071), False, 'from models.temporal_rn import get_model, get_fusion_model\n'), ((4014, 4046), 'misc.utils.read_config', 'read_config', (['config_filepaths[0]'], {}), '(config_filepaths[0])\n', (4025, 4046), False, 'from misc.utils import read_config\n'), ((4140, 4310), 'datasets.data_generator.DataGeneratorSeq', 'DataGeneratorSeq', (['dataset_name', 'dataset_fold', '"""validation"""'], {'batch_size': 'batch_size', 'reshuffle': '(False)', 'shuffle_indiv_order': '(False)', 'pad_sequences': '(True)'}), "(dataset_name, dataset_fold, 'validation', batch_size=\n batch_size, reshuffle=False, shuffle_indiv_order=False, pad_sequences=\n True, **data_kwargs)\n", (4156, 
4310), False, 'from datasets.data_generator import DataGeneratorSeq\n'), ((5258, 5435), 'models.temporal_rn.get_fusion_model', 'get_fusion_model', (['num_joints', 'object_shape', 'output_size', 'seq_len', 'train_kwargs', 'models_kwargs', 'weights_filepaths'], {'freeze_g_theta': 'freeze_g_theta', 'fuse_at_fc1': 'fuse_at_fc1'}), '(num_joints, object_shape, output_size, seq_len,\n train_kwargs, models_kwargs, weights_filepaths, freeze_g_theta=\n freeze_g_theta, fuse_at_fc1=fuse_at_fc1)\n', (5274, 5435), False, 'from models.temporal_rn import get_model, get_fusion_model\n'), ((6026, 6041), 'numpy.array', 'np.array', (['Y_val'], {}), '(Y_val)\n', (6034, 6041), True, 'import numpy as np\n'), ((6055, 6071), 'numpy.array', 'np.array', (['Y_pred'], {}), '(Y_pred)\n', (6063, 6071), True, 'import numpy as np\n'), ((2598, 2613), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (2606, 2613), True, 'import numpy as np\n'), ((4410, 4425), 'numpy.array', 'np.array', (['X_val'], {}), '(X_val)\n', (4418, 4425), True, 'import numpy as np\n'), ((4657, 4685), 'misc.utils.read_config', 'read_config', (['config_filepath'], {}), '(config_filepath)\n', (4668, 4685), False, 'from misc.utils import read_config\n'), ((101, 126), 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), "('.')\n", (121, 126), True, 'import tensorflow as tf\n'), ((1277, 1302), 'numpy.argmax', 'np.argmax', (['Y_pred'], {'axis': '(1)'}), '(Y_pred, axis=1)\n', (1286, 1302), True, 'import numpy as np\n'), ((1324, 1348), 'numpy.argmax', 'np.argmax', (['Y_val'], {'axis': '(1)'}), '(Y_val, axis=1)\n', (1333, 1348), True, 'import numpy as np\n'), ((6314, 6339), 'numpy.argmax', 'np.argmax', (['Y_pred'], {'axis': '(1)'}), '(Y_pred, axis=1)\n', (6323, 6339), True, 'import numpy as np\n'), ((6361, 6385), 'numpy.argmax', 'np.argmax', (['Y_val'], {'axis': '(1)'}), '(Y_val, axis=1)\n', (6370, 6385), True, 'import numpy as np\n'), ((6560, 6571), 'time.time', 'time.time', ([], {}), '()\n', (6569, 6571), 
False, 'import argparse, sys, os, time\n'), ((6877, 6888), 'time.time', 'time.time', ([], {}), '()\n', (6886, 6888), False, 'import argparse, sys, os, time\n')] |
import matplotlib.pyplot as plt
from matplotlib import patches
import numpy as np
import copy
from mpmath import mpf
from .circle_intersection import Geometry
from .errors import PrecisionError
from .utils import (
mobius,
upper_to_disc,
complex_to_vector,
get_arc
)
class HyperbolicPlane:
    def __init__(self):
        """
        This class stores geodesics on the hyperbolic plane
        """
        # Geodesic objects collected by tesselate(); points plotted alongside.
        self.geodesics = []
        self.points = []
    def tesselate(
        self,
        fundamental_domain,
        mobius_transformations,
        num_iter=2
    ):
        # Repeatedly apply each Mobius transformation to copies of the
        # domain, collecting the boundary geodesics of every image.
        domains = [fundamental_domain]
        iteration_number = 0
        while domains:
            domain = domains.pop()
            for transformation in mobius_transformations:
                copied_dom = copy.deepcopy(domain)
                copied_dom.mobius(transformation)
                self.geodesics.extend(copied_dom.boundary)
                if iteration_number < num_iter:
                    domains.append(copied_dom)
                # NOTE(review): this counter increments once per
                # (domain, transformation) pair, so num_iter bounds the
                # total number of expansions rather than a tree depth --
                # confirm that is the intended meaning of num_iter.
                iteration_number += 1
    def plot_upper_half(self, fig_num=1):
        # Draw all geodesics and points in the upper-half-plane model
        # (left panel of a 1x3 figure).
        fig = plt.figure(fig_num)
        ax = fig.add_subplot(1, 3, 1)
        for geodesic in self.geodesics:
            geodesic.plot_upper_half(ax)
        # Points are stored as complex numbers; split into x/y for scatter.
        x_coords = [float(x.real) for x in self.points]
        y_coords = [float(x.imag) for x in self.points]
        ax.scatter(x_coords, y_coords)
        plt.axis([-2, 2, 0, 8])
    def plot_disc(self, fig_num = 1):
        # Draw all geodesics and points in the Poincare-disc model
        # (middle panel of a 1x3 figure), with the unit circle as boundary.
        fig = plt.figure(fig_num)
        ax = fig.add_subplot(1, 3, 2)
        circ = plt.Circle((0, 0), 1, color='black', fill=False)
        ax.add_patch(circ)
        for geodesic in self.geodesics:
            geodesic.plot_disc(ax)
        x_coords = [float(x.real) for x in self.points]
        y_coords = [float(x.imag) for x in self.points]
        ax.scatter(x_coords, y_coords)
        plt.axis([-2, 2, -2, 2])
class Geodesic:
    """A geodesic of the hyperbolic plane.

    In the upper half-plane model this is a semicircle centred on the real
    axis (or a vertical line when one root is infinite); in the disc model
    it is an arc of a circle perpendicular to the unit circle.
    """
    def __init__(self, start, end, color='blue'):
        """
        :param start: first boundary root on the real axis (may be inf)
        :param end: second boundary root on the real axis (may be inf)
        :param color: matplotlib colour used when plotting
        """
        self.roots = (start, end)
        self.color = color
        # orientation: +1 when traversed left-to-right along the real axis
        if end > start:
            self.orient = 1
        else:
            self.orient = -1
        # vertical-line geodesics carry no orientation here
        if end == float('inf') or start == float('inf'):
            self.orient = None
    def mobius(self, matrix):
        """Apply a Mobius transformation (2x2 matrix) to both roots."""
        self.roots = tuple(map(lambda x: mobius(matrix, x), self.roots))
    def get_center(self):
        """Centre of the semicircle (a point on the real axis, as complex)."""
        return sum(self.roots) / 2 + 0j
    def get_radius(self):
        return abs(self.roots[1] - self.roots[0]) / 2
    def get_roots_disc(self):
        """Both roots mapped to the boundary of the unit disc."""
        return [upper_to_disc(x) for x in self.roots]
    def get_center_disc(self):
        """Centre of the circle carrying this geodesic in the disc model.

        Returns float('inf') for diameter geodesics, which are straight
        lines in the disc model.
        """
        boundary_point1, boundary_point2 = [
            np.array([x.real, x.imag]) for x in self.get_roots_disc()
        ]
        perp_vec1, perp_vec2 = [
            np.array([-x.imag, x.real]) for x in self.get_roots_disc()
        ]
        # The roots lie on the unit circle, so the tangent vectors are unit
        # vectors; |dot| ~ 1 means they are parallel and the geodesic is a
        # diameter.  BUG FIX: the previous exact equality `== 1` almost
        # never triggered with floats, letting diameters fall through to a
        # near-zero division below — use a tolerance instead.
        if np.isclose(abs(np.dot(perp_vec1, perp_vec2)), 1.0):
            return float("inf")
        intersection = np.dot(
            boundary_point1 - boundary_point2,
            boundary_point1
        ) / np.dot(boundary_point1, perp_vec2)
        center = np.dot(intersection, perp_vec2) + boundary_point2
        return center[0] + center[1] * 1j
    def plot_disc(self, ax):
        """Draw this geodesic on *ax* in the disc model.

        Diameters (centre at infinity) are drawn as straight chords,
        everything else as a circular arc.
        """
        roots = [complex_to_vector(x) for x in self.get_roots_disc()]
        center = complex(self.get_center_disc())
        if center.real == float("inf"):
            x_coords = [float(x[0]) for x in roots]
            y_coords = [float(x[1]) for x in roots]
            ax.plot(x_coords, y_coords)
        else:
            roots = [complex(x) for x in self.get_roots_disc()]
            theta1, theta2 = get_arc(center, roots)
            root1 = roots[0]
            radius = float(np.linalg.norm([
                center.real - root1.real, center.imag - root1.imag
            ]))
            arc = patches.Arc(
                (center.real, center.imag),
                2 * radius, 2 * radius,
                0.0,
                float(theta1),
                float(theta2),
                color=self.color
            )
            ax.add_patch(arc)
    def plot_upper_half(self, ax):
        """Draw this geodesic on *ax* in the upper half-plane model.

        Vertical-line geodesics (infinite radius) are drawn with axvline
        through their finite root.
        """
        center = complex(self.get_center())
        radius = float(self.get_radius())
        if radius == float('inf'):
            non_inf_root = [float(x) for x in self.roots if float(x) < float('inf')]
            ax.axvline(x=non_inf_root[0], color=self.color)
        else:
            circ = plt.Circle((center.real, center.imag), radius, color=self.color, fill=False)
            ax.add_patch(circ)
class FiniteGeodesic(Geodesic):
    """A geodesic segment between two interior points of the upper
    half-plane.

    Every such segment lies on a unique semicircle centred on the real
    axis.  Segments lying on vertical lines are not handled, since those
    only arise from non-reduced words.
    """
    def __init__(self, start, end, color='r'):
        """Locate the semicircle through *start* and *end* and initialise
        the underlying Geodesic with its real roots."""
        # Solve |start - c| == |end - c| for the real centre c.
        denom = 2 * (end.real - start.real)
        center = (abs(end) ** 2 - abs(start) ** 2) / denom
        radius = abs(start - center)
        # order the roots so traversal matches the endpoint order
        if start.real < end.real:
            first, second = center - radius, center + radius
        else:
            first, second = center + radius, center - radius
        Geodesic.__init__(self, first, second, color)
        self.bounds = (start, end)
        self.theta1 = None
        self.theta2 = None
    def __str__(self):
        return f"roots: {str(self.roots)}, bounds: {str(self.bounds)}"
    def mobius(self, a):
        """Transform both roots and endpoints, then refresh the arc."""
        self.roots = tuple(mobius(a, x) for x in self.roots)
        self.bounds = tuple(mobius(a, x) for x in self.bounds)
        self.set_arc()
    def set_arc(self):
        """Recompute orientation and the arc angles spanned by the bounds."""
        center = self.get_center()
        self.orient = 1 if self.bounds[0].real < self.bounds[1].real else -1
        self.theta = get_arc(center, self.bounds)
    def plot_upper_half(self, ax):
        """Draw the segment as a circular arc in the upper half-plane."""
        self.set_arc()
        center = complex(self.get_center())
        diameter = 2 * float(self.get_radius())
        theta1, theta2 = self.theta
        ax.add_patch(patches.Arc(
            (center.real, center.imag),
            diameter, diameter,
            0.0,
            float(theta1),
            float(theta2),
            color=self.color
        ))
    def plot_disc(self, ax):
        """Draw the segment as a circular arc in the disc model."""
        center = complex(self.get_center_disc())
        endpoints = [complex(upper_to_disc(b)) for b in self.bounds]
        radius = abs(center - endpoints[0])
        theta1, theta2 = get_arc(center, endpoints)
        ax.add_patch(patches.Arc(
            (center.real, center.imag),
            2 * radius,
            2 * radius,
            0.0,
            float(theta1),
            float(theta2),
            color=self.color
        ))
class Segment(FiniteGeodesic):
    def __init__(self, path, fundamental_domain, partial_word):
        """
        A segment is the part of a finite geodesic lying inside the
        fundamental domain; `partial_word` maps the segment back to a
        segment of the original geodesic.

        :param path: the finite geodesic being intersected
        :param fundamental_domain: Domain whose boundary geodesics cut it
        :param partial_word: object providing transformation /
            inverse_transformation between cover and fundamental domain
        :raises PrecisionError: when the search does not find exactly two
            boundary crossings (numerical precision loss)
        """
        self.partial_word = partial_word
        self.absolute_max = None
        # bounds are intersection
        bounds = []
        for boundary_geodesic in fundamental_domain.boundary:
            path_center = path.get_center().real
            path_radius = path.get_radius()
            if boundary_geodesic.roots[0] == mpf("inf"):
                # vertical boundary line x = roots[1]
                boundary_x = boundary_geodesic.roots[1]
                # checks for intersection
                y_squared = path_radius ** 2 - (boundary_x - path_center) ** 2
                if y_squared > 0:
                    y_coord = y_squared ** 0.5
                    intersection = boundary_x + y_coord * 1j
                    bounds.append(intersection)
            elif boundary_geodesic.roots[1] == mpf("inf"):
                # vertical boundary line x = roots[0]
                boundary_x = boundary_geodesic.roots[0]
                # checks for intersection
                y_squared = path_radius ** 2 - (boundary_x - path_center) ** 2
                if y_squared > 0:
                    y_coord = y_squared ** 0.5
                    intersection = boundary_x + y_coord * 1j
                    bounds.append(intersection)
            else:
                # semicircular boundary: intersect the two circles
                # (both centred on the real axis, hence y = 0)
                boundary_geodesic_radius = boundary_geodesic.get_radius()
                boundary_geodesic_center = boundary_geodesic.get_center().real
                boundary_geodesic_circle = (boundary_geodesic_center, 0, boundary_geodesic_radius)
                path_circle = (path_center, 0, path_radius)
                intersection = Geometry().circle_intersection(
                    boundary_geodesic_circle,
                    path_circle
                )
                if intersection is not None:
                    intersection = intersection[0] + 1j * intersection[1]
                    bounds.append(intersection)
        # catch precision error
        if len(bounds) != 2:
            print('error finding segment')
            raise PrecisionError()
        else:
            FiniteGeodesic.__init__(self, bounds[0], bounds[1])
            center = self.get_center()
            radius = self.get_radius()
            # max at center
            if center.real <= self.bounds[1].real and \
                center.real >= self.bounds[0].real:
                self.absolute_max = center.real + radius * 1j
            # max at left boundary
            elif self.bounds[0].imag > self.bounds[1].imag:
                self.absolute_max = self.bounds[0].real + \
                    self.bounds[0].imag * 1j
            # max at right most boundary
            else:
                self.absolute_max = self.bounds[1].real + \
                    self.bounds[1].imag * 1j
    def lift(self, point):
        """lifts point on segment to universal cover"""
        return self.partial_word.transformation(point)
    def inverse_lift(self, point):
        """maps point on universal cover to fundamental domain"""
        return self.partial_word.inverse_transformation(point)
class Domain:
    """A region of the upper half-plane bounded by a list of geodesics."""
    def __init__(self, boundary):
        """
        :param boundary: geodesics delimiting the fundamental domain,
            given in the upper half-plane model
        """
        self.boundary = boundary
    def mobius(self, matrix):
        """Apply a Mobius transformation to every boundary geodesic in place."""
        for edge in self.boundary:
            edge.mobius(matrix)
if __name__ == '__main__':
    # Demo: fundamental domain bounded by the vertical lines x = -1, x = 1
    # and the semicircles over [-1, 0] and [0, 1]; tesselate it with four
    # Mobius maps and show both plane models side by side.
    roots = [(mpf("inf"), -1.0), (-1.0, 0.0), (0.0, 1.0), (mpf("inf"), 1.0)]
    bounds = list(map(lambda x: Geodesic(x[0], x[1]), roots))
    fundamental_domain = Domain(bounds)
    hyperbolic_plane = HyperbolicPlane()
    # A, B, A inverse, B inverse
    mobius_transformations = [
        np.array([[1, 1], [1, 2]]),
        np.array([[1, -1], [-1, 2]]),
        np.array([[2, -1], [-1, 1]]),
        np.array([[2, 1], [1, 1]])
    ]
    hyperbolic_plane.tesselate(fundamental_domain, mobius_transformations)
    hyperbolic_plane.plot_upper_half()
    hyperbolic_plane.plot_disc()
    plt.show()
| [
"copy.deepcopy",
"matplotlib.pyplot.show",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure",
"mpmath.mpf",
"numpy.array",
"matplotlib.pyplot.Circle",
"numpy.linalg.norm",
"numpy.dot"
] | [((11193, 11203), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11201, 11203), True, 'import matplotlib.pyplot as plt\n'), ((1124, 1143), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (1134, 1143), True, 'import matplotlib.pyplot as plt\n'), ((1425, 1448), 'matplotlib.pyplot.axis', 'plt.axis', (['[-2, 2, 0, 8]'], {}), '([-2, 2, 0, 8])\n', (1433, 1448), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1521), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (1512, 1521), True, 'import matplotlib.pyplot as plt\n'), ((1575, 1623), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(0, 0)', '(1)'], {'color': '"""black"""', 'fill': '(False)'}), "((0, 0), 1, color='black', fill=False)\n", (1585, 1623), True, 'import matplotlib.pyplot as plt\n'), ((1888, 1912), 'matplotlib.pyplot.axis', 'plt.axis', (['[-2, 2, -2, 2]'], {}), '([-2, 2, -2, 2])\n', (1896, 1912), True, 'import matplotlib.pyplot as plt\n'), ((10893, 10919), 'numpy.array', 'np.array', (['[[1, 1], [1, 2]]'], {}), '([[1, 1], [1, 2]])\n', (10901, 10919), True, 'import numpy as np\n'), ((10930, 10958), 'numpy.array', 'np.array', (['[[1, -1], [-1, 2]]'], {}), '([[1, -1], [-1, 2]])\n', (10938, 10958), True, 'import numpy as np\n'), ((10969, 10997), 'numpy.array', 'np.array', (['[[2, -1], [-1, 1]]'], {}), '([[2, -1], [-1, 1]])\n', (10977, 10997), True, 'import numpy as np\n'), ((11008, 11034), 'numpy.array', 'np.array', (['[[2, 1], [1, 1]]'], {}), '([[2, 1], [1, 1]])\n', (11016, 11034), True, 'import numpy as np\n'), ((3088, 3146), 'numpy.dot', 'np.dot', (['(boundary_point1 - boundary_point2)', 'boundary_point1'], {}), '(boundary_point1 - boundary_point2, boundary_point1)\n', (3094, 3146), True, 'import numpy as np\n'), ((3183, 3217), 'numpy.dot', 'np.dot', (['boundary_point1', 'perp_vec2'], {}), '(boundary_point1, perp_vec2)\n', (3189, 3217), True, 'import numpy as np\n'), ((3236, 3267), 'numpy.dot', 'np.dot', (['intersection', 'perp_vec2'], {}), 
'(intersection, perp_vec2)\n', (3242, 3267), True, 'import numpy as np\n'), ((4579, 4655), 'matplotlib.pyplot.Circle', 'plt.Circle', (['(center.real, center.imag)', 'radius'], {'color': 'self.color', 'fill': '(False)'}), '((center.real, center.imag), radius, color=self.color, fill=False)\n', (4589, 4655), True, 'import matplotlib.pyplot as plt\n'), ((10613, 10623), 'mpmath.mpf', 'mpf', (['"""inf"""'], {}), "('inf')\n", (10616, 10623), False, 'from mpmath import mpf\n'), ((10659, 10669), 'mpmath.mpf', 'mpf', (['"""inf"""'], {}), "('inf')\n", (10662, 10669), False, 'from mpmath import mpf\n'), ((805, 826), 'copy.deepcopy', 'copy.deepcopy', (['domain'], {}), '(domain)\n', (818, 826), False, 'import copy\n'), ((2996, 3024), 'numpy.dot', 'np.dot', (['perp_vec1', 'perp_vec2'], {}), '(perp_vec1, perp_vec2)\n', (3002, 3024), True, 'import numpy as np\n'), ((3857, 3925), 'numpy.linalg.norm', 'np.linalg.norm', (['[center.real - root1.real, center.imag - root1.imag]'], {}), '([center.real - root1.real, center.imag - root1.imag])\n', (3871, 3925), True, 'import numpy as np\n'), ((7606, 7616), 'mpmath.mpf', 'mpf', (['"""inf"""'], {}), "('inf')\n", (7609, 7616), False, 'from mpmath import mpf\n'), ((2777, 2803), 'numpy.array', 'np.array', (['[x.real, x.imag]'], {}), '([x.real, x.imag])\n', (2785, 2803), True, 'import numpy as np\n'), ((2910, 2937), 'numpy.array', 'np.array', (['[-x.imag, x.real]'], {}), '([-x.imag, x.real])\n', (2918, 2937), True, 'import numpy as np\n'), ((8035, 8045), 'mpmath.mpf', 'mpf', (['"""inf"""'], {}), "('inf')\n", (8038, 8045), False, 'from mpmath import mpf\n')] |
import torch.utils.data as data
import numpy as np
import glob
import h5py
import pickle as pkl
import random
import pdb
import matplotlib.pyplot as plt
from torchvision.transforms import Resize
import imp
from torch.utils.data import DataLoader
import os
from classifier_control.classifier.utils.general_utils import AttrDict, map_dict
from classifier_control.classifier.utils.general_utils import resize_video
class BaseVideoDataset(data.Dataset):
    """Common base class for hdf5-backed video datasets."""
    def __init__(self, data_dir, mpar, data_conf, phase, shuffle=True):
        """
        :param data_dir: root directory of the dataset
        :param mpar: model params; only ``img_sz`` is read here
        :param data_conf: dataset configuration object
        :param phase: 'train' / 'val' / 'test'
        :param shuffle: whether to shuffle within batch, set to False for
            computing metrics
        """
        self.phase = phase
        self.data_dir = data_dir
        self.data_conf = data_conf
        # only the training split is ever shuffled
        self.shuffle = shuffle and phase == 'train'
        self.img_sz = mpar.img_sz
        # a single worker keeps ordering deterministic when not shuffling
        self.n_worker = 8 if shuffle else 1
    def get_data_loader(self, batch_size):
        """Build a DataLoader over this dataset (drops the last partial batch)."""
        print('len {} dataset {}'.format(self.phase, len(self)))
        return DataLoader(self, batch_size=batch_size, shuffle=self.shuffle,
                          num_workers=self.n_worker, drop_last=True)
class FixLenVideoDataset(BaseVideoDataset):
    """
    Fixed-length video dataset backed by hdf5 shard files, each holding
    ``traj_per_file`` trajectories.
    """
    def __init__(self, data_dir, mpar, data_conf, phase='train', shuffle=True):
        """
        :param data_dir: dataset root; shards live under <data_dir>/hdf5/<phase>
        :param mpar: model params (img_sz)
        :param data_conf: Attrdict; reads sel_len, img_sz and optionally T
            and train_val_split
        :param phase: 'train' / 'val' / 'test'
        :param shuffle: whether to shuffle within batch, set to False for
            computing metrics
        """
        super().__init__(data_dir, mpar, data_conf, phase, shuffle)
        self.filenames = self._maybe_post_split(self._get_filenames())
        # fixed seed so every phase / worker sees the same file order
        random.seed(1)
        random.shuffle(self.filenames)
        self._data_conf = data_conf
        self.traj_per_file = self.get_traj_per_file(self.filenames[0])
        if hasattr(data_conf, 'T'):
            self.T = data_conf.T
        else:
            self.T = self.get_total_seqlen(self.filenames[0])
        self.transform = Resize([data_conf.img_sz[0], data_conf.img_sz[1]])
        self.flatten_im = False
        self.filter_repeated_tail = False
        print(phase)
        print(len(self.filenames))

    def _get_filenames(self):
        """Return the sorted list of hdf5 shard paths for this phase."""
        assert 'hdf5' not in self.data_dir, "hdf5 must not be contained in the data dir!"
        filenames = sorted(glob.glob(os.path.join(self.data_dir, os.path.join('hdf5', self.phase) + '/*')))
        if not filenames:
            raise RuntimeError('No filenames found in {}'.format(self.data_dir))
        return filenames

    def get_traj_per_file(self, path):
        """Number of trajectories stored in the hdf5 file at *path*."""
        with h5py.File(path, 'r') as F:
            # `.value` was removed in h5py 3.0; `[()]` works in old and new
            return F['traj_per_file'][()]

    def get_total_seqlen(self, path):
        """Sequence length of the first trajectory in *path*."""
        with h5py.File(path, 'r') as F:
            # reading .shape avoids loading the whole image array
            return F['traj0']['images'].shape[0]

    def _get_num_from_str(self, s):
        """Concatenate the digits of *s* and parse them as an int."""
        return int(''.join(filter(str.isdigit, s)))

    def get_extra_obs(self, traj_ind):
        """Load the pickled obs_dict for trajectory *traj_ind* from raw/."""
        main_dir = self.data_dir
        raw_dir = os.path.join(main_dir, 'raw')
        group_dir = os.path.join(raw_dir, f'traj_group{traj_ind//1000}')
        obs_path = os.path.join(os.path.join(group_dir, f'traj{traj_ind}'), 'obs_dict.pkl')
        with open(obs_path, 'rb') as f:
            obs_data = pkl.load(f)
        return obs_data

    def __getitem__(self, index):
        """Load and preprocess one trajectory; *index* spans all files."""
        file_index = index // self.traj_per_file
        path = self.filenames[file_index]
        with h5py.File(path, 'r') as F:
            ex_index = index % self.traj_per_file  # index within the file
            key = 'traj{}'.format(ex_index)
            data_dict = AttrDict(images=(F[key + '/images'][()]))
            # Fetch data into a dict
            for name in F[key].keys():
                if name in ['states', 'actions', 'pad_mask']:
                    data_dict[name] = F[key + '/' + name][()].astype(np.float32)
        data_dict = self.process_data_dict(data_dict)
        if self._data_conf.sel_len != -1:
            data_dict = self.sample_rand_shifts(data_dict)
        data_dict['index'] = index
        return data_dict

    def process_data_dict(self, data_dict):
        """Resize / normalise the raw images into demo_seq_images."""
        data_dict.demo_seq_images = self.preprocess_images(data_dict['images'])
        return data_dict

    def sample_rand_shifts(self, data_dict):
        """ This function processes data tensors so as to have length equal to max_seq_len
        by sampling / padding if necessary """
        # draw a scalar offset (the previous 1-element-array form is
        # deprecated by numpy when later coerced with int())
        offset = np.random.randint(0, self.T - self._data_conf.sel_len)
        data_dict = map_dict(lambda tensor: self._croplen(tensor, offset, self._data_conf.sel_len), data_dict)
        if 'actions' in data_dict:
            data_dict.actions = data_dict.actions[:-1]
        return data_dict

    def preprocess_images(self, images):
        """uint8 video -> float32, channel-first, scaled to [-1, 1]."""
        # Resize video
        if len(images.shape) == 5:
            images = images[:, 0]  # Number of cameras, used in RL environments
        assert images.dtype == np.uint8, 'image need to be uint8!'
        images = resize_video(images, (self.img_sz[0], self.img_sz[1]))
        images = np.transpose(images, [0, 3, 1, 2])  # convert to channel-first
        images = images.astype(np.float32) / 255 * 2 - 1
        assert images.dtype == np.float32, 'image need to be float32!'
        if self.flatten_im:
            images = np.reshape(images, [images.shape[0], -1])
        return images

    def _maybe_post_split(self, filenames):
        """Splits dataset percentage-wise if respective field defined."""
        try:
            return self._split_with_percentage(self.data_conf.train_val_split['post_split'], filenames)
        except (KeyError, AttributeError):
            return filenames

    def _split_with_percentage(self, frac, filenames):
        """Slice *filenames* into this phase's share according to *frac*."""
        assert sum(frac.values()) <= 1.0  # fractions cannot sum up to more than 1
        assert self.phase in frac
        if self.phase == 'train':
            start, end = 0, frac['train']
        elif self.phase == 'val':
            start, end = frac['train'], frac['train'] + frac['val']
        else:
            start, end = frac['train'] + frac['val'], frac['train'] + frac['val'] + frac['test']
        start, end = int(len(filenames) * start), int(len(filenames) * end)
        return filenames[start:end]

    @staticmethod
    def _repeat_tail(data_dict, end_ind):
        """Freeze the sequence after *end_ind* by repeating that frame."""
        data_dict.images[end_ind:] = data_dict.images[end_ind][None]
        if 'states' in data_dict:
            data_dict.states[end_ind:] = data_dict.states[end_ind][None]
        data_dict.pad_mask = np.ones_like(data_dict.pad_mask)
        end_ind = data_dict.pad_mask.shape[0] - 1
        return data_dict, end_ind

    def __len__(self):
        return len(self.filenames) * self.traj_per_file

    @staticmethod
    def _croplen(val, offset, target_length):
        """Pads / crops sequence to desired length."""
        val = val[int(offset):]
        # FIX: renamed local that shadowed the builtin `len`
        seq_len = val.shape[0]
        if seq_len > target_length:
            return val[:target_length]
        elif seq_len < target_length:
            raise ValueError("not enough length")
        else:
            return val

    @staticmethod
    def get_dataset_spec(data_dir):
        """Load dataset_spec from <data_dir>/dataset_spec.py via imp."""
        return imp.load_source('dataset_spec', os.path.join(data_dir, 'dataset_spec.py')).dataset_spec
if __name__ == '__main__':
    # Smoke test: iterate the dataset and display the first frame of each
    # batch.  FIXES: removed a leftover pdb.set_trace(); the constructor was
    # called with only two arguments although it requires (data_dir, mpar,
    # data_conf) — hp carries img_sz, sel_len and T, so it serves as both
    # mpar and data_conf here (TODO confirm against collected configs).
    data_dir = os.environ['VMPC_DATA'] + '/classifier_control/data_collection/sim/1_obj_cartgripper_xz_rejsamp'
    hp = AttrDict(img_sz=(48, 64),
                  sel_len=-1,
                  T=31)
    loader = FixLenVideoDataset(data_dir, hp, hp).get_data_loader(32)
    for i_batch, sample_batched in enumerate(loader):
        images = np.asarray(sample_batched['demo_seq_images'])
        # undo the [-1, 1] scaling and move channels last for imshow
        images = np.transpose((images + 1) / 2, [0, 1, 3, 4, 2])
        actions = np.asarray(sample_batched['actions'])
        print('actions', actions)
        plt.imshow(np.asarray(images[0, 0]))
        plt.show()
| [
"h5py.File",
"numpy.ones_like",
"matplotlib.pyplot.show",
"torch.utils.data.DataLoader",
"classifier_control.classifier.utils.general_utils.resize_video",
"random.shuffle",
"numpy.asarray",
"numpy.transpose",
"classifier_control.classifier.utils.general_utils.AttrDict",
"numpy.random.randint",
"... | [((7749, 7792), 'classifier_control.classifier.utils.general_utils.AttrDict', 'AttrDict', ([], {'img_sz': '(48, 64)', 'sel_len': '(-1)', 'T': '(31)'}), '(img_sz=(48, 64), sel_len=-1, T=31)\n', (7757, 7792), False, 'from classifier_control.classifier.utils.general_utils import AttrDict, map_dict\n'), ((1167, 1276), 'torch.utils.data.DataLoader', 'DataLoader', (['self'], {'batch_size': 'batch_size', 'shuffle': 'self.shuffle', 'num_workers': 'self.n_worker', 'drop_last': '(True)'}), '(self, batch_size=batch_size, shuffle=self.shuffle, num_workers=\n self.n_worker, drop_last=True)\n', (1177, 1276), False, 'from torch.utils.data import DataLoader\n'), ((1896, 1910), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (1907, 1910), False, 'import random\n'), ((1919, 1949), 'random.shuffle', 'random.shuffle', (['self.filenames'], {}), '(self.filenames)\n', (1933, 1949), False, 'import random\n'), ((2218, 2268), 'torchvision.transforms.Resize', 'Resize', (['[data_conf.img_sz[0], data_conf.img_sz[1]]'], {}), '([data_conf.img_sz[0], data_conf.img_sz[1]])\n', (2224, 2268), False, 'from torchvision.transforms import Resize\n'), ((3200, 3229), 'os.path.join', 'os.path.join', (['main_dir', '"""raw"""'], {}), "(main_dir, 'raw')\n", (3212, 3229), False, 'import os\n'), ((3250, 3304), 'os.path.join', 'os.path.join', (['raw_dir', 'f"""traj_group{traj_ind // 1000}"""'], {}), "(raw_dir, f'traj_group{traj_ind // 1000}')\n", (3262, 3304), False, 'import os\n'), ((4798, 4855), 'numpy.random.randint', 'np.random.randint', (['(0)', '(self.T - self._data_conf.sel_len)', '(1)'], {}), '(0, self.T - self._data_conf.sel_len, 1)\n', (4815, 4855), True, 'import numpy as np\n'), ((5348, 5402), 'classifier_control.classifier.utils.general_utils.resize_video', 'resize_video', (['images', '(self.img_sz[0], self.img_sz[1])'], {}), '(images, (self.img_sz[0], self.img_sz[1]))\n', (5360, 5402), False, 'from classifier_control.classifier.utils.general_utils import resize_video\n'), ((5420, 5454), 
'numpy.transpose', 'np.transpose', (['images', '[0, 3, 1, 2]'], {}), '(images, [0, 3, 1, 2])\n', (5432, 5454), True, 'import numpy as np\n'), ((6872, 6904), 'numpy.ones_like', 'np.ones_like', (['data_dict.pad_mask'], {}), '(data_dict.pad_mask)\n', (6884, 6904), True, 'import numpy as np\n'), ((7968, 8013), 'numpy.asarray', 'np.asarray', (["sample_batched['demo_seq_images']"], {}), "(sample_batched['demo_seq_images'])\n", (7978, 8013), True, 'import numpy as np\n'), ((8023, 8038), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (8036, 8038), False, 'import pdb\n'), ((8056, 8103), 'numpy.transpose', 'np.transpose', (['((images + 1) / 2)', '[0, 1, 3, 4, 2]'], {}), '((images + 1) / 2, [0, 1, 3, 4, 2])\n', (8068, 8103), True, 'import numpy as np\n'), ((8150, 8187), 'numpy.asarray', 'np.asarray', (["sample_batched['actions']"], {}), "(sample_batched['actions'])\n", (8160, 8187), True, 'import numpy as np\n'), ((8276, 8286), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8284, 8286), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2835), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (2824, 2835), False, 'import h5py\n'), ((2938, 2958), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (2947, 2958), False, 'import h5py\n'), ((3335, 3377), 'os.path.join', 'os.path.join', (['group_dir', 'f"""traj{traj_ind}"""'], {}), "(group_dir, f'traj{traj_ind}')\n", (3347, 3377), False, 'import os\n'), ((3458, 3469), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (3466, 3469), True, 'import pickle as pkl\n'), ((3756, 3776), 'h5py.File', 'h5py.File', (['path', '"""r"""'], {}), "(path, 'r')\n", (3765, 3776), False, 'import h5py\n'), ((3964, 4005), 'classifier_control.classifier.utils.general_utils.AttrDict', 'AttrDict', ([], {'images': "F[key + '/images'].value"}), "(images=F[key + '/images'].value)\n", (3972, 4005), False, 'from classifier_control.classifier.utils.general_utils import AttrDict, map_dict\n'), ((5660, 5701), 
'numpy.reshape', 'np.reshape', (['images', '[images.shape[0], -1]'], {}), '(images, [images.shape[0], -1])\n', (5670, 5701), True, 'import numpy as np\n'), ((8242, 8266), 'numpy.asarray', 'np.asarray', (['images[0, 0]'], {}), '(images[0, 0])\n', (8252, 8266), True, 'import numpy as np\n'), ((7543, 7584), 'os.path.join', 'os.path.join', (['data_dir', '"""dataset_spec.py"""'], {}), "(data_dir, 'dataset_spec.py')\n", (7555, 7584), False, 'import os\n'), ((2587, 2619), 'os.path.join', 'os.path.join', (['"""hdf5"""', 'self.phase'], {}), "('hdf5', self.phase)\n", (2599, 2619), False, 'import os\n')] |
import numpy as np
import math
from struct import unpack_from
from tifinity.parser.errors import InvalidTiffError
# TIFF field type code -> (byte size of one value, reader method name,
# writer method name).  The method names are resolved on the file handler
# via getattr (see Tiff.read_ifd); the third entry is presumably used on
# the write path — verify against the save code.
ifdtype = {
    1: (1, "read_bytes", "insert_bytes"), # byte - 1 byte
    2: (1, "read_bytes", "insert_bytes"), # ascii - 1 byte
    3: (2, "read_shorts", "insert_shorts"), # short - 2 bytes
    4: (4, "read_ints", "insert_ints"), # long - 4 bytes
    5: (8, "read_rationals", "insert_rationals"), # rational - 8 bytes
    6: (1, "read", "insert_bytes"), # sbyte - 1 byte
    7: (1, "read_bytes", "insert_bytes"), # undefined - 1 byte
    8: (2, "read_shorts", "insert_shorts"), # sshort - 2 bytes
    9: (4, "read_ints", "insert_ints"), # slong - 4 bytes
    10: (8, "read_rationals", "insert_rationals"), # srational - 8 bytes
    11: (4, "read_floats", "insert_floats"), # float - 4 bytes
    12: (8, "read_doubles", "insert_doubles") # double - 8 bytes
}
# TIFF tag ID -> tag name (TIFF 6.0 baseline plus common extension and
# private tags); -1 is a local sentinel for unknown tags.
ifdtag = {
    254: "NewSubfileType",
    255: "SubfileType",
    256: "ImageWidth",
    257: "ImageLength",
    258: "BitsPerSample",
    259: "Compression",
    262: "PhotometricInterpretation",
    263: "Thresholding",
    264: "CellWidth",
    265: "CellLength",
    266: "FillOrder",
    269: "DocumentName", # ext; TIFF 6.0 Section 12
    270: "ImageDescription",
    271: "Make",
    272: "Model",
    273: "StripOffsets",
    274: "Orientation",
    277: "SamplesPerPixel",
    278: "RowsPerStrip",
    279: "StripByteCounts",
    280: "MinSampleValue",
    281: "MaxSampleValue",
    282: "XResolution",
    283: "YResolution",
    284: "PlanarConfiguration",
    285: "PageName", # ext
    288: "FreeOffsets",
    289: "FreeByteCounts",
    290: "GrayResponseUnit",
    291: "GrayResponseCurve",
    296: "ResolutionUnit",
    297: "PageNumber", # ext
    305: "Software",
    306: "DateTime",
    315: "Artist",
    316: "HostComputer",
    317: "Predictor", # ext; TIFF 6.0 Section 14
    318: "WhitePoint", # ext; TIFF 6.0 Section 20
    319: "PrimaryChromaticities", # ext; TIFF 6.0 Section 20
    320: "ColorMap",
    338: "ExtraSamples",
    339: "SampleFormat", # ext; TIFF 6.0 Section 19
    700: "XMP",
    33432: "Copyright",
    33723: "IPTC",
    34377: "Photoshop",
    34665: "EXIF_IFD",
    34675: "ICC_Profile",
    -1: "UNKNOWN"
}
# Reverse lookup: tag name -> numeric tag ID.
inv_ifdtag = {v: k for k, v in ifdtag.items()}
# TIFF stuff
class Directory:
    """A single IFD entry (tag, type, count, value) of a TIFF directory."""
    def __init__(self, tag, ttype, count, value, valid_type):
        """
        :param tag: numeric TIFF tag ID
        :param ttype: numeric TIFF field type code
        :param count: number of values of that type
        :param value: decoded value(s)
        :param valid_type: False when ttype is outside the TIFF v6 range
        """
        self.tag = tag
        self.type = ttype
        self.type_valid = valid_type
        self.count = count
        self.value = value
        self.sot_offset = 0
    def set_tag_offset(self, offset):
        """Record the start-of-tag offset of this entry within the file."""
        self.sot_offset = offset
    def tostring(self, limit_value=False):
        """One-line human-readable summary; truncates long values if asked."""
        tagname = ifdtag.get(self.tag, "Unknown")
        val_to_print = self.value
        if self.type == 2:  # ascii: render the byte values as text
            val_to_print = ''.join(chr(i) for i in val_to_print)
        if limit_value:
            val_to_print = str(val_to_print[:100])
            if len(self.value) > 100:
                val_to_print += '...'
        marker = '' if self.type_valid else '*'
        note = '' if self.type_valid else '\t[*Unknown Tag Type]'
        return "[{0}]\t{1:31}\t{2:2}{3}\t{4:6}\t{5}{6}".format(
            self.tag, tagname, self.type, marker, self.count,
            val_to_print, note)
class IFD:
    """One Image File Directory of a TIFF: a mapping of tag IDs to
    Directory entries plus bookkeeping for its image data."""
    def __init__(self, offset):
        self.offset = offset        # file offset where this IFD starts
        self.numtags = 0            # number of directory entries
        self.directories = {}       # tag id -> Directory entry
        self.nextifd = 0            # offset of the next IFD (0 = last)
        self.pointerlocation = 0
        self.img_data = None        # raw image (strip) data
        self.ifd_data = None        # raw bytes of the IFD itself
    def add_directory(self, directory):
        """Register a Directory entry under its tag ID."""
        self.directories[directory.tag] = directory
    def get_image_width(self):
        # 256 = ImageWidth
        return self.directories[256].value[0]
    def get_image_height(self):
        # 257 = ImageLength
        return self.directories[257].value[0]
    def get_bits_per_sample(self):
        # 258 = BitsPerSample; one entry per sample/channel
        return self.directories[258].value
    def set_bits_per_sample(self, bps=None):
        """Overwrite BitsPerSample; defaults to 8-bit RGB."""
        if bps is None:
            bps = [8, 8, 8]
        self.directories[inv_ifdtag["BitsPerSample"]].value = bps
    def get_rows_per_strip(self):
        """Returns the number of pixel rows per strip in this IFD's image"""
        # TODO: Default number of rows per strip
        return self.directories[inv_ifdtag["RowsPerStrip"]].value[0]
    def set_rows_per_strip(self, rows):
        self.directories[inv_ifdtag["RowsPerStrip"]].value = rows
    def get_number_strips(self):
        """Returns the number of Strips for this IFD's image"""
        rows = self.get_rows_per_strip()
        # ceiling division: a partial final strip still counts
        return math.floor((self.get_image_height() + rows - 1) / rows)
    def get_strips(self):
        """Returns a list of (strip_offset, strip_byte_count) tuples, one
        per Strip in this IFD's image."""
        offsets = self.directories[273].value
        byte_counts = self.directories[279].value
        return list(zip(offsets, byte_counts))
    def set_strip_offsets(self, offsets):
        """Sets the strip offsets for this IFD's image"""
        assert (len(offsets) == self.get_tag_count(inv_ifdtag["StripOffsets"]))
        # TODO: Finish setStripOffsets (if necessary)
    def get_strip_offsets(self):
        """Returns a list of offsets for each Strip in this IFD's image"""
        return self.directories[273].value
    def set_strip_byte_counts(self, counts):
        # TODO: store counts in byte size relating to tag type
        self.directories[inv_ifdtag["StripByteCounts"]].value = counts
    def get_bytes_per_pixel(self):
        """Sum of the per-sample byte widths (BitsPerSample / 8)."""
        return sum(int(bits / 8) for bits in self.directories[258].value)
    def get_tag_type(self, tag):
        return self.directories[tag].type
    def get_tag_type_size(self, tag):
        """Byte size of one value of *tag*'s type; accepts an ID or a name."""
        key = tag if isinstance(tag, int) else inv_ifdtag[tag]
        return ifdtype[self.directories[key].type][0]
    def get_tag_count(self, tag):
        return self.directories[tag].count
    def set_tag_count(self, tag, count):
        self.directories[tag].count = count
    def get_tag_value_by_name(self, tagname):
        """Returns a tag's value from the IFD, accessed via the tag name itself."""
        return self.directories[inv_ifdtag[tagname]].value
    def get_tag_value(self, tag):
        """Value stored under *tag*, or None when the tag is absent."""
        entry = self.directories.get(tag)
        return None if entry is None else entry.value
    def get_tag_offset(self, tag):
        return self.directories[tag].sot_offset
    def print_ifd_header(self):
        print("IFD (Offset: " + str(self.offset) + " | num tags: " + str(self.numtags) + " | next IFD: " + str(
            self.nextifd) + ")")
    def print_tag(self, tag):
        """Print one entry; *tag* may be a numeric ID or a tag name."""
        entry = self.directories.get(tag)
        if entry is None:
            try:
                entry = self.directories[inv_ifdtag[tag]]
            except KeyError:
                print("Tag Element Not Found")
                return
        print(entry.tostring())
    def print_ifd(self):
        """Print the IFD header followed by every entry."""
        self.print_ifd_header()
        for entry in self.directories.values():
            print(entry.tostring())
# Can use 'r+b' mode to overwrite bytes at the current location
#
# TIFF:
# - header
# - endianness, 42, IFD0 pointer
# [- IFDn
# - num tags
# - tags
# [- tag type count value]+
# - next IFD pointer
# - Offset values
# - Image data]+
# - 0000
# Pointers:
# - Header -> IFD0
# - IFD:
# - Tag Value(s) -> actual value [for arrays larger than 4 bytes]
# - Next IFD
class Tiff:
def __init__(self, filename: str):
"""Creates a new Tiff object from the specified Tiff file"""
self.tif_file = None
self.byteOrder = 'big'
self.magic = None
self.ifds = []
if filename is not None:
self.tif_file = TiffFileHandler(filename)
self.load_tiff()
    def raw_data(self):
        """Return the entire file's contents as a numpy array, delegating
        to the underlying file handler."""
        return self.tif_file.raw_data()
    def load_tiff(self):
        """Load this TIFF into the internal data structure (header, IFD
        chain and image data), ready for manipulation.

        :raises InvalidTiffError: if the byte-order mark is not II/MM or
            the magic number is not 42.
        """
        try:
            # Byte order: 'II' = little-endian, 'MM' = big-endian
            h = bytes(self.tif_file.read(2))
            self.byteOrder = {b'II': 'little', b'MM': 'big'}[h]
            assert (self.byteOrder == 'little' or self.byteOrder == 'big')
            self.tif_file.set_byte_order(self.byteOrder)
            # Magic number
            self.magic = self.tif_file.read_int(2)
            assert (self.magic == 42)
        except (KeyError, AssertionError):
            # KeyError from the byte-order lookup, AssertionError from the checks
            raise InvalidTiffError(self.tif_file._filename, "Incorrect header")
        # IFD offset
        nextifd_offset = self.tif_file.read_int(4) # returns offset to first IFD
        # follow the IFD chain, reading each IFD and its image data;
        # a next-offset of 0 marks the end of the chain
        while nextifd_offset != 0:
            ifd = self.read_ifd(nextifd_offset)
            self.ifds.append(ifd)
            self.read_image(ifd)
            nextifd_offset = ifd.nextifd
    def save_tiff(self, to_file=None):
        """Serialise the in-memory structure back to TIFF bytes and write
        them out: header first, then each IFD followed by its image data.

        :param to_file: destination path passed to the file handler's
            write(); handler semantics decide the default when None.
        """
        self.tif_file.clear()  # Empty the array first
        # Header
        byteo = 'II'
        if self.byteOrder != 'little':
            byteo = 'MM'
        self.tif_file.insert_bytes(list(byteo.encode()))  # byte order
        self.tif_file.insert_int(42, 2)  # Magic number
        self.tif_file.insert_int(8, 4)  # first IFD always at 0x08
        for ifd in self.ifds:
            # self.calculateIFDSpace(ifd) # Readjusts counts because of changes to image data
            endpos = self.save_ifd(ifd)
            self.save_image(ifd, endpos)
        self.tif_file.write(to_file)  # lastly, write to file
    # # Do this if change stuff having read the TIFF, e.g. migrated the image data. Otherwise
    # # assume all is the same size - even if the offsets have changed.
    # def calculate_ifd_space(self, ifd):
    #     strips_per_image = ifd.get_strips_per_image()
    #     ifd.set_tag_count(inv_ifdtag["StripOffsets"], strips_per_image)
    #     ifd.set_tag_count(inv_ifdtag["StripByteCounts"], strips_per_image)
    def read_ifd(self, ifd_offset):
        """Parses the Image File Directory starting at ifd_offset.

        Returns an IFD object populated with its tag count, the raw directory
        bytes, one Directory per 12-byte entry, and the offset of the next
        IFD (ifd.nextifd; 0 when this is the last one).
        NOTE(review): relies on module-level IFD, Directory and ifdtype
        definitions that are outside this chunk.
        """
        # go through IFD
        ifd = IFD(ifd_offset)
        self.tif_file.seek(ifd.offset)
        ifd.numtags = self.tif_file.read_int(2)
        # save the raw directory bytes: 2 (tag count) + 12 per entry + 4 (next-IFD offset)
        ifd.ifd_data = self.tif_file.read(size=(2+(ifd.numtags*12)+4), location=ifd_offset)
        for i in range(ifd.numtags):
            # each 12-byte entry: tag (2), type (2), count (4), value-or-pointer (4)
            tag = self.tif_file.read_int(2)
            tag_type = self.tif_file.read_int(2)
            type_valid = True  # True if tag is in valid TIFF v6 range
            count = self.tif_file.read_int(4)
            value_loc = self.tif_file.tell()  # current location in tiff array
            # TIFF v6 spec, pg 16:
            #  "Warning: It is possible that other TIFF field types will be added in the future.
            #   Readers should skip over fields containing an unexpected field type."
            # ifdtype maps type -> (byte size, read-method name, ...) judging by its use here
            ifdtype_tuple = ifdtype.get(tag_type)
            if ifdtype_tuple is not None:
                # found a tag type within TIFF v6 specified ranges
                if count * ifdtype_tuple[0] > 4:  # value exceeds 4 bytes: next 4 bytes are a pointer to its location
                    value_loc = self.tif_file.read_int(4)
                read_func = getattr(self.tif_file, ifdtype_tuple[1])
                value = read_func(count=count, location=value_loc)
                # TODO: Need to handle case where <4 bytes are read
                if count * ifdtype_tuple[0] <= 4:
                    self.tif_file.offset(4)  # value was inline; skip the entry's 4-byte value field
            else:
                # tag type outside TIFF v6 specified ranges
                # just read next 4 bytes and mark directory as "Unknown tag type"
                value = self.tif_file.read_ints(4)
                type_valid = False
            # add directory
            ifd.add_directory(Directory(tag, tag_type, count, value, type_valid))
        # finally get the next IFD offset
        ifd.nextifd = self.tif_file.read_int(4)
        # return the IFD
        return ifd
    def save_ifd(self, ifd):
        """Writes the given IFD into the byte array at the current offset.

        Layout: tag count (2 bytes), ifd.numtags 12-byte directory entries,
        and a 4-byte next-IFD pointer; values that do not fit in an entry's
        4-byte value field are appended after the IFD.  Returns the offset
        just past the last out-of-line value (used by save_image as the
        position where image data may follow).
        """
        # Writes: num directories, directories, offset values, space for next ifd
        start_of_ifd = self.tif_file.tell()
        # first calculate end of IFD offset where directory values can be written
        # end = curpos + (num directories) + (n directories) + offset to nextIFD
        num_bytes = 2 + (ifd.numtags * 12) + 4
        end_of_ifd = start_of_ifd + num_bytes  # location after IFD for IFD values
        self.tif_file.insert_bytes(np.zeros((num_bytes,), dtype='uint8'))  # reserve the whole IFD as zeros
        self.tif_file.seek(start_of_ifd)
        self.tif_file.insert_int(ifd.numtags, size=2, overwrite=True)
        # assumes sorted tags.
        for tag, directory in ifd.directories.items():
            directory.set_tag_offset(self.tif_file.tell())  # remember where this entry starts (read back by save_image)
            self.tif_file.insert_int(directory.tag, size=2, overwrite=True)
            self.tif_file.insert_int(directory.type, size=2, overwrite=True)
            self.tif_file.insert_int(directory.count, size=4, overwrite=True)
            write_func = getattr(self.tif_file, ifdtype[directory.type][2])
            overwrite_value = True
            value_loc = self.tif_file.tell()
            if directory.count * ifdtype[directory.type][0] > 4:  # next 4 bytes are a pointer to the value
                self.tif_file.insert_int(end_of_ifd, size=4, overwrite=True)  # so write pointer then jump to location
                value_loc = end_of_ifd
                overwrite_value = False  # out-of-line values are inserted (appended), not overwritten
            num_written = write_func(directory.value, location=value_loc, overwrite=overwrite_value)
            if directory.count * ifdtype[directory.type][0] > 4:
                end_of_ifd += num_written  # the out-of-line area grew by what was just written
            else:
                self.tif_file.offset(4)  # _offset is not updated if location set in write_func
        self.tif_file.insert_int(ifd.nextifd, size=4, overwrite=True)  # pointer to next IFD, or 0x00000000
        return end_of_ifd
def read_image(self, ifd):
"""Reads the full image data for the specified IFD into a numpy array"""
ifd.img_data = np.array([], dtype='uint8')
strips = ifd.get_strips() # [(strip_offset, strip_byte_count)]
for strip in strips:
ifd.img_data = np.append(ifd.img_data, self.tif_file.read(size=strip[1], location=strip[0]))
    def save_image(self, ifd, endpos):
        """Inserts the specified IFD's image data into the tiff numpy array at the specified
        end position, then rewrites the StripOffsets tag to point at the strips' new locations."""
        # Assumes:
        #  1. StripOffsets has count set appropriately, however values are not set/correct and need updating
        #  2. StripByteCounts has count and values set appropriately.
        #  3. StripOffsets or StripByteCounts count value is correct for the number of strips per image (at least for
        #     chunky planar configuration (RGBRGBRGB...))
        num_strips = ifd.get_number_strips()
        strip_byte_counts = [y for (x, y) in ifd.get_strips()]
        strip_offsets = []
        self.tif_file.seek(endpos)  # jump to the end for writing image data
        start_pos = 0
        for num_bytes in strip_byte_counts:
            strip_offsets.append(self.tif_file.tell())  # record position of strip start
            self.tif_file.insert_bytes(ifd.img_data[start_pos:start_pos + num_bytes])
            start_pos += num_bytes
        # now set strip offsets in IFD
        strip_offset_tag = inv_ifdtag["StripOffsets"]
        # + 8 skips the entry's tag (2), type (2) and count (4) to reach its value field
        strip_offset_value_location = ifd.get_tag_offset(strip_offset_tag) + 8
        if ifdtype[ifd.get_tag_type(strip_offset_tag)][0] * num_strips > 4:
            # value to write is larger than 4 bytes, so get the offset for the value array
            strip_offset_value_location = ifd.get_tag_value(strip_offset_tag)
        # now write the offsets
        tag_type_size = ifd.get_tag_type_size("StripOffsets")
        self.tif_file.insert_ints(strip_offsets, tag_type_size, location=strip_offset_value_location, overwrite=True)
class TiffFileHandler(object):
    """Handler which imports a TIFF file into a numpy array for reading and/or
    writing.  Writing creates a copy of the file.

    All read_* / insert_* helpers operate on the internal uint8 array and
    maintain a current offset; when an explicit ``location`` argument is
    supplied the operation happens there and the current offset is untouched.
    """

    def __init__(self, filename: str) -> None:
        # Byte order defaults to little-endian until set_byte_order() is called
        self._byteorder = 'little'
        self._filename = filename
        self._offset = 0
        with open(filename, 'rb') as in_file:
            self._tiff = np.fromfile(in_file, dtype="uint8")

    def raw_data(self):
        """Returns the raw uint8 numpy array for the whole file."""
        return self._tiff

    def set_byte_order(self, byteorder='little'):
        """Sets the byte order ('little' or 'big') used for subsequent reads and writes."""
        self._byteorder = byteorder

    def clear(self):
        """Empties the current numpy array for this Tiff and resets the offset."""
        self._tiff = np.array([], dtype="uint8")
        self._offset = 0

    def read(self, size=1, count=1, location=None):
        """Reads the next 'size' bytes at the specified location, or the current offset if no
        location is supplied.  If location is specified, this read will not update the current
        offset.  (``count`` is accepted for signature compatibility with the other read_*
        helpers but is not used here.)"""
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            b = self._tiff[off:off+size]
            if location is None:
                self._offset += size
            return b

    def read_bytes(self, count=1, location=None):
        """Reads 'count' bytes (as a list of ints) from the specified location, or the current
        offset if no location is supplied."""
        return_vals = []
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            return_vals = list(self._tiff[off:off+count])
            if location is None:
                self._offset += count
        return return_vals

    def insert_bytes(self, bytes_to_write, location=None, overwrite=False):
        """Inserts or overwrites the byte values at the specified location, or the current
        offset if no location is specified.  Returns the number of bytes written."""
        num_bytes = len(bytes_to_write)
        off = self._offset
        if location is not None:
            off = location
        if overwrite:
            self._tiff[off:off + num_bytes] = bytes_to_write
        else:
            self._tiff = np.insert(self._tiff, off, bytes_to_write)
        if location is None:
            self._offset += num_bytes
        return num_bytes

    def read_floats(self, count=1, location=None):
        """Reads 'count' IEEE 754 single-precision floats (4 bytes each) at the specified
        location, or the current offset if no location is supplied.
        If location is specified, this read will not update the current offset."""
        return_vals = []
        byteorder = {'little': '<f', 'big': '>f'}[self._byteorder]
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            for c in range(count):
                return_vals.append(unpack_from(byteorder, self._tiff[off:off+4])[0])
                off += 4
            if location is None:
                self._offset += (count * 4)
        return return_vals

    def read_doubles(self, count=1, location=None):
        """Reads 'count' IEEE 754 double-precision floats (8 bytes each) at the specified
        location, or the current offset if no location is supplied.
        If location is specified, this read will not update the current offset."""
        return_vals = []
        byteorder = {'little': '<d', 'big': '>d'}[self._byteorder]
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            for c in range(count):
                return_vals.append(unpack_from(byteorder, self._tiff[off:off + 8])[0])
                off += 8
            if location is None:
                self._offset += (count * 8)
        return return_vals

    def insert_floats(self, numbers, location=None, overwrite=False):
        """Inserts the specified IEEE 754 single-precision floats into the tiff array at the
        specified location, or the current offset if no location is supplied.
        If overwrite is True, the bytes overwrite those at the write location; if False,
        the bytes are inserted at the write location.  Returns the number of bytes written.
        (Previously an unimplemented stub; encoding matches read_floats.)"""
        from struct import pack  # pack is not part of this module's top-level struct import
        fmt = {'little': '<f', 'big': '>f'}[self._byteorder]
        # pack each float into 4 bytes honouring the handler's byte order
        bytes_to_write = [b for x in numbers for b in pack(fmt, x)]
        return self.insert_bytes(bytes_to_write, location, overwrite)

    def read_int(self, size=4, location=None):
        """Reads a single int of 'size' bytes at the specified location, or the current
        offset if no location is supplied."""
        return self.read_ints(size=size, location=location)[0]

    def read_ints(self, size=4, count=1, location=None):
        """Reads 'count' integers of 'size' bytes each at the specified location, or the
        current offset if no location is supplied.
        If location is specified, this read will not update the current offset."""
        return_vals = []
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            for c in range(count):
                return_vals.append(int.from_bytes(self._tiff[off:off+size], byteorder=self._byteorder))
                off += size
            if location is None:
                self._offset += (count * size)
        return return_vals

    def insert_int(self, value, size=4, location=None, overwrite=False):
        """Inserts the specified value encoded in size bytes into the tiff array at the
        specified location.  If overwrite is True, the bytes overwrite those at the write
        location; if False, the bytes are inserted at the write location."""
        self.insert_ints([value], size=size, location=location, overwrite=overwrite)

    def insert_ints(self, numbers, size=4, location=None, overwrite=False):
        """Inserts the specified numbers, each encoded in size bytes, into the tiff array
        at the specified location.  If overwrite is True, the bytes overwrite those at the
        write location; if False, the bytes are inserted at the write location.
        Returns the number of bytes written."""
        def flatten(l): return [x for sublist in l for x in sublist]
        def tobytes(x): return list(x.to_bytes(size, byteorder=self._byteorder))
        bytes_to_write = flatten([tobytes(x) for x in numbers])
        return self.insert_bytes(bytes_to_write, location, overwrite)

    def read_rationals(self, count=1, location=None):
        """Reads in 'count' TIFF Rational values (two 4-byte integers: numerator,
        denominator) as a list of (num, denom) tuples."""
        return_vals = []
        if self._tiff is not None:
            off = self._offset
            if location is not None:
                off = location
            for c in range(count):
                num = int.from_bytes(self._tiff[off:off + 4], byteorder=self._byteorder)
                denom = int.from_bytes(self._tiff[off+4:off + 8], byteorder=self._byteorder)
                return_vals.append((num, denom))
                off += 8
            if location is None:
                self._offset += (count * 8)
        return return_vals

    def insert_rationals(self, values, location=None, overwrite=False):
        """Inserts or overwrites the specified (numerator, denominator) rational values at
        the specified location, or the current offset if no location is specified.
        Returns the number of bytes written.  (Now delegates to insert_bytes instead of
        duplicating its insert/overwrite/offset bookkeeping.)"""
        def flatten(l): return [x for sublist in l for x in sublist]
        def tobytes(x): return list(x.to_bytes(4, byteorder=self._byteorder))
        bytes_to_write = flatten([tobytes(n) + tobytes(d) for (n, d) in values])
        return self.insert_bytes(bytes_to_write, location, overwrite)

    def read_shorts(self, count=1, location=None):
        """Reads in 'count' TIFF Short values (2-byte integers)."""
        return self.read_ints(size=2, count=count, location=location)

    def insert_shorts(self, numbers, location=None, overwrite=False):
        """Inserts or overwrites the specified short (2-byte) numbers at the specified
        location, or the current offset if no location is specified."""
        return self.insert_ints(numbers, 2, location, overwrite)

    def write(self, tofile=None):
        """Writes the current np byte array to the specified file, or a copy of this TIFF
        file (if no file is specified)."""
        if tofile is None:
            # NOTE(review): assumes a 4-character extension (".tif"); a ".tiff" input
            # yields names like "x.t_tifinity.tiff" — confirm intended.
            tofile = self._filename[:-4]+"_tifinity.tiff"
        with open(tofile, 'wb') as out_file:
            self._tiff.tofile(out_file)  # numpy.tofile()

    def seek(self, offset, location=0):
        """Sets the current offset relative to the specified location.
        Out-of-range seeks are silently ignored."""
        if (0 <= location <= len(self._tiff)) and (0 <= location+offset <= len(self._tiff)):
            self._offset = location+offset

    def offset(self, offset):
        """Moves the current offset by 'offset' bytes (may be negative)."""
        self._offset += offset

    def tell(self):
        """Returns the current offset."""
        return self._offset
| [
"tifinity.parser.errors.InvalidTiffError",
"numpy.fromfile",
"numpy.zeros",
"numpy.insert",
"numpy.array",
"struct.unpack_from"
] | [((15194, 15221), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""uint8"""'}), "([], dtype='uint8')\n", (15202, 15221), True, 'import numpy as np\n'), ((17830, 17857), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""uint8"""'}), "([], dtype='uint8')\n", (17838, 17857), True, 'import numpy as np\n'), ((13522, 13559), 'numpy.zeros', 'np.zeros', (['(num_bytes,)'], {'dtype': '"""uint8"""'}), "((num_bytes,), dtype='uint8')\n", (13530, 13559), True, 'import numpy as np\n'), ((17487, 17522), 'numpy.fromfile', 'np.fromfile', (['in_file'], {'dtype': '"""uint8"""'}), "(in_file, dtype='uint8')\n", (17498, 17522), True, 'import numpy as np\n'), ((19350, 19392), 'numpy.insert', 'np.insert', (['self._tiff', 'off', 'bytes_to_write'], {}), '(self._tiff, off, bytes_to_write)\n', (19359, 19392), True, 'import numpy as np\n'), ((25196, 25238), 'numpy.insert', 'np.insert', (['self._tiff', 'off', 'bytes_to_write'], {}), '(self._tiff, off, bytes_to_write)\n', (25205, 25238), True, 'import numpy as np\n'), ((9360, 9421), 'tifinity.parser.errors.InvalidTiffError', 'InvalidTiffError', (['self.tif_file._filename', '"""Incorrect header"""'], {}), "(self.tif_file._filename, 'Incorrect header')\n", (9376, 9421), False, 'from tifinity.parser.errors import InvalidTiffError\n'), ((20106, 20153), 'struct.unpack_from', 'unpack_from', (['byteorder', 'self._tiff[off:off + 4]'], {}), '(byteorder, self._tiff[off:off + 4])\n', (20117, 20153), False, 'from struct import unpack_from\n'), ((20926, 20973), 'struct.unpack_from', 'unpack_from', (['byteorder', 'self._tiff[off:off + 8]'], {}), '(byteorder, self._tiff[off:off + 8])\n', (20937, 20973), False, 'from struct import unpack_from\n')] |
import numpy as np
from girard import monte_carlo as mc
def get_sample_std(omega, N):
return np.sqrt(omega * (1 - omega) / N)
def get_confidence_interval_prediction(omega, N):
    """Two-sigma (~95%) confidence interval around the estimate omega
    of a Bernoulli proportion measured from N samples."""
    # inline of get_sample_std: sqrt(omega * (1 - omega) / N)
    half_width = 2 * np.sqrt(omega * (1 - omega) / N)
    return omega - half_width, omega + half_width
def get_sample_distribution_for_solid_angle(cone_vectors, samples_per_estimate, population_size):
    """Draws `population_size` independent Monte Carlo estimates of the solid
    angle spanned by `cone_vectors` and summarises their distribution.

    Returns the raw estimates, their mean, the empirical standard deviation,
    and the standard deviation predicted by the Bernoulli sampling model."""
    estimates = [
        mc.estimate_solid_angle(cone_vectors, samples_per_estimate)
        for _ in range(population_size)
    ]
    sample_mean = np.mean(estimates)
    empirical_std = np.std(estimates)
    predicted_std = get_sample_std(sample_mean, samples_per_estimate)
    return estimates, sample_mean, empirical_std, predicted_std
| [
"numpy.std",
"numpy.mean",
"girard.monte_carlo.estimate_solid_angle",
"numpy.sqrt"
] | [((98, 130), 'numpy.sqrt', 'np.sqrt', (['(omega * (1 - omega) / N)'], {}), '(omega * (1 - omega) / N)\n', (105, 130), True, 'import numpy as np\n'), ((479, 498), 'numpy.mean', 'np.mean', (['estimators'], {}), '(estimators)\n', (486, 498), True, 'import numpy as np\n'), ((514, 532), 'numpy.std', 'np.std', (['estimators'], {}), '(estimators)\n', (520, 532), True, 'import numpy as np\n'), ((374, 433), 'girard.monte_carlo.estimate_solid_angle', 'mc.estimate_solid_angle', (['cone_vectors', 'samples_per_estimate'], {}), '(cone_vectors, samples_per_estimate)\n', (397, 433), True, 'from girard import monte_carlo as mc\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from numpy.linalg import solve, norm, det
import scipy.stats as stats
def norm_cop_pdf(u, mu, sigma2):
    """Probability density of the Gaussian copula at grade u.

    Parameters
    ----------
    u : array, shape (n_,)
        Copula grades, each expected in (0, 1).
    mu : array, shape (n_,)
        Mean vector of the underlying normal distribution.
    sigma2 : array, shape (n_, n_)
        Covariance matrix of the underlying normal distribution.

    Returns
    -------
    pdf_u : scalar
        Joint normal pdf divided by the product of the marginal normal
        pdfs, evaluated at the component-wise inverse marginal cdfs of u.
    """
    # Step 1: Compute the inverse marginal cdf's
    svec = np.sqrt(np.diag(sigma2))
    x = stats.norm.ppf(u.flatten(), mu.flatten(), svec)
    # Step 2: Compute the joint pdf via scipy's multivariate normal
    # (replaces the hand-rolled (2*pi)^(-n/2) * det(sigma2)^(-1/2) * exp(...) formula)
    pdf_x = stats.multivariate_normal.pdf(x, mean=mu.flatten(), cov=sigma2)
    # Step 3: Compute the marginal pdf's
    pdf_xn = stats.norm.pdf(x, mu.flatten(), svec)
    # Copula pdf = joint density / product of marginal densities
    return pdf_x / np.prod(pdf_xn)
| [
"numpy.diag",
"numpy.linalg.det",
"numpy.prod"
] | [((441, 456), 'numpy.diag', 'np.diag', (['sigma2'], {}), '(sigma2)\n', (448, 456), True, 'import numpy as np\n'), ((891, 906), 'numpy.prod', 'np.prod', (['pdf_xn'], {}), '(pdf_xn)\n', (898, 906), True, 'import numpy as np\n'), ((618, 629), 'numpy.linalg.det', 'det', (['sigma2'], {}), '(sigma2)\n', (621, 629), False, 'from numpy.linalg import solve, norm, det\n')] |
import pytest
import numpy as np
import tbats.error as error
from tbats import TBATS
class TestTBATS(object):
    """End-to-end tests for the TBATS estimator: fitting, component
    selection and forecasting on synthetic, noiseless series."""

    def test_constant_model(self):
        """A constant series must be reproduced exactly (zero residuals)
        and forecast as the same constant."""
        y = [3.2] * 20
        estimator = TBATS()
        model = estimator.fit(y)
        assert np.allclose([0.0] * len(y), model.resid)
        assert np.allclose(y, model.y_hat)
        assert np.allclose([3.2] * 5, model.forecast(steps=5))

    def test_normalize_seasonal_periods(self):
        """Seasonal periods passed to the constructor should be normalized:
        values <= 1 dropped (with a warning), duplicates removed, sorted."""
        seasonal_periods = [7, 0, 1, 9, 9, 8.8, 10.11, 3, -1, 2, 1.01]
        with pytest.warns(error.InputArgsWarning):
            estimator = TBATS(seasonal_periods=seasonal_periods)
        # seasonal periods should be normalized in the constructor:
        # greater than 1, unique and sorted
        assert np.array_equal([1.01, 2, 3, 7, 8.8, 9, 10.11], estimator.seasonal_periods)

    @pytest.mark.parametrize(
        "definition, expected_components",
        [
            [  # default settings allow for all components
                dict(),
                dict(use_box_cox=True, use_trend=True, use_damped_trend=True,
                     seasonal_periods=[], seasonal_harmonics=[]),
            ],
            [  # disabling box-cox and trend also rules out damped trend
                dict(use_box_cox=False, use_trend=False),
                dict(use_box_cox=False, use_trend=False, use_damped_trend=False,
                     seasonal_periods=[], seasonal_harmonics=[]),
            ],
            [  # seasonal periods start out with a single harmonic each
                dict(use_box_cox=True, use_damped_trend=False, seasonal_periods=[7, 31]),
                dict(use_box_cox=True, use_trend=True, use_damped_trend=False,
                     seasonal_periods=[7, 31], seasonal_harmonics=[1, 1]),
            ],
        ]
    )
    def test_create_most_complex_components(self, definition, expected_components):
        """The most complex component set should honour the constructor flags.

        BUG FIX: the original asserted the expected use_box_cox against
        components.use_trend (copy-paste error that passed only because the
        fixtures happen to use equal values); it now checks use_box_cox."""
        estimator = TBATS(**definition)
        components = estimator.create_most_complex_components()
        # ARMA is false as it will be used in the end, once harmonics were chosen
        assert False == components.use_arma_errors
        assert expected_components['use_box_cox'] == components.use_box_cox
        assert expected_components['use_trend'] == components.use_trend
        assert expected_components['use_damped_trend'] == components.use_damped_trend
        assert np.array_equal(expected_components['seasonal_periods'], components.seasonal_periods)
        assert np.array_equal(expected_components['seasonal_harmonics'], components.seasonal_harmonics)

    def test_trend_and_seasonal(self):
        """TBATS should recover a damped trend plus a one-harmonic seasonal
        component from a noiseless synthetic series."""
        T = 30
        steps = 5
        phi = 0.99
        period_length = 6
        y = [0] * T
        b = 2.1       # damped trend state
        l = 1.2       # level state
        s = 0         # seasonal state
        s_star = 0.2  # conjugate seasonal state
        for t in range(0, T):
            y[t] = l + phi * b + s
            l = l + phi * b
            b = phi * b
            # rotate the trigonometric seasonal states by one step
            lam = 2 * np.pi / period_length
            s_prev = s
            s = s_prev * np.cos(lam) + s_star * np.sin(lam)
            s_star = - s_prev * np.sin(lam) + s_star * np.cos(lam)
        y_to_fit = y[:(T - steps)]
        y_to_predict = y[(T - steps):]
        # pytest does not work well with spawn multiprocessing method
        # https://github.com/pytest-dev/pytest/issues/958
        estimator = TBATS(use_arma_errors=False, use_trend=True, use_damped_trend=True, use_box_cox=False,
                          seasonal_periods=[period_length], multiprocessing_start_method='fork')
        fitted_model = estimator.fit(y_to_fit)
        resid = fitted_model.resid
        # seasonal model with 1 harmonic should be chosen
        assert np.array_equal([1], fitted_model.params.components.seasonal_harmonics)
        assert np.array_equal([period_length], fitted_model.params.components.seasonal_periods)
        assert np.isclose(phi, fitted_model.params.phi, atol=0.01)
        # from some point residuals should be close to 0
        assert np.allclose([0] * (T - steps - 10), resid[10:], atol=0.06)
        assert np.allclose(y_to_fit[10:], fitted_model.y_hat[10:], atol=0.06)
        # forecast should be close to actual sequence
        y_predicted = fitted_model.forecast(steps=steps)
        assert np.allclose(y_to_predict, y_predicted, atol=0.5)

    @pytest.mark.parametrize(
        "seasonal_periods, seasonal_harmonics, starting_values",
        [
            [
                [12], [2], [[1, 2, 0.5, 0.6]]  # s1, s2, s1*, s2*
            ],
            [
                [7, 365], [2, 3], [[1, 2, 0.5, 0.6], [0.5, 0.2, 0.4, 0.1, 0.9, 0.3]]
            ],
            [  # non-integer period lengths should also work
                [7.2, 12.25], [2, 1], [[0.4, 0.7, 0.2, 0.1], [0.9, 0.8]]
            ],
            [  # 3 periods
                [7, 11, 13.2], [2, 4, 3],
                [[1, 2, 0.5, 0.6], [0.5, 0.2, 0.4, 0.1, 0.9, 0.3, 1.1, 1.2], [-0.1, 0.2, 0.7, 0.6, 0.3, -0.3]]
            ],
        ]
    )
    def test_fit_predict_trigonometric_seasonal(self, seasonal_periods, seasonal_harmonics, starting_values):
        """Checks that the model correctly discovers noiseless trigonometric
        series.  (The original also accumulated an unused state vector x0;
        that dead code has been removed.)"""
        T = 100
        steps = 10
        l = 3.1  # constant level of the series
        # construct the noiseless trigonometric series
        y = [l] * T
        for period in range(0, len(seasonal_periods)):
            period_length = seasonal_periods[period]
            period_harmonics = seasonal_harmonics[period]
            s_harmonic = np.array(starting_values[period])
            # first half of the starting values are s, second half s*
            s = s_harmonic[:int(len(s_harmonic) / 2)]
            s_star = s_harmonic[int(len(s_harmonic) / 2):]
            lambdas = 2 * np.pi * (np.arange(1, period_harmonics + 1)) / period_length
            # add periodic impact to y
            for t in range(0, T):
                y[t] += np.sum(s)
                s_prev = s
                s = s_prev * np.cos(lambdas) + s_star * np.sin(lambdas)
                s_star = - s_prev * np.sin(lambdas) + s_star * np.cos(lambdas)
        y_to_fit = y[:(T - steps)]
        y_to_predict = y[(T - steps):]
        # pytest does not work well with spawn multiprocessing method
        # https://github.com/pytest-dev/pytest/issues/958
        estimator = TBATS(use_box_cox=False, use_arma_errors=False, use_trend=False,
                          seasonal_periods=seasonal_periods,
                          multiprocessing_start_method='fork')
        fitted_model = estimator.fit(y_to_fit)
        resid = fitted_model.resid
        # seasonal model should be discovered
        assert np.array_equal(seasonal_periods, fitted_model.params.components.seasonal_periods)
        # at least as many harmonics as in the original series
        assert np.all(np.asarray(seasonal_harmonics) <= fitted_model.params.components.seasonal_harmonics)
        # sequence should be modelled properly
        assert np.allclose([0] * (T - steps), resid, atol=0.2)
        assert np.allclose(y_to_fit, fitted_model.y_hat, atol=0.2)
        # forecast should be close to actual
        y_predicted = fitted_model.forecast(steps=steps)
        assert np.allclose(y_to_predict, y_predicted, 0.2)
| [
"numpy.sum",
"tbats.TBATS",
"pytest.warns",
"numpy.allclose",
"numpy.asarray",
"numpy.isclose",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"numpy.array_equal",
"pytest.mark.parametrize",
"numpy.concatenate"
] | [((4287, 4681), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""seasonal_periods, seasonal_harmonics, starting_values"""', '[[[12], [2], [[1, 2, 0.5, 0.6]]], [[7, 365], [2, 3], [[1, 2, 0.5, 0.6], [\n 0.5, 0.2, 0.4, 0.1, 0.9, 0.3]]], [[7.2, 12.25], [2, 1], [[0.4, 0.7, 0.2,\n 0.1], [0.9, 0.8]]], [[7, 11, 13.2], [2, 4, 3], [[1, 2, 0.5, 0.6], [0.5,\n 0.2, 0.4, 0.1, 0.9, 0.3, 1.1, 1.2], [-0.1, 0.2, 0.7, 0.6, 0.3, -0.3]]]]'], {}), "('seasonal_periods, seasonal_harmonics, starting_values'\n , [[[12], [2], [[1, 2, 0.5, 0.6]]], [[7, 365], [2, 3], [[1, 2, 0.5, 0.6\n ], [0.5, 0.2, 0.4, 0.1, 0.9, 0.3]]], [[7.2, 12.25], [2, 1], [[0.4, 0.7,\n 0.2, 0.1], [0.9, 0.8]]], [[7, 11, 13.2], [2, 4, 3], [[1, 2, 0.5, 0.6],\n [0.5, 0.2, 0.4, 0.1, 0.9, 0.3, 1.1, 1.2], [-0.1, 0.2, 0.7, 0.6, 0.3, -\n 0.3]]]])\n", (4310, 4681), False, 'import pytest\n'), ((192, 199), 'tbats.TBATS', 'TBATS', ([], {}), '()\n', (197, 199), False, 'from tbats import TBATS\n'), ((304, 331), 'numpy.allclose', 'np.allclose', (['y', 'model.y_hat'], {}), '(y, model.y_hat)\n', (315, 331), True, 'import numpy as np\n'), ((779, 853), 'numpy.array_equal', 'np.array_equal', (['[1.01, 2, 3, 7, 8.8, 9, 10.11]', 'estimator.seasonal_periods'], {}), '([1.01, 2, 3, 7, 8.8, 9, 10.11], estimator.seasonal_periods)\n', (793, 853), True, 'import numpy as np\n'), ((1897, 1916), 'tbats.TBATS', 'TBATS', ([], {}), '(**definition)\n', (1902, 1916), False, 'from tbats import TBATS\n'), ((2364, 2453), 'numpy.array_equal', 'np.array_equal', (["expected_components['seasonal_periods']", 'components.seasonal_periods'], {}), "(expected_components['seasonal_periods'], components.\n seasonal_periods)\n", (2378, 2453), True, 'import numpy as np\n'), ((2464, 2557), 'numpy.array_equal', 'np.array_equal', (["expected_components['seasonal_harmonics']", 'components.seasonal_harmonics'], {}), "(expected_components['seasonal_harmonics'], components.\n seasonal_harmonics)\n", (2478, 2557), True, 'import numpy as np\n'), ((3319, 3484), 
'tbats.TBATS', 'TBATS', ([], {'use_arma_errors': '(False)', 'use_trend': '(True)', 'use_damped_trend': '(True)', 'use_box_cox': '(False)', 'seasonal_periods': '[period_length]', 'multiprocessing_start_method': '"""fork"""'}), "(use_arma_errors=False, use_trend=True, use_damped_trend=True,\n use_box_cox=False, seasonal_periods=[period_length],\n multiprocessing_start_method='fork')\n", (3324, 3484), False, 'from tbats import TBATS\n'), ((3660, 3730), 'numpy.array_equal', 'np.array_equal', (['[1]', 'fitted_model.params.components.seasonal_harmonics'], {}), '([1], fitted_model.params.components.seasonal_harmonics)\n', (3674, 3730), True, 'import numpy as np\n'), ((3746, 3831), 'numpy.array_equal', 'np.array_equal', (['[period_length]', 'fitted_model.params.components.seasonal_periods'], {}), '([period_length], fitted_model.params.components.seasonal_periods\n )\n', (3760, 3831), True, 'import numpy as np\n'), ((3843, 3894), 'numpy.isclose', 'np.isclose', (['phi', 'fitted_model.params.phi'], {'atol': '(0.01)'}), '(phi, fitted_model.params.phi, atol=0.01)\n', (3853, 3894), True, 'import numpy as np\n'), ((3968, 4026), 'numpy.allclose', 'np.allclose', (['([0] * (T - steps - 10))', 'resid[10:]'], {'atol': '(0.06)'}), '([0] * (T - steps - 10), resid[10:], atol=0.06)\n', (3979, 4026), True, 'import numpy as np\n'), ((4042, 4104), 'numpy.allclose', 'np.allclose', (['y_to_fit[10:]', 'fitted_model.y_hat[10:]'], {'atol': '(0.06)'}), '(y_to_fit[10:], fitted_model.y_hat[10:], atol=0.06)\n', (4053, 4104), True, 'import numpy as np\n'), ((4232, 4280), 'numpy.allclose', 'np.allclose', (['y_to_predict', 'y_predicted'], {'atol': '(0.5)'}), '(y_to_predict, y_predicted, atol=0.5)\n', (4243, 4280), True, 'import numpy as np\n'), ((6089, 6107), 'numpy.concatenate', 'np.concatenate', (['x0'], {}), '(x0)\n', (6103, 6107), True, 'import numpy as np\n'), ((6332, 6472), 'tbats.TBATS', 'TBATS', ([], {'use_box_cox': '(False)', 'use_arma_errors': '(False)', 'use_trend': '(False)', 
'seasonal_periods': 'seasonal_periods', 'multiprocessing_start_method': '"""fork"""'}), "(use_box_cox=False, use_arma_errors=False, use_trend=False,\n seasonal_periods=seasonal_periods, multiprocessing_start_method='fork')\n", (6337, 6472), False, 'from tbats import TBATS\n'), ((6665, 6751), 'numpy.array_equal', 'np.array_equal', (['seasonal_periods', 'fitted_model.params.components.seasonal_periods'], {}), '(seasonal_periods, fitted_model.params.components.\n seasonal_periods)\n', (6679, 6751), True, 'import numpy as np\n'), ((6976, 7023), 'numpy.allclose', 'np.allclose', (['([0] * (T - steps))', 'resid'], {'atol': '(0.2)'}), '([0] * (T - steps), resid, atol=0.2)\n', (6987, 7023), True, 'import numpy as np\n'), ((7039, 7090), 'numpy.allclose', 'np.allclose', (['y_to_fit', 'fitted_model.y_hat'], {'atol': '(0.2)'}), '(y_to_fit, fitted_model.y_hat, atol=0.2)\n', (7050, 7090), True, 'import numpy as np\n'), ((7209, 7252), 'numpy.allclose', 'np.allclose', (['y_to_predict', 'y_predicted', '(0.2)'], {}), '(y_to_predict, y_predicted, 0.2)\n', (7220, 7252), True, 'import numpy as np\n'), ((527, 563), 'pytest.warns', 'pytest.warns', (['error.InputArgsWarning'], {}), '(error.InputArgsWarning)\n', (539, 563), False, 'import pytest\n'), ((589, 629), 'tbats.TBATS', 'TBATS', ([], {'seasonal_periods': 'seasonal_periods'}), '(seasonal_periods=seasonal_periods)\n', (594, 629), False, 'from tbats import TBATS\n'), ((5522, 5555), 'numpy.array', 'np.array', (['starting_values[period]'], {}), '(starting_values[period])\n', (5530, 5555), True, 'import numpy as np\n'), ((5887, 5896), 'numpy.sum', 'np.sum', (['s'], {}), '(s)\n', (5893, 5896), True, 'import numpy as np\n'), ((6828, 6858), 'numpy.asarray', 'np.asarray', (['seasonal_harmonics'], {}), '(seasonal_harmonics)\n', (6838, 6858), True, 'import numpy as np\n'), ((2993, 3004), 'numpy.cos', 'np.cos', (['lam'], {}), '(lam)\n', (2999, 3004), True, 'import numpy as np\n'), ((3016, 3027), 'numpy.sin', 'np.sin', (['lam'], {}), '(lam)\n', 
(3022, 3027), True, 'import numpy as np\n'), ((3060, 3071), 'numpy.sin', 'np.sin', (['lam'], {}), '(lam)\n', (3066, 3071), True, 'import numpy as np\n'), ((3083, 3094), 'numpy.cos', 'np.cos', (['lam'], {}), '(lam)\n', (3089, 3094), True, 'import numpy as np\n'), ((5738, 5772), 'numpy.arange', 'np.arange', (['(1)', '(period_harmonics + 1)'], {}), '(1, period_harmonics + 1)\n', (5747, 5772), True, 'import numpy as np\n'), ((5953, 5968), 'numpy.cos', 'np.cos', (['lambdas'], {}), '(lambdas)\n', (5959, 5968), True, 'import numpy as np\n'), ((5980, 5995), 'numpy.sin', 'np.sin', (['lambdas'], {}), '(lambdas)\n', (5986, 5995), True, 'import numpy as np\n'), ((6032, 6047), 'numpy.sin', 'np.sin', (['lambdas'], {}), '(lambdas)\n', (6038, 6047), True, 'import numpy as np\n'), ((6059, 6074), 'numpy.cos', 'np.cos', (['lambdas'], {}), '(lambdas)\n', (6065, 6074), True, 'import numpy as np\n')] |
import numpy as np
import networkx as nx
from numba import njit
from itertools import chain
from time import process_time
from datetime import timedelta
from kmmi.enumeration.graphlet_enumeration import *
from kmmi.utils.utils import *
from kmmi.utils.autoload import *
def prune_by_density(U: np.array, A: np.array, ds: np.array=None,
                     rho_min: float=0.7, rho_max: float=1.00) -> np.array:
    """Keep only the graphlets whose density rho satisfies
    rho_min <= rho(G_s) <= rho_max; all others are pruned."""
    if ds is None:
        # densities not supplied: derive them from the adjacency matrix
        _, ds = graphlet_scores(U, A)
    in_range = (ds >= rho_min) & (ds <= rho_max)
    assert in_range.sum() > 0, ("All graphlets were pruned; "
                                "selected density range may be too narrow, "
                                "lower the rho_min or increase the rho_max "
                                "to relax the requirement.")
    return U[in_range, :]
def strongly_connected(A: np.array, U: np.array) -> np.array:
    """Filter graphlets, retaining only those whose induced directed
    subgraph is strongly connected (every node reachable from every other).

    Parameters
    ----------
    A : array
        Weighted adjacency matrix of the input network.
    U : array, shape (n, k_max)
        Candidate graphlets as rows of node indices into A; rows are
        right-padded with -1 for graphlets smaller than k_max.

    Returns
    -------
    array, shape (n_sel, k_max)
        The rows of U whose induced subgraph is strongly connected.
    """
    def is_scc(nodes):
        # induce the directed subgraph on `nodes` and test two-way reachability
        sub = A[nodes, :][:, nodes]
        return nx.is_strongly_connected(nx.DiGraph(sub))
    keep = [is_scc(row[row != -1]) for row in U]
    return U[keep, :]
@njit
def graphlet_scores(U: np.array, A: np.array) -> np.array:
    """Computes, for every graphlet in U, the mean edge weight and the
    (unweighted) edge density over the weighted adjacency matrix A.

    The weight score for a graphlet with node set s of size n is
    tau = 1/(n*(n-1)) * sum_{i != j in s} A_ij, i.e. the sum of pairwise
    weights divided by the number of ordered node pairs; the density is
    the fraction of those pairs with a positive weight.  Padding entries
    (-1) in a row are ignored.

    Returns
    -------
    ws : array, shape (U.shape[0],)
        Mean pairwise weight per graphlet, aligned with the rows of U.
    ds : array, shape (U.shape[0],)
        Edge density per graphlet, aligned with the rows of U.
    """
    n_rows = U.shape[0]
    ws = np.zeros(n_rows)
    ds = np.zeros(n_rows)
    for v in range(n_rows):
        w_sum = 0.0
        n_pairs = 0
        n_edges = 0
        k = U[v, :].shape[0]
        for i in range(k):
            if U[v, i] == -1:
                continue
            for j in range(k):
                if i == j or U[v, j] == -1:
                    continue
                a_ij = A[U[v, i], U[v, j]]
                w_sum += a_ij
                n_pairs += 1
                if a_ij > 0:
                    n_edges += 1
        if n_pairs > 0:
            ws[v] = w_sum / n_pairs
            ds[v] = n_edges / n_pairs
        # else: both scores stay 0, matching the original's 0-fallback
    return ws, ds
@njit
def select_nrank(U: np.array, A: np.array, Vs: np.array, p_min: int, p_max: int,
                 ptol: int=0.01, n_iters: int=10, presorted=False, adaptive_p=True,
                 verbose=False) -> np.array:
    """Selects p highest ranking graphlets per each node in the seed node set
    Vs. This method assumes that U is already ordered in ascending ranking
    order based on desired ranking criteria.
    Parameters
    ----------
    U : input graphlets as rows of an array with shape (n, k_max) where elements
        are node indices in the adjacency matrix of the input network. Rows are
        padded from the right with -1 for graphlet sizes < k_max
    A : weighted adjacency matrix of the input network
    Vs : set of seed nodes that will be selected for
    p_min : target number of graphlets to select per each seed node
    p_max : upper bound for the number of times each node can appear in the set of
            selected graphlets
    ptol : allowed tolerance parameter as a fraction of number of seed nodes
    presorted : set True if U rows are already in an ascending sorted order
                from the most to the least optimal.
    adaptive_p : set True if p_max is allowed to be relaxed adaptively, this allows
                 finding the minimum p_max value such that all seed nodes are included
                 at least p_min times
    Returns
    -------
    U_out : output graphlets, array of shape (n_sel, k_max) containing remaining
            candidate graphlets as rows of node indices in the adjacency matrix of the
            input network
    u_sel : boolean array of size U.shape[0] which determines which graphlets in
            the original list of graphlets
    C : counts of seed nodes in the set of selected output graphlets
    p_max : updated number of times each node can appear in the set of selected
            graphlets
    Notes
    -----
    Setting `verbose=True` will give useful information for diagnosing
    the run.
    """
    assert p_min <= p_max
    n = U.shape[0]
    k = U.shape[1]
    n_v = A.shape[0]
    n_vs = Vs.shape[0]
    if not presorted:
        if verbose: print(':: Computing tau scores...')
        taus, _ = graphlet_scores(U, A)
        if verbose: print(':: Sorting based on tau scores...')
        # descending tau order: best graphlets first
        idxs = np.argsort(taus)[::-1]
        U = U[idxs,:]
    # Outer loop: retried with a relaxed p_max while adaptive_p is set and
    # too few seed nodes reach p_min occurrences.
    while True:
        for ii in range(n_iters):
            if ii == 0:
                # first pass scans U in its (sorted) order
                U_idxs = np.arange(n)
            else:
                # later passes perturb the order inside local blocks only
                step = 100 if n > 1000 else 10
                U_idxs = __block_shuffle(n, step)
            if verbose:
                print(':: * Targeting at least', p_min, \
                      ' graphlets per node\n\tPROGRESS:')
            count = 0
            stop_flag = False
            u_sel = np.array([False]*n)
            C = np.zeros(n_v)  # per-node occurrence count in the selection
            # 10%..90% progress checkpoints for the verbose report
            prcs = set([int(i) for i in (n * (np.arange(1,10) / 10))])
            for i in range(n):
                if verbose:
                    if i+1 in prcs:
                        prc = np.round((i+1) / n * 100, 1)
                        avg_fill = np.round(C[Vs].mean(), 2)
                        print(' \t*',prc,'% i:',i, \
                              '|', u_sel.sum(),'graphlets (', avg_fill, \
                              'per seed ) |', (C > 0).sum(), 'seeds')
                # Condition 1: skip the graphlet if taking it would push any
                # of its nodes to/over the p_max occurrence cap
                cond_one = False
                for j in range(k):
                    v = U[U_idxs[i],j]
                    if v != -1:
                        if C[v]+1 >= p_max:
                            cond_one = True
                            break
                if cond_one: continue
                u_sel[U_idxs[i]] = True
                # Count occurrences of the accepted graphlet's nodes
                for j in range(k):
                    v = U[U_idxs[i],j]
                    if v >= 0:
                        C[v] += 1
                        # Condition 2: a node just reached its p_min target;
                        # stop once every seed node has done so
                        if C[v] == p_min:
                            count += 1
                            if count == n_vs:
                                stop_flag = True
                                break
                if stop_flag:
                    if verbose:
                        prc = np.round((i+1) / n * 100, 1)
                        print(':: Selection ready at iteration ',i,'(',prc,'%).')
                        print(':: Selected ', u_sel.sum(),'graphlets')
                    break
            if stop_flag: break
        # success if the shortfall of satisfied seeds is within tolerance
        if n_vs-count <= ptol*n_vs:
            if verbose:
                print('::', count, '/', n_vs, 'seed nodes were selected, ' \
                      'selection successful.')
                print(':: Final p range: [', p_min, ',', p_max, ']')
            break
        else:
            if verbose:
                print(':: Not all seed nodes could be selected,', \
                      np.sum(C>0), '/', n_vs, 'seed nodes were included in the selection, '\
                      'of which', count, 'nodes at least p_min times.')
                if adaptive_p and verbose:
                    print(':: Relaxing p_max criteria to:', p_max+1)
                    print(50*'--')
            if adaptive_p:
                p_max += 1
            else:
                break
    return U[u_sel,:], u_sel, C, p_max
@njit
def __block_shuffle(n, step=10):
    """Return a near-sorted permutation of range(n): indices are shuffled
    only within consecutive blocks of size `step`, so the global ordering
    is approximately preserved.
    """
    order = np.arange(n)
    for start in np.arange(0, n, step):
        if start <= n - 2*step:
            block = order[start:start+step]
            np.random.shuffle(block)
            order[start:start+step] = block
        else:
            # trailing (possibly short) block: shuffle the whole tail
            tail = order[start:]
            np.random.shuffle(tail)
            order[start:] = tail
    return order
def binary_search_p(U: np.array, A: np.array, Vs: np.array, p_max: int, tol: float=0.1,
                    ptol: float=0.05, n_max: int=5000, n_iters=1, verbose=False) -> tuple:
    """Find, via exponential + binary search, the p value for `select_nrank`
    such that approximately n_max graphlets are selected (within tol*n_max).

    Useful when the number of graphlet candidates exceeds what downstream
    tasks on the coarse-grained network can handle.

    Parameters
    ----------
    U : input graphlets as rows of an array with shape (n, k_max); rows are
        padded from the right with -1 and assumed pre-sorted from the most
        to the least optimal
    A : weighted adjacency matrix of the input network
    Vs : set of seed nodes that will be selected for
    p_max : initial upper bound for per-node occurrences in the selection
    tol : tolerance of error for the n_max (fraction of n_max)
    ptol : tolerance of error for the selection method (fraction of p)
    n_max : target number of selected graphlets
    n_iters : number of shuffle iterations per `select_nrank` call
    verbose : print progress diagnostics

    Returns
    -------
    m (int) : found p value
    U (np.array) : selected graphlets, array of shape (n_sel, k_max)
    """
    n = U.shape[0]
    n_v = A.shape[0]
    n_U = len(unique_nodes(U, n_v))
    assert n > n_max, f'n: {n} > n_max: {n_max}, binary search isn\'t required'
    if n_max > 2e4: print(f'WARNING: n_max: {n_max} is higher than what the ' \
                          'pipeline has been benchmarked for.')
    if verbose:
        print(':: Initializing binary search for determining upper bound for p_min...')
        print(f':: Tolerance: {tol}')
    # Phase 1 -- exponential search: double p until >= n_max graphlets are
    # selected, giving the upper end of the binary-search bracket.
    i = n_sel = 1
    while n_sel < n_max:
        p = 2**i
        p_max = np.max([p, p_max])
        _, idxs, _, p_max = select_nrank(U, A, Vs, p, p_max,
                                        n_iters=n_iters,
                                        presorted=True,
                                        adaptive_p=False,
                                        ptol=ptol,
                                        verbose=verbose)
        n_sel = idxs.sum()
        i += 1
    if verbose: print(f':: Initial upper bound found for the p_min: {p}')
    # Phase 2 -- binary search on [p/2, p] until the change in selection
    # size between consecutive probes drops below tol*n_max.
    d1 = np.inf
    S0 = idxs
    L, R = p // 2, p
    while True:
        m = int((L + R) / 2)
        p_max = np.max([p, p_max])
        _, idxs, C, p_max = select_nrank(U, A, Vs, m, p_max,
                                        n_iters=n_iters,
                                        presorted=True,
                                        adaptive_p=True,
                                        ptol=ptol,
                                        verbose=verbose)
        d1 = idxs.sum() - S0.sum()
        S0 = idxs
        if idxs.sum() <= n_max:
            L = m
            if verbose: print(f':: * {m} ({idxs.sum()}) set as the lower bound')
        else:
            R = m
            if verbose: print(f':: * {m} ({idxs.sum()}) set as the upper bound')
        if np.abs(d1) < tol*n_max:
            break
    # BUGFIX: the original referenced the undefined name `p_min` below and
    # closed the .format() call early (leaving 4 placeholders unfilled,
    # an IndexError at runtime); the found value `m` is the intended bound.
    if n_U - (C >= m).sum() > n_U*tol:
        print(':: WARNING! Only {}/{} nodes included, does not satisfy the tolerance {:.2f}%.' \
              ' Bounds p in [{},{}]'.format((C >= m).sum(), n_U, tol*100, m, p_max))
    else:
        if verbose: print(':: Convergence succesful, final p_min: {} ({} selected)'
                          .format(m, S0.sum()))
    return m, U[idxs,:]
def prune_subgraphs(U: np.array, k_min: int, k_max: int) -> np.array:
    """Drop every graphlet that is a subset of a strictly larger graphlet.

    For two node sets V1 and V2, if V1 is a subgraph of V2, V1 is pruned;
    all maximal sets V2 are guaranteed to be kept.

    Returns:
    --------
    U (np.array): array of shape (n_sel, k_max) containing remaining candidate
                  graphlets as rows of node indices in the adjacency matrix of
                  the underlying network.
    Notes:
    ------
    Worst-case running time is O(n^2); on average it is quasi-linear since
    only graphlets of strictly larger size need to be inspected.
    """
    n = U.shape[0]
    keep = np.array([True]*n)
    # Bucket graphlets by their (unpadded) size.
    by_size = {size: [] for size in range(k_min, k_max+1)}
    for idx in range(n):
        row = U[idx,:]
        nodes = frozenset(row[row >= 0])
        by_size[len(nodes)].append((idx, nodes))
    # A graphlet is pruned iff it is contained in some larger graphlet.
    for size in range(k_min, k_max+1):
        for idx, nodes in by_size[size]:
            if any(nodes.issubset(other)
                   for larger in range(size+1, k_max+1)
                   for _, other in by_size[larger]):
                keep[idx] = False
    return U[keep,:]
@njit
def overlap_selection(U: np.array, omega: float=0.5, n_max: int=5000,
                      presorted=False, verbose=False, A: np.array=None):
    """Select at most n_max graphlets such that any pair (s_i, s_j) of graphlets
    will have at most omega overlap coefficient.
    Parameters
    ----------
    U : input graphlets as rows of an array with shape (n, k_max) where elements
        are node indices in the adjacency matrix of the input network. Rows are
        padded from the right with -1 for graphlet sizes < k_max
    omega : threshold parameter for maximum allowed overlap
    n_max : maximum cap for how many graphlets can be selected
    presorted : set True if U rows are already in an ascending sorted order
                from the most to the least optimal.
    A : weighted adjacency matrix of the input network; required only when
        presorted is False. (BUGFIX: the original referenced an undefined
        global name `A` in that branch, which always failed.)
    Returns:
    --------
    S (list of sets): selected graphlets as list of sets
    u_sel (np.array): boolean mask over U's rows marking the selection
    Notes:
    ------
    Running time is positively correlated on both the allowed overlap (omega)
    and n_max, decreasing both will result in reduced running times.
    Time complexity in worst-case is approximately O(mn_max^2), where m is
    the constant cost of the set intersection operation for pair of graphlets.
    However, note that as omega -> 0, the rate of growth of S will slow down,
    and there is a regime of omega beyond which size of S will only be able to
    reach a fraction of n_max.
    """
    if not presorted:
        # Ranking requires the adjacency matrix for graphlet scoring.
        assert A is not None
        if verbose: print(':: Computing tau scores...')
        taus, _ = graphlet_scores(U, A)
        if verbose: print(':: Sorting based on tau scores...')
        idxs = np.argsort(taus)[::-1]
        U = U[idxs,:]
        if verbose: print(':: Sorting completed...')
    n = U.shape[0]
    S = []
    u_sel = np.array([False]*n)
    for i in range(n):
        ols = False
        si = set(U[i,:]) - set([-1])  # drop -1 padding
        for sj in S:
            li, lj = len(si), len(sj)
            dnmr = li if li < lj else lj
            # overlap coefficient: |si & sj| / min(|si|, |sj|)
            if len(si & sj) / dnmr > omega:
                ols = True
                break
        if not ols:
            u_sel[i] = True
            S.append(si)
            if len(S) == n_max:
                break
    return S, u_sel
def prune(A: np.array, U: np.array, k_min: int, k_max: int,
          n_sel: int=None, verbose=False, weakly_connected=False) -> np.array:
    """Prune candidate graphlets using a multi-step pruning pipeline.
    Steps:
        1. keep strongly connected graphlets (skipped when
           weakly_connected=True)
        2. keep only graphlets that are maximal sets
        3. select up to n_sel top ranking graphlets based on tau scores
           with a guarantee that each node will be included in at least
           p graphlets (skipped when n_sel is None or already satisfied)
    Returns:
    --------
    U (np.array): array of shape (n_sel, k_max) containing remaining candidate
                  graphlets as rows of node indices in the adjacency matrix of
                  the underlying network.
    """
    t0p = process_time()  # total pipeline timer
    n = U.shape[0]
    U_pruned = U
    Vs = unique_nodes(U_pruned, A.shape[0])
    if not weakly_connected:
        if verbose:
            t0 = process_time()
            print(':: Pruning 1/3: Selecting strongly connected graphlets')
        U_pruned = strongly_connected(A, U_pruned)
        if verbose:
            print(':: * Number of graphlets after ' \
                  'selection: {}'.format(U_pruned.shape[0]))
            td = process_time() - t0
            print(':: (@ {})\n'.format(timedelta(seconds=td)))
    # Prune subgrahps (step 2: reduce to maximal supergraph-graphlets)
    if verbose:
        t0 = process_time()
        print(':: Pruning 2/3: Reducing subgraphs to ' \
              'their largest supergraph-graphlets')
    U_pruned = prune_subgraphs(U_pruned, k_min, k_max)
    if verbose:
        print(':: * Number of graphlets after subgraph ' \
              'pruning: {}'.format(U_pruned.shape[0]))
        td = process_time() - t0
        print(':: (@ {})\n'.format(timedelta(seconds=td)))
    if n_sel is not None:
        if n_sel < U_pruned.shape[0]:
            if verbose:
                t0 = process_time()
                print(f':: Pruning 3/3: Selecting {n_sel} top ranking ' \
                      'graphlets')
                print(':: Computing tau scores...')
            taus, _ = graphlet_scores(U_pruned, A)
            if verbose: print(':: Sorting based on tau scores...')
            # descending tau order prior to the ranked binary-search selection
            idxs = np.argsort(taus)[::-1]
            _, U_pruned = binary_search_p(U_pruned[idxs,:], A, Vs, n_max=n_sel,
                                          tol=int(n_sel*0.1), verbose=verbose)
            if verbose:
                td = process_time() - t0
                print(':: (@ {})\n'.format(timedelta(seconds=td)))
                print(':: Pruning ready, {} graphlets selected'
                      .format(U_pruned.shape[0]))
                td = process_time() - t0p
                print(':: Total elapsed time @ {}\n'.format(timedelta(seconds=td)))
        else:
            if verbose: print(':: n_sel >= n, skipping ranked selection.')
    return U_pruned
"numpy.abs",
"numpy.sum",
"time.process_time",
"numpy.zeros",
"numpy.argsort",
"numpy.max",
"numpy.array",
"numpy.arange",
"datetime.timedelta",
"networkx.DiGraph",
"numpy.round",
"numpy.random.shuffle"
] | [((2795, 2806), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2803, 2806), True, 'import numpy as np\n'), ((2816, 2827), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2824, 2827), True, 'import numpy as np\n'), ((8863, 8875), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (8872, 8875), True, 'import numpy as np\n'), ((8889, 8910), 'numpy.arange', 'np.arange', (['(0)', 'n', 'step'], {}), '(0, n, step)\n', (8898, 8910), True, 'import numpy as np\n'), ((13463, 13483), 'numpy.array', 'np.array', (['([True] * n)'], {}), '([True] * n)\n', (13471, 13483), True, 'import numpy as np\n'), ((15763, 15784), 'numpy.array', 'np.array', (['([False] * n)'], {}), '([False] * n)\n', (15771, 15784), True, 'import numpy as np\n'), ((16954, 16968), 'time.process_time', 'process_time', ([], {}), '()\n', (16966, 16968), False, 'from time import process_time\n'), ((11072, 11090), 'numpy.max', 'np.max', (['[p, p_max]'], {}), '([p, p_max])\n', (11078, 11090), True, 'import numpy as np\n'), ((11665, 11683), 'numpy.max', 'np.max', (['[p, p_max]'], {}), '([p, p_max])\n', (11671, 11683), True, 'import numpy as np\n'), ((17590, 17604), 'time.process_time', 'process_time', ([], {}), '()\n', (17602, 17604), False, 'from time import process_time\n'), ((1806, 1831), 'networkx.DiGraph', 'nx.DiGraph', (['A[s, :][:, s]'], {}), '(A[s, :][:, s])\n', (1816, 1831), True, 'import networkx as nx\n'), ((5650, 5666), 'numpy.argsort', 'np.argsort', (['taus'], {}), '(taus)\n', (5660, 5666), True, 'import numpy as np\n'), ((6144, 6165), 'numpy.array', 'np.array', (['([False] * n)'], {}), '([False] * n)\n', (6152, 6165), True, 'import numpy as np\n'), ((6180, 6193), 'numpy.zeros', 'np.zeros', (['n_v'], {}), '(n_v)\n', (6188, 6193), True, 'import numpy as np\n'), ((8984, 9005), 'numpy.random.shuffle', 'np.random.shuffle', (['Ui'], {}), '(Ui)\n', (9001, 9005), True, 'import numpy as np\n'), ((9094, 9115), 'numpy.random.shuffle', 'np.random.shuffle', (['Ui'], {}), '(Ui)\n', (9111, 9115), True, 'import 
numpy as np\n'), ((12352, 12362), 'numpy.abs', 'np.abs', (['d1'], {}), '(d1)\n', (12358, 12362), True, 'import numpy as np\n'), ((15606, 15622), 'numpy.argsort', 'np.argsort', (['taus'], {}), '(taus)\n', (15616, 15622), True, 'import numpy as np\n'), ((17121, 17135), 'time.process_time', 'process_time', ([], {}), '()\n', (17133, 17135), False, 'from time import process_time\n'), ((17944, 17958), 'time.process_time', 'process_time', ([], {}), '()\n', (17956, 17958), False, 'from time import process_time\n'), ((5803, 5815), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5812, 5815), True, 'import numpy as np\n'), ((17437, 17451), 'time.process_time', 'process_time', ([], {}), '()\n', (17449, 17451), False, 'from time import process_time\n'), ((17999, 18020), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'td'}), '(seconds=td)\n', (18008, 18020), False, 'from datetime import timedelta\n'), ((18137, 18151), 'time.process_time', 'process_time', ([], {}), '()\n', (18149, 18151), False, 'from time import process_time\n'), ((18463, 18479), 'numpy.argsort', 'np.argsort', (['taus'], {}), '(taus)\n', (18473, 18479), True, 'import numpy as np\n'), ((8215, 8228), 'numpy.sum', 'np.sum', (['(C > 0)'], {}), '(C > 0)\n', (8221, 8228), True, 'import numpy as np\n'), ((17496, 17517), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'td'}), '(seconds=td)\n', (17505, 17517), False, 'from datetime import timedelta\n'), ((18702, 18716), 'time.process_time', 'process_time', ([], {}), '()\n', (18714, 18716), False, 'from time import process_time\n'), ((18941, 18955), 'time.process_time', 'process_time', ([], {}), '()\n', (18953, 18955), False, 'from time import process_time\n'), ((6404, 6434), 'numpy.round', 'np.round', (['((i + 1) / n * 100)', '(1)'], {}), '((i + 1) / n * 100, 1)\n', (6412, 6434), True, 'import numpy as np\n'), ((7567, 7597), 'numpy.round', 'np.round', (['((i + 1) / n * 100)', '(1)'], {}), '((i + 1) / n * 100, 1)\n', (7575, 7597), True, 'import numpy as 
np\n'), ((18765, 18786), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'td'}), '(seconds=td)\n', (18774, 18786), False, 'from datetime import timedelta\n'), ((19022, 19043), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'td'}), '(seconds=td)\n', (19031, 19043), False, 'from datetime import timedelta\n'), ((6241, 6257), 'numpy.arange', 'np.arange', (['(1)', '(10)'], {}), '(1, 10)\n', (6250, 6257), True, 'import numpy as np\n')] |
import functools
import numpy as np
from collections import Container, Mapping
from numpy.testing import assert_equal, assert_allclose
from .misc import settingerr, strenum
__all__ = ['assert_eq',
'assert_in',
'assert_not_in',
'assert_is',
'assert_is_not',
'assert_is_instance',
'assert_is_not_instance',
'assert_is_none',
'assert_is_not_none',
'assert_is_type',
'assert_raises',
'skiptest',
'skiptest_if',
'skiptest_unless_module']
def assert_same(actual, desired, atol=0, rtol=5, broadcasting=False):
    """
    Compare arrays of floats. The relative error depends on the data type.
    Parameters
    ----------
    atol : float
        Absolute tolerance to account for numerical error propagation, in
        unit of eps.
    rtol : float
        Relative tolerance to account for numerical error propagation, in
        unit of eps.
    broadcasting : bool, optional
        If true, allow broadcasting between actual and desired array.
    """
    actual = np.asarray(actual)
    desired = np.asarray(desired)
    if actual.dtype.kind not in ('b', 'i', 'u', 'f', 'c') or \
       desired.dtype.kind not in ('b', 'i', 'u', 'f', 'c'):
        raise TypeError('Non numeric type.')
    if not broadcasting and actual.shape != desired.shape:
        raise AssertionError(
            "The actual array shape '{0}' is different from the desired one '{"
            "1}'.".format(actual.shape, desired.shape))
    # Integer/boolean inputs on both sides: compare exactly, no eps logic.
    if actual.dtype.kind in ('b', 'i', 'u') and \
       desired.dtype.kind in ('b', 'i', 'u'):
        if not broadcasting:
            assert_equal(actual, desired)
        else:
            assert np.all(actual == desired)
        return
    # Pick the float dtype whose eps drives the tolerance: the non-integer
    # side, or (both float/complex) the lower-precision of the two.
    if actual.dtype.kind in ('b', 'i', 'u'):
        dtype = desired.dtype
    elif desired.dtype.kind in ('b', 'i', 'u'):
        dtype = actual.dtype
    else:
        dtype = sorted(_.dtype for _ in (actual, desired))[0]
    eps1 = np.finfo(dtype).eps * rtol
    eps2 = np.finfo(dtype).eps * atol
    # Silence fp warnings: NaN/inf arithmetic is expected while comparing.
    with settingerr('ignore'):
        same_ = abs(actual - desired) <= \
                eps1 * np.minimum(abs(actual), abs(desired)) + eps2
        # Additionally accept NaN==NaN and exact equality (covers inf==inf).
        same = (same_ | np.isnan(actual) & np.isnan(desired) |
                (actual == desired))
        if np.all(same):
            return
        msg = 'Arrays are not equal (mismatch {0:.1%}'.format(1-np.mean(same))
        # Report the minimal rtol/atol that would have made the check pass.
        if np.any(~same_ & np.isfinite(actual) & np.isfinite(desired)):
            rtolmin = np.nanmax(abs(actual - desired) /
                                np.minimum(abs(actual), abs(desired)))
            atolmin = np.nanmax(abs(actual - desired))
            msg += ', min rtol: {0}, min atol: {1}'.format(
                rtolmin / np.finfo(dtype).eps,
                atolmin / np.finfo(dtype).eps)
        # Flag positions where only one side is NaN.
        check_nan = (np.isnan(actual) & ~np.isnan(desired) |
                     np.isnan(desired) & ~np.isnan(actual))
        if np.any(check_nan):
            msg += ', check nan'
        if np.any(~check_nan & (np.isinf(actual) | np.isinf(desired)) &
               (actual != desired)):
            msg += ', check infinite'
        # Truncate long reprs to three lines for the error message.
        def trepr(x):
            r = repr(x).split('\n')
            if len(r) > 3:
                r = [r[0], r[1], r[2] + ' ...']
            return '\n'.join(r)
        raise AssertionError(msg + ")\n x: {1}\n y: {2}".format(
            1 - np.mean(same), trepr(actual), trepr(desired)))
def assert_eq(a, b, msg=''):
    """Assert that the two arguments are equal.

    Dispatches on the argument types: ndarray subclasses are compared by
    type, array payload and __dict__; numeric/ndarray/sequence pairs via
    assert_allclose; mappings key-by-key; other (non-set, non-str)
    containers element-wise; anything else with ==.
    """
    if a is b:
        return
    if not msg:
        msg = 'Items are not equal:\n ACTUAL: {0}\n DESIRED: {1}'.format(a, b)
    # a or b is an ndarray sub-class
    if isinstance(a, np.ndarray) and type(a) not in (np.matrix, np.ndarray) or\
       isinstance(b, np.ndarray) and type(b) not in (np.matrix, np.ndarray):
        assert_is(type(a), type(b))
        assert_allclose(a.view(np.ndarray), b.view(np.ndarray), err_msg=msg)
        assert_eq(a.__dict__, b.__dict__, msg)
        return
    # a and b are ndarray or one of them is an ndarray and the other is a seq.
    num_types = (bool, int, float, complex, np.ndarray, np.number)
    if isinstance(a, num_types) and isinstance(b, num_types) or \
       isinstance(a, np.ndarray) and isinstance(b, (list, tuple)) or \
       isinstance(b, np.ndarray) and isinstance(a, (list, tuple)):
        assert_allclose(a, b, err_msg=msg)
        return
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        raise AssertionError(msg)
    if isinstance(a, Mapping) and isinstance(b, Mapping):
        assert_equal(set(a.keys()), set(b.keys()), err_msg=msg)
        for k in a:
            assert_eq(a[k], b[k], msg)
        return
    if isinstance(a, Container) and not isinstance(a, (set, str)) and \
       isinstance(b, Container) and not isinstance(b, (set, str)):
        assert_equal(len(a), len(b), msg)
        for a_, b_ in zip(a, b):
            assert_eq(a_, b_, msg)
        return
    try:
        equal = a == b
    except Exception:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt raised by a broken __eq__.
        equal = False
    assert equal, msg
def assert_in(a, b, msg=None):
    """Assert that the first argument is in the second one."""
    if a in b:
        return
    # raise explicitly so the check survives `python -O`
    # (plain `assert` statements are stripped under optimization)
    raise AssertionError(str(a) + ' is not in ' + str(b) + _get_msg(msg))
def assert_not_in(a, b, msg=None):
    """Assert that the first argument is not in second one."""
    if a not in b:
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + ' is in ' + str(b) + _get_msg(msg))
def assert_is(a, b, msg=None):
    """Assert arguments are equal as determined by the 'is' operator."""
    if a is b:
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + ' is not ' + str(b) + _get_msg(msg))
def assert_is_not(a, b, msg=None):
    """Assert arguments are not equal as determined by the 'is' operator."""
    if a is not b:
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + ' is ' + str(b) + _get_msg(msg))
def assert_is_instance(a, cls, msg=None):
    """Assert that the first argument is an instance of the second one."""
    if isinstance(a, cls):
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + " is not a '" + cls.__name__ + "' instance" +
                         _get_msg(msg))
def assert_is_not_instance(a, cls, msg=None):
    """
    Assert that the first argument is not an instance of the second one.
    """
    if not isinstance(a, cls):
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + " is a '" + cls.__name__ + "' instance" +
                         _get_msg(msg))
def assert_is_none(a, msg=None):
    """Assert argument is None."""
    if a is None:
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + ' is not None' + _get_msg(msg))
def assert_is_not_none(a, msg=None):
    """Assert argument is not None."""
    if a is not None:
        return
    # raise explicitly so the check survives `python -O`
    raise AssertionError(str(a) + ' is None' + _get_msg(msg))
def assert_is_type(a, cls, msg=None):
    """Assert argument is exactly of a specified type (no subclasses).

    `cls` may be a single type or an iterable of acceptable types.
    """
    classes = (cls,) if type(cls) is type else tuple(cls)
    for candidate in classes:
        if type(a) is candidate:
            return
    raise AssertionError(
        "{0} is of type '{1}' instead of {2}{3}".format(
            a, type(a).__name__, strenum(c.__name__ for c in classes), _get_msg(msg)))
def assert_raises(*args, **kwargs):
    # BUGFIX: propagate the return value -- np.testing.assert_raises acts
    # as a context manager when called with only the exception class, and
    # the original wrapper silently dropped that context-manager object,
    # breaking `with assert_raises(Exc): ...` usage.
    return np.testing.assert_raises(*args, **kwargs)
assert_raises.__doc__ = np.testing.assert_raises.__doc__
def skiptest(func):
    """Decorator replacing *func* with a test that is always skipped."""
    from nose.plugins.skip import SkipTest

    @functools.wraps(func)
    def skipper():
        raise SkipTest()
    return skipper
def skiptest_if(condition):
    """Decorator factory: skip the wrapped test when *condition* is truthy."""
    from nose.plugins.skip import SkipTest

    def decorator(func):
        @functools.wraps(func)
        def wrapper():
            if condition:
                raise SkipTest()
            func()
        return wrapper
    return decorator
def skiptest_unless_module(module):
    """Decorator factory: skip the wrapped test when *module* can't be imported."""
    from nose.plugins.skip import SkipTest

    def decorator(func):
        @functools.wraps(func)
        def wrapper():
            try:
                __import__(module)
            except ImportError:
                raise SkipTest()
            func()
        return wrapper
    return decorator
def _get_msg(msg):
if not msg:
return '.'
return ': ' + str(msg) + '.'
| [
"nose.plugins.skip.SkipTest",
"numpy.testing.assert_raises",
"numpy.asarray",
"numpy.isfinite",
"numpy.isnan",
"numpy.isinf",
"numpy.any",
"numpy.finfo",
"numpy.mean",
"functools.wraps",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"numpy.all"
] | [((1114, 1132), 'numpy.asarray', 'np.asarray', (['actual'], {}), '(actual)\n', (1124, 1132), True, 'import numpy as np\n'), ((1147, 1166), 'numpy.asarray', 'np.asarray', (['desired'], {}), '(desired)\n', (1157, 1166), True, 'import numpy as np\n'), ((7249, 7290), 'numpy.testing.assert_raises', 'np.testing.assert_raises', (['*args'], {}), '(*args, **kwargs)\n', (7273, 7290), True, 'import numpy as np\n'), ((7418, 7439), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (7433, 7439), False, 'import functools\n'), ((2356, 2368), 'numpy.all', 'np.all', (['same'], {}), '(same)\n', (2362, 2368), True, 'import numpy as np\n'), ((3009, 3026), 'numpy.any', 'np.any', (['check_nan'], {}), '(check_nan)\n', (3015, 3026), True, 'import numpy as np\n'), ((4444, 4478), 'numpy.testing.assert_allclose', 'assert_allclose', (['a', 'b'], {'err_msg': 'msg'}), '(a, b, err_msg=msg)\n', (4459, 4478), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((7467, 7477), 'nose.plugins.skip.SkipTest', 'SkipTest', ([], {}), '()\n', (7475, 7477), False, 'from nose.plugins.skip import SkipTest\n'), ((7598, 7619), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (7613, 7619), False, 'import functools\n'), ((7868, 7889), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (7883, 7889), False, 'import functools\n'), ((1697, 1726), 'numpy.testing.assert_equal', 'assert_equal', (['actual', 'desired'], {}), '(actual, desired)\n', (1709, 1726), False, 'from numpy.testing import assert_equal, assert_allclose\n'), ((1760, 1785), 'numpy.all', 'np.all', (['(actual == desired)'], {}), '(actual == desired)\n', (1766, 1785), True, 'import numpy as np\n'), ((2037, 2052), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (2045, 2052), True, 'import numpy as np\n'), ((2075, 2090), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (2083, 2090), True, 'import numpy as np\n'), ((2454, 2467), 'numpy.mean', 'np.mean', (['same'], {}), 
'(same)\n', (2461, 2467), True, 'import numpy as np\n'), ((2518, 2538), 'numpy.isfinite', 'np.isfinite', (['desired'], {}), '(desired)\n', (2529, 2538), True, 'import numpy as np\n'), ((2898, 2914), 'numpy.isnan', 'np.isnan', (['actual'], {}), '(actual)\n', (2906, 2914), True, 'import numpy as np\n'), ((2959, 2976), 'numpy.isnan', 'np.isnan', (['desired'], {}), '(desired)\n', (2967, 2976), True, 'import numpy as np\n'), ((7685, 7695), 'nose.plugins.skip.SkipTest', 'SkipTest', ([], {}), '()\n', (7693, 7695), False, 'from nose.plugins.skip import SkipTest\n'), ((2269, 2285), 'numpy.isnan', 'np.isnan', (['actual'], {}), '(actual)\n', (2277, 2285), True, 'import numpy as np\n'), ((2288, 2305), 'numpy.isnan', 'np.isnan', (['desired'], {}), '(desired)\n', (2296, 2305), True, 'import numpy as np\n'), ((2496, 2515), 'numpy.isfinite', 'np.isfinite', (['actual'], {}), '(actual)\n', (2507, 2515), True, 'import numpy as np\n'), ((2918, 2935), 'numpy.isnan', 'np.isnan', (['desired'], {}), '(desired)\n', (2926, 2935), True, 'import numpy as np\n'), ((2980, 2996), 'numpy.isnan', 'np.isnan', (['actual'], {}), '(actual)\n', (2988, 2996), True, 'import numpy as np\n'), ((8013, 8023), 'nose.plugins.skip.SkipTest', 'SkipTest', ([], {}), '()\n', (8021, 8023), False, 'from nose.plugins.skip import SkipTest\n'), ((2809, 2824), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (2817, 2824), True, 'import numpy as np\n'), ((2856, 2871), 'numpy.finfo', 'np.finfo', (['dtype'], {}), '(dtype)\n', (2864, 2871), True, 'import numpy as np\n'), ((3093, 3109), 'numpy.isinf', 'np.isinf', (['actual'], {}), '(actual)\n', (3101, 3109), True, 'import numpy as np\n'), ((3112, 3129), 'numpy.isinf', 'np.isinf', (['desired'], {}), '(desired)\n', (3120, 3129), True, 'import numpy as np\n'), ((3458, 3471), 'numpy.mean', 'np.mean', (['same'], {}), '(same)\n', (3465, 3471), True, 'import numpy as np\n')] |
"""
Tests thte BLAS capability for the opt_einsum module.
"""
import numpy as np
import pytest
from opt_einsum import blas, helpers, contract
# Each case: ((input subscripts, output subscripts, contracted indices),
#             expected kernel label or False when no BLAS call applies).
blas_tests = [
    # DOT
    ((['k', 'k'], '', set('k')), 'DOT'),  # DDOT
    ((['ijk', 'ijk'], '', set('ijk')), 'DOT'),  # DDOT
    # GEMV?
    # GEMM
    ((['ij', 'jk'], 'ik', set('j')), 'GEMM'),  # GEMM N N
    ((['ijl', 'jlk'], 'ik', set('jl')), 'GEMM'),  # GEMM N N Tensor
    ((['ij', 'kj'], 'ik', set('j')), 'GEMM'),  # GEMM N T
    ((['ijl', 'kjl'], 'ik', set('jl')), 'GEMM'),  # GEMM N T Tensor
    ((['ji', 'jk'], 'ik', set('j')), 'GEMM'),  # GEMM T N
    ((['jli', 'jlk'], 'ik', set('jl')), 'GEMM'),  # GEMM T N Tensor
    ((['ji', 'kj'], 'ik', set('j')), 'GEMM'),  # GEMM T T
    ((['jli', 'kjl'], 'ik', set('jl')), 'GEMM'),  # GEMM T T Tensor
    # GEMM with final transpose
    ((['ij', 'jk'], 'ki', set('j')), 'GEMM'),  # GEMM N N
    ((['ijl', 'jlk'], 'ki', set('jl')), 'GEMM'),  # GEMM N N Tensor
    ((['ij', 'kj'], 'ki', set('j')), 'GEMM'),  # GEMM N T
    ((['ijl', 'kjl'], 'ki', set('jl')), 'GEMM'),  # GEMM N T Tensor
    ((['ji', 'jk'], 'ki', set('j')), 'GEMM'),  # GEMM T N
    ((['jli', 'jlk'], 'ki', set('jl')), 'GEMM'),  # GEMM T N Tensor
    ((['ji', 'kj'], 'ki', set('j')), 'GEMM'),  # GEMM T T
    ((['jli', 'kjl'], 'ki', set('jl')), 'GEMM'),  # GEMM T T Tensor
    # Tensor Dot (requires copy), lets not deal with this for now
    ((['ilj', 'jlk'], 'ik', set('jl')), 'TDOT'),  # FT GEMM N N Tensor
    ((['ijl', 'ljk'], 'ik', set('jl')), 'TDOT'),  # ST GEMM N N Tensor
    ((['ilj', 'kjl'], 'ik', set('jl')), 'TDOT'),  # FT GEMM N T Tensor
    ((['ijl', 'klj'], 'ik', set('jl')), 'TDOT'),  # ST GEMM N T Tensor
    ((['lji', 'jlk'], 'ik', set('jl')), 'TDOT'),  # FT GEMM T N Tensor
    ((['jli', 'ljk'], 'ik', set('jl')), 'TDOT'),  # ST GEMM T N Tensor
    ((['lji', 'jlk'], 'ik', set('jl')), 'TDOT'),  # FT GEMM T N Tensor
    ((['jli', 'ljk'], 'ik', set('jl')), 'TDOT'),  # ST GEMM T N Tensor
    # Tensor Dot (requires copy), lets not deal with this for now with transpose
    ((['ilj', 'jlk'], 'ik', set('lj')), 'TDOT'),  # FT GEMM N N Tensor
    ((['ijl', 'ljk'], 'ik', set('lj')), 'TDOT'),  # ST GEMM N N Tensor
    ((['ilj', 'kjl'], 'ik', set('lj')), 'TDOT'),  # FT GEMM N T Tensor
    ((['ijl', 'klj'], 'ik', set('lj')), 'TDOT'),  # ST GEMM N T Tensor
    ((['lji', 'jlk'], 'ik', set('lj')), 'TDOT'),  # FT GEMM T N Tensor
    ((['jli', 'ljk'], 'ik', set('lj')), 'TDOT'),  # ST GEMM T N Tensor
    ((['lji', 'jlk'], 'ik', set('lj')), 'TDOT'),  # FT GEMM T N Tensor
    ((['jli', 'ljk'], 'ik', set('lj')), 'TDOT'),  # ST GEMM T N Tensor
    # Other
    ((['ijk', 'ikj'], '', set('ijk')), 'DOT/EINSUM'),  # Transpose DOT
    ((['i', 'j'], 'ij', set()), 'OUTER/EINSUM'),  # Outer
    ((['ijk', 'ik'], 'j', set('ik')), 'GEMV/EINSUM'),  # Matrix-vector
    ((['ijj', 'jk'], 'ik', set('j')), False),  # Double index
    ((['ijk', 'j'], 'ij', set()), False),  # Index sum 1
    ((['ij', 'ij'], 'ij', set()), False),  # Index sum 2
    ]
@pytest.mark.parametrize("inp,benchmark", blas_tests)
def test_can_blas(inp, benchmark):
    # can_blas() must classify each contraction as the expected kernel.
    assert blas.can_blas(*inp) == benchmark
@pytest.mark.parametrize("inp,benchmark", blas_tests)
def test_tensor_blas(inp, benchmark):
    """Check tensor_blas agrees with np.einsum on every BLAS-able case."""
    # Weed out non-blas cases
    if benchmark is False:
        return
    tensor_strs, output, reduced_idx = inp
    einsum_str = ','.join(tensor_strs) + '->' + output
    # Only binary operations should be here.
    # BUGFIX: was `if ...: assert False`, which fails with no diagnostic.
    assert len(tensor_strs) == 2, 'tensor_blas cases must be binary contractions'
    view_left, view_right = helpers.build_views(einsum_str)
    einsum_result = np.einsum(einsum_str, view_left, view_right)
    blas_result = blas.tensor_blas(view_left, tensor_strs[0], view_right, tensor_strs[1], output, reduced_idx)
    assert np.allclose(einsum_result, blas_result)
def test_blas_out():
    """contract() must write its result into a user-supplied `out` array."""
    a = np.random.rand(4, 4)
    b = np.random.rand(4, 4)
    c = np.random.rand(4, 4)
    out = np.empty((4, 4))
    contract('ij,jk->ik', a, b, out=out)
    expected = np.dot(a, b)
    assert np.allclose(out, expected)
    contract('ij,jk,kl->il', a, b, c, out=out)
    expected = np.dot(a, b).dot(c)
    assert np.allclose(out, expected)
| [
"numpy.dot",
"opt_einsum.blas.tensor_blas",
"opt_einsum.helpers.build_views",
"numpy.empty",
"numpy.allclose",
"numpy.einsum",
"opt_einsum.contract",
"opt_einsum.blas.can_blas",
"numpy.random.rand",
"pytest.mark.parametrize"
] | [((3214, 3266), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inp,benchmark"""', 'blas_tests'], {}), "('inp,benchmark', blas_tests)\n", (3237, 3266), False, 'import pytest\n'), ((3369, 3421), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inp,benchmark"""', 'blas_tests'], {}), "('inp,benchmark', blas_tests)\n", (3392, 3421), False, 'import pytest\n'), ((3315, 3334), 'opt_einsum.blas.can_blas', 'blas.can_blas', (['*inp'], {}), '(*inp)\n', (3328, 3334), False, 'from opt_einsum import blas, helpers, contract\n'), ((3757, 3788), 'opt_einsum.helpers.build_views', 'helpers.build_views', (['einsum_str'], {}), '(einsum_str)\n', (3776, 3788), False, 'from opt_einsum import blas, helpers, contract\n'), ((3810, 3854), 'numpy.einsum', 'np.einsum', (['einsum_str', 'view_left', 'view_right'], {}), '(einsum_str, view_left, view_right)\n', (3819, 3854), True, 'import numpy as np\n'), ((3873, 3969), 'opt_einsum.blas.tensor_blas', 'blas.tensor_blas', (['view_left', 'tensor_strs[0]', 'view_right', 'tensor_strs[1]', 'output', 'reduced_idx'], {}), '(view_left, tensor_strs[0], view_right, tensor_strs[1],\n output, reduced_idx)\n', (3889, 3969), False, 'from opt_einsum import blas, helpers, contract\n'), ((3978, 4017), 'numpy.allclose', 'np.allclose', (['einsum_result', 'blas_result'], {}), '(einsum_result, blas_result)\n', (3989, 4017), True, 'import numpy as np\n'), ((4049, 4069), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (4063, 4069), True, 'import numpy as np\n'), ((4078, 4098), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (4092, 4098), True, 'import numpy as np\n'), ((4107, 4127), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)'], {}), '(4, 4)\n', (4121, 4127), True, 'import numpy as np\n'), ((4136, 4152), 'numpy.empty', 'np.empty', (['(4, 4)'], {}), '((4, 4))\n', (4144, 4152), True, 'import numpy as np\n'), ((4158, 4192), 'opt_einsum.contract', 'contract', (['"""ij,jk->ik"""', 'a', 'b'], 
{'out': 'd'}), "('ij,jk->ik', a, b, out=d)\n", (4166, 4192), False, 'from opt_einsum import blas, helpers, contract\n'), ((4238, 4278), 'opt_einsum.contract', 'contract', (['"""ij,jk,kl->il"""', 'a', 'b', 'c'], {'out': 'd'}), "('ij,jk,kl->il', a, b, c, out=d)\n", (4246, 4278), False, 'from opt_einsum import blas, helpers, contract\n'), ((4219, 4231), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (4225, 4231), True, 'import numpy as np\n'), ((4305, 4317), 'numpy.dot', 'np.dot', (['a', 'b'], {}), '(a, b)\n', (4311, 4317), True, 'import numpy as np\n')] |
# multiple classification with Logistic Regression
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.optimize as opt
def importCSV(dir, columns):
    """Load a headerless CSV and prepend a constant bias column.

    Args:
        dir: path of the CSV file to read.
        columns: names to assign to the raw (unnamed) columns.

    Returns:
        pandas.DataFrame whose first column is 'Ones' (all 1s, the
        intercept/bias feature), followed by the named data columns.
    """
    frame = pd.read_csv(dir, header=None, names=columns)
    # The leading column of ones is the intercept term for the model.
    frame.insert(0, 'Ones', 1)
    return frame
def plot_image(data):
    """Scatter-plot District vs. Time with one colour/marker per cause.

    BUG FIX: the original drew the *entire* dataset four times with
    overlapping styles, so the legend was meaningless and later layers
    hid earlier ones.  Each cause is now drawn from its own subset.
    Assumes the 'Cause' column holds labels 1..4 (matches flag_num
    in __main__) -- TODO confirm against the CSV.
    """
    fig, ax = plt.subplots(figsize=(12, 8))
    styles = [('b', 'o', 'Cause1'), ('b', 'x', 'Cause2'),
              ('r', 'o', 'Cause3'), ('r', 'x', 'Cause4')]
    for cause, (color, marker, label) in enumerate(styles, start=1):
        subset = data[data['Cause'] == cause]
        ax.scatter(subset['District'], subset['Time'], s=50,
                   c=color, marker=marker, label=label)
    ax.legend()
    ax.set_xlabel('District')
    ax.set_ylabel('Time')
    plt.show()
def sigmoid(z):
    """Logistic function 1 / (1 + e^(-z)); element-wise on arrays."""
    exp_neg = np.exp(-z)
    return 1.0 / (1.0 + exp_neg)
def cost(theta, X, y, l):
    """Regularized cross-entropy cost for binary logistic regression.

    Args:
        theta: parameter vector (bias first), shape (n,) or (1, n).
        X: design matrix, shape (m, n), with a leading column of ones.
        y: binary labels, shape (m,) or (m, 1).
        l: regularization strength (lambda); the bias is not penalized.

    Returns:
        float: mean negative log-likelihood plus the L2 penalty.
    """
    # np.mat/np.matrix is deprecated in NumPy; use plain ndarrays with
    # explicit shapes.  Reshaping y also makes a 1-D label vector safe
    # (the old matrix version silently broadcast it to (m, m)).
    theta = np.asarray(theta, dtype=float).reshape(1, -1)
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float).reshape(-1, 1)
    h = sigmoid(X @ theta.T)  # hypothesis, shape (m, 1)
    first_term = np.multiply(-y, np.log(h))
    second_term = np.multiply(1 - y, np.log(1 - h))
    # Conventionally theta[0] (the bias) is excluded from the penalty.
    reg_term = (l / (2 * len(X))) * np.sum(np.power(theta[:, 1:], 2))
    return np.sum(first_term - second_term) / len(X) + reg_term
def gradient(theta, X, y, l):
    """Gradient of the regularized logistic-regression cost.

    grad_0      = (1/m) * sum(error * x_0)                  (bias, no penalty)
    grad_j, j>0 = (1/m) * sum(error * x_j) + (l/m) * theta_j

    Args and shapes mirror `cost`.  Returns a flat ndarray of length n,
    which is what scipy.optimize.minimize expects from `jac`.
    """
    # Plain ndarrays instead of the deprecated np.mat.
    theta = np.asarray(theta, dtype=float).reshape(1, -1)
    X = np.asarray(X, dtype=float)
    y = np.asarray(y, dtype=float).reshape(-1, 1)
    m, n = X.shape
    grad = np.zeros(n)
    error = sigmoid(X @ theta.T) - y  # shape (m, 1)
    for j in range(n):
        tmp = np.multiply(error, X[:, [j]])
        if j == 0:
            grad[j] = np.sum(tmp) / m
        else:
            # BUG FIX: the penalty previously used theta[:, 1] for every
            # feature; each feature j must be penalized by its own theta_j.
            grad[j] = np.sum(tmp) / m + (l / m) * theta[0, j]
    return grad
def logistic_regression(X, y, num_labels, l):
    """One-vs-all training: fit one regularized logistic classifier per label.

    Args:
        X: design matrix, shape (m, n) including the bias column.
        y: label vector with values in 1..num_labels.
        num_labels: number of classes.
        l: regularization strength passed to `cost`/`gradient`.

    Returns:
        ndarray of shape (num_labels, n); row k holds the fitted
        parameters of the classifier for label k+1.
    """
    n_features = X.shape[1]
    all_theta = np.zeros((num_labels, n_features))
    for label in range(1, num_labels + 1):
        # Binary target: 1 where the sample belongs to `label`, else 0.
        y_binary = np.array([1 if v == label else 0 for v in y]).reshape(X.shape[0], 1)
        result = opt.minimize(
            fun=cost,
            x0=np.zeros(n_features),
            args=(X, y_binary, l),
            method='TNC',
            jac=gradient,
        )
        all_theta[label - 1, :] = result.x
    return all_theta
def predict(X, all_theta):
    """Return the most probable 1-based label for every row of X.

    Args:
        X: design matrix, shape (m, n) including the bias column.
        all_theta: per-class parameters, shape (num_labels, n).

    Returns:
        (m, 1) matrix of predicted labels in 1..num_labels.
    """
    X = np.mat(X)
    all_theta = np.mat(all_theta)
    probabilities = sigmoid(X * all_theta.T)
    # argmax gives 0-based column indices; labels are 1-based, hence +1.
    return np.argmax(probabilities, axis=1) + 1
if __name__ == '__main__':
    # --- initialization: load the dataset ---
    columns = ['District', 'Time', 'Cause']
    # importCSV prepends a constant 'Ones' column for the intercept term.
    data = importCSV('featureDataSet.csv', columns)
    row_num = data.shape[0]  # number of samples (currently unused)
    col_num = data.shape[1]
    flag_num = 4  # number of cause classes; labels assumed to be 1..4
    # Features: every column except the last; labels: the last column.
    x_mat = np.array(data.iloc[:, 0:col_num-1])
    y_mat = np.array(data.iloc[:, col_num-1:col_num])
    # --- draw the raw data ---
    plot_image(data)
    # --- one-vs-all training, regularization strength lambda = 1 ---
    all_theta = logistic_regression(x_mat,y_mat,flag_num,1)
    #print(all_theta)
    y_pred = predict(x_mat,all_theta)
    # Training-set accuracy: fraction of predictions matching the labels.
    correct = [1 if a == b else 0 for (a, b) in zip(y_pred, y_mat)]
    accuracy = sum(map(int, correct)) / float(len(correct))
    #print(correct)
    print('accuracy = {0}%'.format(accuracy * 100))
| [
"scipy.optimize.minimize",
"matplotlib.pyplot.show",
"numpy.multiply",
"numpy.sum",
"numpy.argmax",
"pandas.read_csv",
"numpy.power",
"numpy.zeros",
"numpy.array",
"numpy.mat",
"numpy.exp",
"matplotlib.pyplot.subplots"
] | [((195, 239), 'pandas.read_csv', 'pd.read_csv', (['dir'], {'header': 'None', 'names': 'columns'}), '(dir, header=None, names=columns)\n', (206, 239), True, 'import pandas as pd\n'), ((325, 354), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (337, 354), True, 'import matplotlib.pyplot as plt\n'), ((783, 793), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (791, 793), True, 'import matplotlib.pyplot as plt\n'), ((884, 897), 'numpy.mat', 'np.mat', (['theta'], {}), '(theta)\n', (890, 897), True, 'import numpy as np\n'), ((906, 915), 'numpy.mat', 'np.mat', (['X'], {}), '(X)\n', (912, 915), True, 'import numpy as np\n'), ((924, 933), 'numpy.mat', 'np.mat', (['y'], {}), '(y)\n', (930, 933), True, 'import numpy as np\n'), ((1276, 1289), 'numpy.mat', 'np.mat', (['theta'], {}), '(theta)\n', (1282, 1289), True, 'import numpy as np\n'), ((1298, 1307), 'numpy.mat', 'np.mat', (['X'], {}), '(X)\n', (1304, 1307), True, 'import numpy as np\n'), ((1316, 1325), 'numpy.mat', 'np.mat', (['y'], {}), '(y)\n', (1322, 1325), True, 'import numpy as np\n'), ((1378, 1393), 'numpy.zeros', 'np.zeros', (['times'], {}), '(times)\n', (1386, 1393), True, 'import numpy as np\n'), ((1742, 1771), 'numpy.zeros', 'np.zeros', (['(num_labels, times)'], {}), '((num_labels, times))\n', (1750, 1771), True, 'import numpy as np\n'), ((2151, 2160), 'numpy.mat', 'np.mat', (['X'], {}), '(X)\n', (2157, 2160), True, 'import numpy as np\n'), ((2177, 2194), 'numpy.mat', 'np.mat', (['all_theta'], {}), '(all_theta)\n', (2183, 2194), True, 'import numpy as np\n'), ((2305, 2325), 'numpy.argmax', 'np.argmax', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (2314, 2325), True, 'import numpy as np\n'), ((2719, 2756), 'numpy.array', 'np.array', (['data.iloc[:, 0:col_num - 1]'], {}), '(data.iloc[:, 0:col_num - 1])\n', (2727, 2756), True, 'import numpy as np\n'), ((2767, 2810), 'numpy.array', 'np.array', (['data.iloc[:, col_num - 1:col_num]'], {}), '(data.iloc[:, 
col_num - 1:col_num])\n', (2775, 2810), True, 'import numpy as np\n'), ((1473, 1500), 'numpy.multiply', 'np.multiply', (['error', 'X[:, i]'], {}), '(error, X[:, i])\n', (1484, 1500), True, 'import numpy as np\n'), ((1826, 1841), 'numpy.zeros', 'np.zeros', (['times'], {}), '(times)\n', (1834, 1841), True, 'import numpy as np\n'), ((1856, 1907), 'numpy.array', 'np.array', (['[(1 if label == i else 0) for label in y]'], {}), '([(1 if label == i else 0) for label in y])\n', (1864, 1907), True, 'import numpy as np\n'), ((1981, 2059), 'scipy.optimize.minimize', 'opt.minimize', ([], {'fun': 'cost', 'x0': 'theta', 'args': '(X, y_i, l)', 'method': '"""TNC"""', 'jac': 'gradient'}), "(fun=cost, x0=theta, args=(X, y_i, l), method='TNC', jac=gradient)\n", (1993, 2059), True, 'import scipy.optimize as opt\n'), ((832, 842), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (838, 842), True, 'import numpy as np\n'), ((1114, 1153), 'numpy.power', 'np.power', (['theta[:, 1:theta.shape[1]]', '(2)'], {}), '(theta[:, 1:theta.shape[1]], 2)\n', (1122, 1153), True, 'import numpy as np\n'), ((1165, 1197), 'numpy.sum', 'np.sum', (['(first_term - second_term)'], {}), '(first_term - second_term)\n', (1171, 1197), True, 'import numpy as np\n'), ((1540, 1551), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (1546, 1551), True, 'import numpy as np\n'), ((1595, 1606), 'numpy.sum', 'np.sum', (['tmp'], {}), '(tmp)\n', (1601, 1606), True, 'import numpy as np\n')] |
# Copyright (c) <NAME>, <NAME>
# All rights reserved
# Modified by <NAME>, April 5, 2021 for use in his thesis; lord help him.
import numpy as np
import matplotlib.pylab as plt
import pickle
import os
from scipy.optimize import minimize, dual_annealing
import subprocess
import random
def kernel_optim_lbfgs_log(input_data,
                           target_data,
                           cost='correlation',
                           loss='exp_sum',
                           init_sig_mean=10.0,
                           init_sig_var=0.5,
                           dropout=0.0,
                           num_loops=10000,
                           method='L-BFGS-B',
                           log_foldername='./',
                           resume=None,
                           logging=False,
                           verbose=True,
                           allow_resume=True,
                           test_data=None):
    """Optimise per-feature Gaussian-kernel widths (sigmas) with scipy.

    The objective is the Pearson correlation between the pairwise
    (negative, sigma-weighted, squared) distances of the stimuli -- upper
    triangle only -- and the target dissimilarities.  Progress is
    checkpointed to pickle files in ``log_foldername`` by the
    ``print_corr`` callback, and the final state is read back from
    'tmp.pkl' at the end.

    Args:
        input_data: array of shape (num_model_features, num_stimuli).
        target_data: square (num_stimuli x num_stimuli) target matrix.
        cost: label recorded in the banner/log; only the correlation
            objective is implemented here.
        loss: recorded in the log config only; not used in computation.
        init_sig_mean: mean of the random sigma initialisation.
        init_sig_var: spread of the random sigma initialisation.
        dropout: fraction of feature dimensions randomly dropped on each
            objective/gradient evaluation (0.0 disables dropout).
        num_loops: maximum optimiser iterations ('maxiter').
        method: scipy.optimize.minimize method name.
        log_foldername: directory receiving 'tmp.pkl' and per-loop pickles.
        resume: optional state dict from a previous run with keys
            'init_seed', 'sigmas', 'gradients', 'correlations',
            'retrieved_loop'.
        logging: if True, dump the optimisation config once at start-up.
        verbose: print a start-up banner.
        allow_resume: unused in this function (kept for API compatibility).
        test_data: optional (test_input, test_target) pair; the callback
            tracks a held-out correlation when given.

    Returns:
        (correlations, sigmas): correlation trace and last sigma vector
        as checkpointed by the callback.
    """
    if (verbose):
        print(
            "* training gaussian kernels with cost '{}' and method '{}'".format(
                cost, method))
    num_model_features, num_stimuli = input_data.shape[0], input_data.shape[1]
    no_samples = num_stimuli * (num_stimuli - 1) / 2
    # Either restore a previous run's state or draw a fresh random init.
    if resume != None:
        init_seed = resume['init_seed']
        init_sigmas = resume['sigmas']
        gradients = resume['gradients']
        correlations = resume['correlations']
        retrieved_loop = resume['retrieved_loop']
    else:
        init_sigmas = np.abs(
            init_sig_mean + init_sig_var * np.random.randn(num_model_features, 1))
        init_seed = init_sigmas
        gradients = np.zeros((num_model_features, 1))
        correlations = []  # = np.zeros((num_loops, ))
        retrieved_loop = 0
    num_model_features, num_stimuli = input_data.shape[0], input_data.shape[1]
    no_samples = num_stimuli * (num_stimuli - 1) / 2
    # Only the upper triangle (i < j) of the pairwise matrices is compared.
    idx_triu = np.triu_indices(target_data.shape[0], k=1)
    target_values = target_data[idx_triu]
    mean_target = np.mean(target_values)
    std_target = np.std(target_values)
    testing_correlations = []
    # Extremely small gtol/ftol: in practice stop on maxiter, not on
    # convergence tolerance.
    optimization_options = {'disp': None, 'maxls': 50, 'iprint': -1,
                            'gtol': 1e-36, 'eps': 1e-8, 'maxiter': num_loops,
                            'ftol': 1e-36}
    # Set bounds to 1, 1e15 for all model features.
    optimization_bounds = [
        (1.0 * i, 1e15 * i) for i in np.ones((num_model_features,))
    ]
    if logging:
        pickle.dump({
            'seed': init_seed,
            'cost': cost,
            'loss': loss,
            'method': method,
            'init_sig_mean': init_sig_mean,
            'init_sig_var': init_sig_var,
            'num_loops': num_loops,
            'log_foldername': log_foldername,
            'optimization_options': {'options': optimization_options, 'bounds': optimization_bounds}
        }, open(os.path.join(log_foldername, 'optim_config.pkl'), 'wb'))

    def corr(x):
        """Objective: correlation between negative weighted squared
        distances (per stimulus pair) and the target values."""
        kernel = np.zeros((num_stimuli, num_stimuli))
        # Make an index value for every entry in all the input data (all stims).
        idx = [i for i in range(len(input_data))]
        # Clip sigmas to the optimisation boundary values.
        x = np.clip(x, a_min=1.0, a_max=1e15)
        # If using dropout, randomly truncate the feature index list.
        if dropout > 0.0:
            random.shuffle(idx)
            idx = idx[:int((1.0 - dropout) * len(idx))]
        for i in range(num_stimuli):
            for j in range(i + 1, num_stimuli):
                # Calculate the distance value, where x is sigma being trained.
                kernel[i, j] = -np.sum(
                    np.power(
                        np.divide(input_data[idx, i] - input_data[idx, j],
                                  (x[idx] + np.finfo(float).eps)), 2))
        kernel_v = kernel[idx_triu]
        mean_kernel = np.mean(kernel_v)
        std_kernel = np.std(kernel_v)
        # Pearson correlation: Jn is the covariance numerator, Jd the
        # product of standard deviations (times sample count).
        Jn = np.sum(np.multiply(kernel_v - mean_kernel, target_values - mean_target))
        Jd = no_samples * std_target * std_kernel
        return Jn / Jd

    def grad_corr(sigmas):
        """Analytic gradient of `corr` with respect to each sigma."""
        idx = [i for i in range(len(input_data))]
        if dropout > 0.0:
            random.shuffle(idx)
            idx = idx[:int((1.0 - dropout) * len(idx))]
        ndims = len(idx)
        kernel = np.zeros((num_stimuli, num_stimuli))
        # dkernel[i, j, k]: derivative of kernel[i, j] w.r.t. sigma k.
        dkernel = np.zeros((num_stimuli, num_stimuli, ndims))
        for i in range(num_stimuli):
            for j in range(i + 1, num_stimuli):
                kernel[i, j] = -np.sum(
                    np.power(
                        np.divide(input_data[idx, i] - input_data[idx, j],
                                  (sigmas[idx] + np.finfo(float).eps)), 2))
                dkernel[i, j, :] = 2 * np.power(
                    (input_data[idx, i] - input_data[idx, j]), 2) / (
                        np.power(sigmas[idx], 3) + np.finfo(
                            float).eps)
        kernel_v = kernel[idx_triu]
        mean_kernel = np.mean(kernel_v)
        std_kernel = np.std(kernel_v)
        Jn = np.sum(np.multiply(kernel_v - mean_kernel, target_values - mean_target))
        Jd = no_samples * std_target * std_kernel
        # Quotient rule on Jn / Jd for every sigma dimension.
        for k in range(ndims):
            tmp = dkernel[:, :, k][idx_triu]
            dJn = np.sum(tmp * (target_values - mean_target))
            dJd = std_target / (std_kernel + np.finfo(float).eps) * np.sum(
                tmp * (kernel_v - mean_kernel))
            gradients[k] = (Jd * dJn - Jn * dJd) / (
                np.power(Jd, 2) + np.finfo(float).eps)
        return gradients

    def print_corr(xk):
        """Per-iteration callback: recompute the (exponentiated) kernel
        correlation, extend the trace, optionally score held-out data,
        and checkpoint everything to pickle files."""
        kernel = np.zeros((num_stimuli, num_stimuli))
        for i in range(num_stimuli):
            for j in range(i + 1, num_stimuli):
                # Note: unlike `corr`, the kernel is exponentiated here.
                kernel[i, j] = np.exp(-np.sum(
                    np.power(
                        np.divide(input_data[:, i] - input_data[:, j],
                                  (xk + np.finfo(float).eps)), 2)))
        kernel_v = kernel[idx_triu]
        mean_kernel = np.mean(kernel_v)
        std_kernel = np.std(kernel_v)
        Jn = np.sum(np.multiply(kernel_v - mean_kernel, target_values - mean_target))
        Jd = no_samples * std_target * std_kernel
        # 'tmp.pkl' holds the running loop counter and correlation trace.
        if not os.path.isfile(os.path.join(log_foldername, 'tmp.pkl')):
            loop_cpt = 1
            pickle.dump({'loop': loop_cpt, 'correlation': [Jn / Jd]},
                        open(os.path.join(log_foldername, 'tmp.pkl'), 'wb'))
            correlations = [Jn / Jd]
            pickle.dump({
                'sigmas': xk,
                'kernel': kernel,
                'Jn': Jn,
                'Jd': Jd,
                'correlations': correlations,
            }, open(os.path.join(log_foldername,
                                      'optim_process_l={}.pkl'.format(loop_cpt)),
                    'wb'))
        else:
            last_loop = pickle.load(
                open(os.path.join(log_foldername, 'tmp.pkl'), 'rb'))
            loop_cpt = last_loop['loop'] + 1
            correlations = last_loop['correlation']
            correlations.append(Jn / Jd)
            if test_data != None:
                # Held-out correlation of a single test stimulus against
                # all training stimuli.
                test_input = test_data[0]
                test_target = test_data[1]
                mean_target_test = np.mean(test_target)
                std_target_test = np.std(test_target)
                distances = np.zeros((input_data.shape[1], 1))
                for i in range(len(distances)):
                    distances[i, 0] = -np.sum(np.power(
                        np.divide(test_input - input_data[:, i],
                                  (xk + np.finfo(float).eps)), 2))
                mean_distances = np.mean(distances)
                stddev_distances = np.std(distances)
                Jn_ = np.sum(np.multiply(distances - mean_distances,
                                        test_target - mean_target_test))
                Jd_ = std_target_test * stddev_distances * (num_stimuli - 1)
                testing_correlations.append(Jn_ / Jd_)
            else:
                testing_correlations.append(0.0)
            # Print and dump a full snapshot every `monitoring_step` loops.
            monitoring_step = 25
            if (loop_cpt % monitoring_step == 0):
                print(' |_ loop={} J={:.6f} {:.6f}'.format(loop_cpt, Jn / Jd,
                                                           testing_correlations[
                                                               -1]))
                pickle.dump({
                    'sigmas': xk,
                    'kernel': kernel,
                    'Jn': Jn,
                    'Jd': Jd,
                    'correlations': correlations
                }, open(os.path.join(log_foldername,
                                          'optim_process_l={}.pkl'.format(loop_cpt)),
                        'wb'))
            # plt.figure(figsize=(10,10))
            # plt.subplot(1,2,1)
            # plt.plot(xk)
            # plt.subplot(1,2,2)
            # plt.plot(correlations)
            # plt.plot(testing_correlations)
            # plt.savefig('log_sig_corr_lbfgs.pdf')
        # Refresh the running checkpoint after every iteration.
        pickle.dump(
            {'loop': loop_cpt, 'correlation': correlations, 'sigmas': xk},
            open(os.path.join(log_foldername, 'tmp.pkl'), 'wb'))
    res = minimize(corr, init_sigmas, args=(), method=method, jac=grad_corr,
                   callback=print_corr, options=optimization_options,
                   bounds=optimization_bounds)
    # Read the final state back from the callback's checkpoint.
    last_loop = pickle.load(open(os.path.join(log_foldername, 'tmp.pkl'), 'rb'))
    sigmas_ = last_loop['sigmas']
    correlations = last_loop['correlation']
    # NOTE(review): shelling out to `rm` is Unix-only; os.remove would be
    # portable -- left as-is to preserve behaviour.
    subprocess.run(["rm", os.path.join(log_foldername, 'tmp.pkl')])
return correlations, sigmas_ | [
"scipy.optimize.minimize",
"numpy.multiply",
"numpy.sum",
"numpy.random.randn",
"numpy.std",
"random.shuffle",
"numpy.power",
"numpy.zeros",
"numpy.triu_indices",
"numpy.clip",
"numpy.ones",
"numpy.finfo",
"numpy.mean",
"os.path.join"
] | [((1897, 1939), 'numpy.triu_indices', 'np.triu_indices', (['target_data.shape[0]'], {'k': '(1)'}), '(target_data.shape[0], k=1)\n', (1912, 1939), True, 'import numpy as np\n'), ((2000, 2022), 'numpy.mean', 'np.mean', (['target_values'], {}), '(target_values)\n', (2007, 2022), True, 'import numpy as np\n'), ((2040, 2061), 'numpy.std', 'np.std', (['target_values'], {}), '(target_values)\n', (2046, 2061), True, 'import numpy as np\n'), ((9300, 9450), 'scipy.optimize.minimize', 'minimize', (['corr', 'init_sigmas'], {'args': '()', 'method': 'method', 'jac': 'grad_corr', 'callback': 'print_corr', 'options': 'optimization_options', 'bounds': 'optimization_bounds'}), '(corr, init_sigmas, args=(), method=method, jac=grad_corr, callback\n =print_corr, options=optimization_options, bounds=optimization_bounds)\n', (9308, 9450), False, 'from scipy.optimize import minimize, dual_annealing\n'), ((1633, 1666), 'numpy.zeros', 'np.zeros', (['(num_model_features, 1)'], {}), '((num_model_features, 1))\n', (1641, 1666), True, 'import numpy as np\n'), ((2968, 3004), 'numpy.zeros', 'np.zeros', (['(num_stimuli, num_stimuli)'], {}), '((num_stimuli, num_stimuli))\n', (2976, 3004), True, 'import numpy as np\n'), ((3200, 3247), 'numpy.clip', 'np.clip', (['x'], {'a_min': '(1.0)', 'a_max': '(1000000000000000.0)'}), '(x, a_min=1.0, a_max=1000000000000000.0)\n', (3207, 3247), True, 'import numpy as np\n'), ((3848, 3865), 'numpy.mean', 'np.mean', (['kernel_v'], {}), '(kernel_v)\n', (3855, 3865), True, 'import numpy as np\n'), ((3887, 3903), 'numpy.std', 'np.std', (['kernel_v'], {}), '(kernel_v)\n', (3893, 3903), True, 'import numpy as np\n'), ((4297, 4333), 'numpy.zeros', 'np.zeros', (['(num_stimuli, num_stimuli)'], {}), '((num_stimuli, num_stimuli))\n', (4305, 4333), True, 'import numpy as np\n'), ((4352, 4395), 'numpy.zeros', 'np.zeros', (['(num_stimuli, num_stimuli, ndims)'], {}), '((num_stimuli, num_stimuli, ndims))\n', (4360, 4395), True, 'import numpy as np\n'), ((5014, 5031), 
'numpy.mean', 'np.mean', (['kernel_v'], {}), '(kernel_v)\n', (5021, 5031), True, 'import numpy as np\n'), ((5053, 5069), 'numpy.std', 'np.std', (['kernel_v'], {}), '(kernel_v)\n', (5059, 5069), True, 'import numpy as np\n'), ((5652, 5688), 'numpy.zeros', 'np.zeros', (['(num_stimuli, num_stimuli)'], {}), '((num_stimuli, num_stimuli))\n', (5660, 5688), True, 'import numpy as np\n'), ((6048, 6065), 'numpy.mean', 'np.mean', (['kernel_v'], {}), '(kernel_v)\n', (6055, 6065), True, 'import numpy as np\n'), ((6087, 6103), 'numpy.std', 'np.std', (['kernel_v'], {}), '(kernel_v)\n', (6093, 6103), True, 'import numpy as np\n'), ((2402, 2432), 'numpy.ones', 'np.ones', (['(num_model_features,)'], {}), '((num_model_features,))\n', (2409, 2432), True, 'import numpy as np\n'), ((3331, 3350), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (3345, 3350), False, 'import random\n'), ((3924, 3988), 'numpy.multiply', 'np.multiply', (['(kernel_v - mean_kernel)', '(target_values - mean_target)'], {}), '(kernel_v - mean_kernel, target_values - mean_target)\n', (3935, 3988), True, 'import numpy as np\n'), ((4179, 4198), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (4193, 4198), False, 'import random\n'), ((5090, 5154), 'numpy.multiply', 'np.multiply', (['(kernel_v - mean_kernel)', '(target_values - mean_target)'], {}), '(kernel_v - mean_kernel, target_values - mean_target)\n', (5101, 5154), True, 'import numpy as np\n'), ((5301, 5344), 'numpy.sum', 'np.sum', (['(tmp * (target_values - mean_target))'], {}), '(tmp * (target_values - mean_target))\n', (5307, 5344), True, 'import numpy as np\n'), ((6124, 6188), 'numpy.multiply', 'np.multiply', (['(kernel_v - mean_kernel)', '(target_values - mean_target)'], {}), '(kernel_v - mean_kernel, target_values - mean_target)\n', (6135, 6188), True, 'import numpy as np\n'), ((9517, 9556), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (9529, 9556), False, 'import 
os\n'), ((9669, 9708), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (9681, 9708), False, 'import os\n'), ((2876, 2924), 'os.path.join', 'os.path.join', (['log_foldername', '"""optim_config.pkl"""'], {}), "(log_foldername, 'optim_config.pkl')\n", (2888, 2924), False, 'import os\n'), ((5413, 5451), 'numpy.sum', 'np.sum', (['(tmp * (kernel_v - mean_kernel))'], {}), '(tmp * (kernel_v - mean_kernel))\n', (5419, 5451), True, 'import numpy as np\n'), ((6271, 6310), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (6283, 6310), False, 'import os\n'), ((7307, 7327), 'numpy.mean', 'np.mean', (['test_target'], {}), '(test_target)\n', (7314, 7327), True, 'import numpy as np\n'), ((7362, 7381), 'numpy.std', 'np.std', (['test_target'], {}), '(test_target)\n', (7368, 7381), True, 'import numpy as np\n'), ((7410, 7444), 'numpy.zeros', 'np.zeros', (['(input_data.shape[1], 1)'], {}), '((input_data.shape[1], 1))\n', (7418, 7444), True, 'import numpy as np\n'), ((7714, 7732), 'numpy.mean', 'np.mean', (['distances'], {}), '(distances)\n', (7721, 7732), True, 'import numpy as np\n'), ((7768, 7785), 'numpy.std', 'np.std', (['distances'], {}), '(distances)\n', (7774, 7785), True, 'import numpy as np\n'), ((1541, 1579), 'numpy.random.randn', 'np.random.randn', (['num_model_features', '(1)'], {}), '(num_model_features, 1)\n', (1556, 1579), True, 'import numpy as np\n'), ((5546, 5561), 'numpy.power', 'np.power', (['Jd', '(2)'], {}), '(Jd, 2)\n', (5554, 5561), True, 'import numpy as np\n'), ((6437, 6476), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (6449, 6476), False, 'import os\n'), ((6935, 6974), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (6947, 6974), False, 'import os\n'), ((7815, 7886), 'numpy.multiply', 'np.multiply', (['(distances - 
mean_distances)', '(test_target - mean_target_test)'], {}), '(distances - mean_distances, test_target - mean_target_test)\n', (7826, 7886), True, 'import numpy as np\n'), ((9241, 9280), 'os.path.join', 'os.path.join', (['log_foldername', '"""tmp.pkl"""'], {}), "(log_foldername, 'tmp.pkl')\n", (9253, 9280), False, 'import os\n'), ((4741, 4793), 'numpy.power', 'np.power', (['(input_data[idx, i] - input_data[idx, j])', '(2)'], {}), '(input_data[idx, i] - input_data[idx, j], 2)\n', (4749, 4793), True, 'import numpy as np\n'), ((4864, 4888), 'numpy.power', 'np.power', (['sigmas[idx]', '(3)'], {}), '(sigmas[idx], 3)\n', (4872, 4888), True, 'import numpy as np\n'), ((5564, 5579), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5572, 5579), True, 'import numpy as np\n'), ((4891, 4906), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (4899, 4906), True, 'import numpy as np\n'), ((5390, 5405), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5398, 5405), True, 'import numpy as np\n'), ((3763, 3778), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3771, 3778), True, 'import numpy as np\n'), ((4675, 4690), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (4683, 4690), True, 'import numpy as np\n'), ((5962, 5977), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5970, 5977), True, 'import numpy as np\n'), ((7654, 7669), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (7662, 7669), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import datetime
import sys
import numpy as np
import pytest
from climetlab import ALL, load_source
from climetlab.decorators import normalize
from climetlab.normalize import DateListNormaliser, EnumListNormaliser, EnumNormaliser
from climetlab.testing import climetlab_file
from climetlab.utils.bbox import BoundingBox
@normalize("parameter", ("variable-list(mars)"))
def values_mars(parameter):
return parameter
@normalize("parameter", ("variable-list(cf)"))
def values_cf(parameter):
return parameter
def test_param_convention_mars():
    """MARS convention: 't2m' maps to '2t'; unknown names pass through."""
    cases = [
        ("tp", "tp"),
        ("2t", "2t"),
        ("t2m", "2t"),
        (["t2m", "tp"], ["2t", "tp"]),
        ("whatever", "whatever"),
    ]
    for given, expected in cases:
        assert values_mars(parameter=given) == expected
def test_param_convention_cf():
    """CF convention: '2t' maps to 't2m'."""
    for given, expected in [("tp", "tp"), ("2t", "t2m"), ("t2m", "t2m")]:
        assert values_cf(parameter=given) == expected
@normalize("date", "date")
def dates_1(date):
return date
@normalize("date", "date-list")
def dates_list_1(date):
return date
def test_dates():
    """Date normalisation accepts numpy datetimes and GRIB fields."""
    npdate = np.datetime64("2016-01-01")
    assert dates_1(date=npdate) == datetime.datetime(2016, 1, 1)
    assert dates_list_1(date=npdate) == [datetime.datetime(2016, 1, 1)]
    source = load_source("file", climetlab_file("docs/examples/test.grib"))
    assert dates_1(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
    assert dates_list_1(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
    source = load_source("file", climetlab_file("docs/examples/test.nc"))
    # For now: extracting dates from NetCDF fields is not implemented.
    with pytest.raises(NotImplementedError):
        assert dates_1(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
        assert dates_list_1(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
def test_dates_no_list():
    """A single date (string or datetime) comes back as a one-item list."""
    norm = DateListNormaliser("%Y.%m.%d")
    same_day_inputs = [
        "20200513",
        [datetime.datetime(2020, 5, 13, 0, 0)],
        [datetime.datetime(2020, 5, 13, 23, 59)],
    ]
    for given in same_day_inputs:
        assert norm(given) == ["2020.05.13"]
# def test_dates_with_list():
# norm = DateListNormaliser("%Y.%m.%d", valid=["2020.05.13"] )
# assert norm("20200513") == ["2020.05.13"]
# assert norm([datetime.datetime(2020, 5, 13, 12, 0)]) == ["2020.05.13"]
#
# with pytest.raises(ValueError):
# assert norm("19991231")
def test_dates_3():
    """Without a format string the normaliser yields datetime objects."""
    norm = DateListNormaliser()
    assert norm("20200513") == [datetime.datetime(2020, 5, 13, 0, 0)]
    assert norm([datetime.datetime(2020, 5, 13, 0, 0)]) == [
        datetime.datetime(2020, 5, 13, 0, 0)
    ]
@normalize("area", "bounding-box")
def bbox_list(ignore, area):
return area
@normalize("area", "bounding-box(tuple)")
def bbox_tuple(area, ignore=None):
return area
@normalize("area", "bounding-box(list)")
def bbox_bbox(area):
return area
@normalize("area", "bounding-box(dict)")
def bbox_dict(area):
return area
@normalize("area", "bounding-box")
def bbox_defaults(area=None):
return area
# def test_enum_definition():
@normalize("name", ("a", "b", "c"))
def enum_1(name="a"):
return name
@normalize("name", ("a", "b", "c"))
def enum_no_default(name):
return name
@normalize("name", ("a", "b", "c"))
def enum_default_is_none(name=None):
return name
@normalize("name", (1, 0.5, 3))
def enum_number(name=1):
return name
# for k, v in vars().items():
# globals()[k] = v
# def test_enum_list_definition():
@normalize("name", ["a", "b", "c"])
def enum_list_1(name="a"):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_no_default(name):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_default_is_none(name=None):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_default_is_all(name=ALL):
return name
@normalize("name", [1, 0.5, 3])
def enum_list_number(name=1):
return name
@normalize("a", [1, 2])
@normalize("b", [3, 4])
def enum_2_normalizers(a, b):
return a
def test_enum_2_normalizers():
enum_2_normalizers(a=1, b=4)
# enum_2_normalizers(1,4)
@normalize(
    "name",
    ["a", "b", "c"],
    alias={
        "ab": ["a", "b"],    # alias expanding to several values
        "z": "a",            # simple alias
        "i": ["a", "b"],
        "j": "ab",           # alias pointing at another alias
        "bad": ["a", "ab"],  # mixes a value with an alias: invalid
    },
)
def enum_list_alias_1(name=1):
    """List-valued enum with a dict of aliases."""
    return name
def test_enum_list_alias_1():
    """Dict aliases expand (possibly via another alias) before validation."""
    cases = [
        ("a", ["a"]),
        ("b", ["b"]),
        ("ab", ["a", "b"]),
        ("z", ["a"]),
        (["z", "b"], ["a", "b"]),
        ("i", ["a", "b"]),
        ("j", ["a", "b"]),
    ]
    for given, expected in cases:
        assert enum_list_alias_1(given) == expected
    # 'bad' mixes a plain value with another alias and must be rejected.
    with pytest.raises(ValueError):
        enum_list_alias_1("bad")
@normalize(
    "name",
    [1, 2, 3],
    # Aliases may also be provided as a callable.
    alias=lambda x: {"one": 1, "o": "one"}.get(x, x),
)
def enum_list_alias_2(name=1):
    """List-valued enum whose aliases come from a function."""
    return name
def test_enum_list_alias_2():
    """Callable aliases are resolved down to real values."""
    assert enum_list_alias_2(1) == [1]
    assert enum_list_alias_2("one") == [1]
    assert enum_list_alias_2(["one"]) == [1]
    assert enum_list_alias_2(["o"]) == [1]  # "o" -> "one" -> 1
@normalize("name", ["a", "b", "c"], alias={"x": "y", "y": "z", "z": "a"})
def enum_alias(name=1):
return name
def test_enum_alias():
assert enum_alias("a") == ["a"]
assert enum_alias("b") == ["b"]
assert enum_alias("x") == ["a"]
assert enum_alias("y") == ["a"]
assert enum_alias("z") == ["a"]
# for k, v in vars().items():
# globals()[k] = v
def test_enum_decorator():
    """Single-value enum: valid values pass, lists and unknowns fail."""
    assert enum_1("a") == "a"
    assert enum_1("b") == "b"
    assert enum_1() == "a"
    for invalid in ("z", ["a", "b"]):
        with pytest.raises(ValueError):
            enum_1(invalid)
def test_enum_decorator_default():
    """Missing arguments: a None default raises ValueError, no default
    raises TypeError."""
    assert enum_no_default("a") == "a"
    assert enum_default_is_none("a") == "a"
    with pytest.raises(ValueError):
        enum_default_is_none()
    with pytest.raises(TypeError):
        enum_no_default()
def test_enum():
    """EnumNormaliser returns a single value and rejects ALL."""
    enum_3 = EnumNormaliser(["a", "b", "c"])
    assert enum_3("a") == "a"
    assert enum_3("b") == "b"
    with pytest.raises(ValueError):
        enum_3("z")
    with pytest.raises(ValueError):
        enum_3(ALL)
def test_enum_list_decorator_default():
    """Defaults of list-valued enums expand to the full value list."""
    assert enum_list_no_default("a") == ["a"]
    assert enum_list_default_is_none("a") == ["a"]
    assert enum_list_default_is_none() == ["a", "b", "c"]
    assert enum_list_default_is_all() == ["a", "b", "c"]
    assert enum_list_number(1.0) == [1]
    with pytest.raises(ValueError):
        enum_list_number("1")  # strings are not coerced to numbers
    # with pytest.raises(ValueError):
    #     enum_list_default_is_none()
    with pytest.raises(TypeError):
        enum_list_no_default()
def test_enum_list_case_sensitive():
    """Matching is case-insensitive but canonical casing is returned."""
    norm = EnumListNormaliser(["A", "b", "c"])
    assert norm(ALL) == ["A", "b", "c"]
    cases = [("a", ["A"]), ("A", ["A"]), (["a", "B"], ["A", "b"])]
    for given, expected in cases:
        assert norm(given) == expected
def test_bbox():
    """Areas convert to BoundingBox, tuple, list, dict and from fields."""
    area = [30.0, 2.0, 3.0, 4.0]  # north, west, south, east
    bbox = BoundingBox(north=30, west=2, south=3, east=4)
    assert bbox_list(None, area) == bbox
    assert bbox_list(area=area, ignore=None) == bbox
    assert bbox_tuple(area) == tuple(area)
    assert bbox_tuple(area=area) == tuple(area)
    assert bbox_bbox(area) == area
    assert bbox_dict(area) == dict(north=30, west=2, south=3, east=4)
    assert bbox_defaults(area) == bbox
    source = load_source("file", climetlab_file("docs/examples/test.grib"))
    assert bbox_tuple(source[0]) == (73.0, -27.0, 33.0, 45.0)
    source = load_source("file", climetlab_file("docs/examples/test.nc"))
    assert bbox_tuple(source[0]) == (73.0, -27.0, 33.0, 45.0)
def test_normalize_kwargs():
    """@normalize works on methods, with explicit params or **kwargs."""
    class Klass:
        @normalize("param", ["a", "b", "c"])
        def ok(self, param):
            pass
        @normalize("param", ["a", "b", "c"])
        def f(self, **kwargs):
            # def f(self, param, **kwargs):
            # The normalised value must still arrive through **kwargs.
            assert "param" in kwargs
    Klass().ok(param="a")
    Klass().f(param="a")
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python < 3.8")
def test_normalize_advanced_1():
    """@normalize must cope with positional-only parameters (PEP 570).

    The function is defined inside exec() because the `/` syntax is a
    SyntaxError on Python < 3.8 and would break module import.
    """
    exec(
        """
# def f(a,/, b, c=4,*, x=3):
#     return a,b,c,x
# args = ['A']
# kwargs=dict(b=2, c=4)
@normalize("b", ["B", "BB"])
def f(a, /, b, c=4, *, x=3):
    return a, b, c, x
out = f("A", b="B", c=7, x=8)
assert out == ("A", ["B"], 7, 8)
"""
    )
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python < 3.8")
def test_normalize_advanced_2():
    """Stacked @normalize decorators with positional-only parameters.

    Defined inside exec() for the same PEP 570 syntax reason as above.
    """
    exec(
        """
@normalize("b", ["B", "BB"])
@normalize("a", ["A", "AA"])
def g(a, /, b, c=4, *, x=3):
    return a, b, c, x
out = g("A", b="B", c=7, x=8)
assert out == (["A"], ["B"], 7, 8)
"""
    )
def test_normalize_advanced_3():
    """_find_normaliser builds a normaliser with type coercion applied."""
    from climetlab.normalize import _find_normaliser as normaliser
    assert normaliser((1, 2), type=str, multiple=True)(1) == ["1"]
    assert normaliser((1, 2), type=str, multiple=True)((1, 2)) == ["1", "2"]
if __name__ == "__main__":
# test_normalize_advanced_3()
from climetlab.testing import main
main(__file__)
| [
"numpy.datetime64",
"climetlab.testing.main",
"climetlab.decorators.normalize",
"climetlab.normalize.EnumNormaliser",
"datetime.datetime",
"climetlab.testing.climetlab_file",
"climetlab.normalize._find_normaliser",
"pytest.raises",
"pytest.mark.skipif",
"climetlab.normalize.DateListNormaliser",
... | [((728, 773), 'climetlab.decorators.normalize', 'normalize', (['"""parameter"""', '"""variable-list(mars)"""'], {}), "('parameter', 'variable-list(mars)')\n", (737, 773), False, 'from climetlab.decorators import normalize\n'), ((828, 871), 'climetlab.decorators.normalize', 'normalize', (['"""parameter"""', '"""variable-list(cf)"""'], {}), "('parameter', 'variable-list(cf)')\n", (837, 871), False, 'from climetlab.decorators import normalize\n'), ((1397, 1422), 'climetlab.decorators.normalize', 'normalize', (['"""date"""', '"""date"""'], {}), "('date', 'date')\n", (1406, 1422), False, 'from climetlab.decorators import normalize\n'), ((1461, 1491), 'climetlab.decorators.normalize', 'normalize', (['"""date"""', '"""date-list"""'], {}), "('date', 'date-list')\n", (1470, 1491), False, 'from climetlab.decorators import normalize\n'), ((3052, 3085), 'climetlab.decorators.normalize', 'normalize', (['"""area"""', '"""bounding-box"""'], {}), "('area', 'bounding-box')\n", (3061, 3085), False, 'from climetlab.decorators import normalize\n'), ((3134, 3174), 'climetlab.decorators.normalize', 'normalize', (['"""area"""', '"""bounding-box(tuple)"""'], {}), "('area', 'bounding-box(tuple)')\n", (3143, 3174), False, 'from climetlab.decorators import normalize\n'), ((3229, 3268), 'climetlab.decorators.normalize', 'normalize', (['"""area"""', '"""bounding-box(list)"""'], {}), "('area', 'bounding-box(list)')\n", (3238, 3268), False, 'from climetlab.decorators import normalize\n'), ((3309, 3348), 'climetlab.decorators.normalize', 'normalize', (['"""area"""', '"""bounding-box(dict)"""'], {}), "('area', 'bounding-box(dict)')\n", (3318, 3348), False, 'from climetlab.decorators import normalize\n'), ((3389, 3422), 'climetlab.decorators.normalize', 'normalize', (['"""area"""', '"""bounding-box"""'], {}), "('area', 'bounding-box')\n", (3398, 3422), False, 'from climetlab.decorators import normalize\n'), ((3502, 3536), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "('a', 
'b', 'c')"], {}), "('name', ('a', 'b', 'c'))\n", (3511, 3536), False, 'from climetlab.decorators import normalize\n'), ((3578, 3612), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "('a', 'b', 'c')"], {}), "('name', ('a', 'b', 'c'))\n", (3587, 3612), False, 'from climetlab.decorators import normalize\n'), ((3659, 3693), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "('a', 'b', 'c')"], {}), "('name', ('a', 'b', 'c'))\n", (3668, 3693), False, 'from climetlab.decorators import normalize\n'), ((3750, 3780), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', '(1, 0.5, 3)'], {}), "('name', (1, 0.5, 3))\n", (3759, 3780), False, 'from climetlab.decorators import normalize\n'), ((3922, 3956), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {}), "('name', ['a', 'b', 'c'])\n", (3931, 3956), False, 'from climetlab.decorators import normalize\n'), ((4003, 4037), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {}), "('name', ['a', 'b', 'c'])\n", (4012, 4037), False, 'from climetlab.decorators import normalize\n'), ((4089, 4123), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {}), "('name', ['a', 'b', 'c'])\n", (4098, 4123), False, 'from climetlab.decorators import normalize\n'), ((4185, 4219), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {}), "('name', ['a', 'b', 'c'])\n", (4194, 4219), False, 'from climetlab.decorators import normalize\n'), ((4279, 4309), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', '[1, 0.5, 3]'], {}), "('name', [1, 0.5, 3])\n", (4288, 4309), False, 'from climetlab.decorators import normalize\n'), ((4359, 4381), 'climetlab.decorators.normalize', 'normalize', (['"""a"""', '[1, 2]'], {}), "('a', [1, 2])\n", (4368, 4381), False, 'from climetlab.decorators import normalize\n'), ((4383, 4405), 'climetlab.decorators.normalize', 'normalize', (['"""b"""', '[3, 
4]'], {}), "('b', [3, 4])\n", (4392, 4405), False, 'from climetlab.decorators import normalize\n'), ((4548, 4670), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {'alias': "{'ab': ['a', 'b'], 'z': 'a', 'i': ['a', 'b'], 'j': 'ab', 'bad': ['a', 'ab']}"}), "('name', ['a', 'b', 'c'], alias={'ab': ['a', 'b'], 'z': 'a', 'i':\n ['a', 'b'], 'j': 'ab', 'bad': ['a', 'ab']})\n", (4557, 4670), False, 'from climetlab.decorators import normalize\n'), ((5555, 5627), 'climetlab.decorators.normalize', 'normalize', (['"""name"""', "['a', 'b', 'c']"], {'alias': "{'x': 'y', 'y': 'z', 'z': 'a'}"}), "('name', ['a', 'b', 'c'], alias={'x': 'y', 'y': 'z', 'z': 'a'})\n", (5564, 5627), False, 'from climetlab.decorators import normalize\n'), ((8466, 8534), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 8))'], {'reason': '"""Python < 3.8"""'}), "(sys.version_info < (3, 8), reason='Python < 3.8')\n", (8484, 8534), False, 'import pytest\n'), ((8836, 8904), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info < (3, 8))'], {'reason': '"""Python < 3.8"""'}), "(sys.version_info < (3, 8), reason='Python < 3.8')\n", (8854, 8904), False, 'import pytest\n'), ((1565, 1592), 'numpy.datetime64', 'np.datetime64', (['"""2016-01-01"""'], {}), "('2016-01-01')\n", (1578, 1592), True, 'import numpy as np\n'), ((2288, 2318), 'climetlab.normalize.DateListNormaliser', 'DateListNormaliser', (['"""%Y.%m.%d"""'], {}), "('%Y.%m.%d')\n", (2306, 2318), False, 'from climetlab.normalize import DateListNormaliser, EnumListNormaliser, EnumNormaliser\n'), ((2846, 2866), 'climetlab.normalize.DateListNormaliser', 'DateListNormaliser', ([], {}), '()\n', (2864, 2866), False, 'from climetlab.normalize import DateListNormaliser, EnumListNormaliser, EnumNormaliser\n'), ((6442, 6473), 'climetlab.normalize.EnumNormaliser', 'EnumNormaliser', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (6456, 6473), False, 'from climetlab.normalize import DateListNormaliser, 
EnumListNormaliser, EnumNormaliser\n'), ((7206, 7241), 'climetlab.normalize.EnumListNormaliser', 'EnumListNormaliser', (["['A', 'b', 'c']"], {}), "(['A', 'b', 'c'])\n", (7224, 7241), False, 'from climetlab.normalize import DateListNormaliser, EnumListNormaliser, EnumNormaliser\n'), ((7456, 7502), 'climetlab.utils.bbox.BoundingBox', 'BoundingBox', ([], {'north': '(30)', 'west': '(2)', 'south': '(3)', 'east': '(4)'}), '(north=30, west=2, south=3, east=4)\n', (7467, 7502), False, 'from climetlab.utils.bbox import BoundingBox\n'), ((9500, 9514), 'climetlab.testing.main', 'main', (['__file__'], {}), '(__file__)\n', (9504, 9514), False, 'from climetlab.testing import main\n'), ((1628, 1657), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (1645, 1657), False, 'import datetime\n'), ((1764, 1805), 'climetlab.testing.climetlab_file', 'climetlab_file', (['"""docs/examples/test.grib"""'], {}), "('docs/examples/test.grib')\n", (1778, 1805), False, 'from climetlab.testing import climetlab_file\n'), ((1840, 1877), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(12)', '(0)'], {}), '(2020, 5, 13, 12, 0)\n', (1857, 1877), False, 'import datetime\n'), ((1990, 2029), 'climetlab.testing.climetlab_file', 'climetlab_file', (['"""docs/examples/test.nc"""'], {}), "('docs/examples/test.nc')\n", (2004, 2029), False, 'from climetlab.testing import climetlab_file\n'), ((2056, 2090), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (2069, 2090), False, 'import pytest\n'), ((5146, 5171), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5159, 5171), False, 'import pytest\n'), ((6052, 6077), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6065, 6077), False, 'import pytest\n'), ((6108, 6133), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6121, 6133), False, 'import pytest\n'), ((6291, 6316), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6304, 6316), False, 'import pytest\n'), ((6358, 6382), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (6371, 6382), False, 'import pytest\n'), ((6543, 6568), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6556, 6568), False, 'import pytest\n'), ((6599, 6624), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6612, 6624), False, 'import pytest\n'), ((6949, 6974), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6962, 6974), False, 'import pytest\n'), ((7097, 7121), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (7110, 7121), False, 'import pytest\n'), ((7871, 7912), 'climetlab.testing.climetlab_file', 'climetlab_file', (['"""docs/examples/test.grib"""'], {}), "('docs/examples/test.grib')\n", (7885, 7912), False, 'from climetlab.testing import climetlab_file\n'), ((8010, 8049), 'climetlab.testing.climetlab_file', 'climetlab_file', (['"""docs/examples/test.nc"""'], {}), "('docs/examples/test.nc')\n", (8024, 8049), False, 'from climetlab.testing import climetlab_file\n'), ((8170, 8205), 'climetlab.decorators.normalize', 'normalize', (['"""param"""', "['a', 'b', 'c']"], {}), "('param', ['a', 'b', 'c'])\n", (8179, 8205), False, 'from climetlab.decorators import normalize\n'), ((8262, 8297), 'climetlab.decorators.normalize', 'normalize', (['"""param"""', "['a', 'b', 'c']"], {}), "('param', ['a', 'b', 'c'])\n", (8271, 8297), False, 'from climetlab.decorators import normalize\n'), ((1699, 1728), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(1)', '(1)'], {}), '(2016, 1, 1)\n', (1716, 1728), False, 'import datetime\n'), ((1917, 1954), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(12)', '(0)'], {}), '(2020, 5, 13, 12, 0)\n', (1934, 1954), False, 'import datetime\n'), ((2129, 2166), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(12)', 
'(0)'], {}), '(2020, 5, 13, 12, 0)\n', (2146, 2166), False, 'import datetime\n'), ((2899, 2935), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(0)', '(0)'], {}), '(2020, 5, 13, 0, 0)\n', (2916, 2935), False, 'import datetime\n'), ((3006, 3042), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(0)', '(0)'], {}), '(2020, 5, 13, 0, 0)\n', (3023, 3042), False, 'import datetime\n'), ((9259, 9302), 'climetlab.normalize._find_normaliser', 'normaliser', (['(1, 2)'], {'type': 'str', 'multiple': '(True)'}), '((1, 2), type=str, multiple=True)\n', (9269, 9302), True, 'from climetlab.normalize import _find_normaliser as normaliser\n'), ((9326, 9369), 'climetlab.normalize._find_normaliser', 'normaliser', (['(1, 2)'], {'type': 'str', 'multiple': '(True)'}), '((1, 2), type=str, multiple=True)\n', (9336, 9369), True, 'from climetlab.normalize import _find_normaliser as normaliser\n'), ((2210, 2247), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(12)', '(0)'], {}), '(2020, 5, 13, 12, 0)\n', (2227, 2247), False, 'import datetime\n'), ((2382, 2418), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(0)', '(0)'], {}), '(2020, 5, 13, 0, 0)\n', (2399, 2418), False, 'import datetime\n'), ((2456, 2494), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(23)', '(59)'], {}), '(2020, 5, 13, 23, 59)\n', (2473, 2494), False, 'import datetime\n'), ((2954, 2990), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(5)', '(13)', '(0)', '(0)'], {}), '(2020, 5, 13, 0, 0)\n', (2971, 2990), False, 'import datetime\n')] |
import numpy as np
import scipy.sparse as sp
import cvxpy as cp
from opymize.linear.diff import GradientOp, LaplacianOp
from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op
# Square domain [-pi, pi] x [-pi, pi] on which the analytic test function lives
testfun_domain = np.array([[-np.pi,np.pi],[-np.pi,np.pi]])
def testfun(pts):
x, y = pts[:,0], pts[:,1]
return np.sin(x) + x**2*np.cos(y)
def testfun_grad(pts):
x, y = pts[:,0], pts[:,1]
return np.vstack([np.cos(x) + 2*x*np.cos(y), -x**2*np.sin(y)]).T
def testfun_laplacian(pts):
x, y = pts[:,0], pts[:,1]
return (2 - x**2)*np.cos(y) - np.sin(x)
def cell_centered_grid(domain, shape):
    """Return cell-center coordinates and spacings of a regular grid.

    domain is an (ndims, 2) array of [lower, upper] bounds, shape the number
    of cells per axis.  Returns (pts, h) where pts is an (N, ndims) array of
    cell midpoints and h the per-axis cell width.
    """
    h = (domain[:,1] - domain[:,0])/shape
    ndims = len(shape)
    pts = np.mgrid[[slice(0.0,n) for n in shape]].reshape(ndims, -1).T
    # scale index coordinates to cell size, then shift to the cell centers
    pts = pts*h[None,:] + (domain[None,:,0] + 0.5*h[None,:])
    return pts, h
def test_grad():
    """Check adjointness, row-wise norms and the GPU kernel of GradientOp."""
    imagedims = (10, 12)
    imageh = np.array([0.3, 0.4])
    nchannels = 3
    grad = GradientOp(imagedims, nchannels, imageh=imageh)
    for operator in (grad, grad.adjoint):
        test_adjoint(operator)
        test_rowwise_lp(operator)
        test_gpu_op(operator)
def test_grad_fun(s):
    """Compare the discrete gradient of testfun with the analytic gradient.

    s controls the resolution (10**s cells per axis); the discretization
    error, measured away from the boundary, must shrink with the cell volume.
    """
    imagedims = (10**s, 10**s)
    ndims = len(imagedims)
    nchannels = 1
    grid, imageh = cell_centered_grid(testfun_domain, imagedims)
    # forward differences live on a staggered grid, shifted by half a cell
    staggered = grid + 0.5*imageh[None,:]
    vol = np.prod(imageh)
    grad = GradientOp(imagedims, nchannels, imageh=imageh)
    y = grad.y.new()
    x = testfun(grid).ravel()
    grad(x, y)
    y = y.reshape(imagedims + (ndims,))
    exact = testfun_grad(staggered).reshape(imagedims + (ndims,))
    # skip the last row/column where one-sided differences are inaccurate
    dif = y[:-1,:-1] - exact[:-1,:-1]
    assert np.linalg.norm(vol*dif.ravel()) < 2*vol
def test_lplcn(bdry):
    """Check adjointness, row-wise norms and the GPU kernel of LaplacianOp
    for the given boundary condition."""
    imagedims = (10, 12)
    nchannels = 3
    lplcn = LaplacianOp(imagedims, nchannels, boundary=bdry)
    for operator in (lplcn, lplcn.adjoint):
        test_adjoint(operator)
        test_rowwise_lp(operator)
        test_gpu_op(operator)
def test_lplcn_fun(bdry, s):
    """Compare the discrete Laplacian of testfun with the analytic Laplacian.

    s controls the resolution (10**s cells per axis); only interior cells are
    compared so the boundary handling does not enter the error estimate.
    """
    imagedims = (10**s, 10**s)
    nchannels = 1
    grid, imageh = cell_centered_grid(testfun_domain, imagedims)
    vol = np.prod(imageh)
    lplcn = LaplacianOp(imagedims, nchannels, imageh=imageh, boundary=bdry)
    y = lplcn.y.new()
    x = testfun(grid).ravel()
    lplcn(x, y)
    y = y.reshape(imagedims)
    exact = testfun_laplacian(grid).reshape(imagedims)
    dif = y[1:-1,1:-1] - exact[1:-1,1:-1]
    assert np.linalg.norm(vol*dif.ravel()) < 2*vol
def test_lplcn_ghost():
    """Validate the "curvature" boundary condition against an explicit
    ghost-cell formulation solved with CVXPY: pad the image by one ghost
    cell per side, fix the interior to the data, minimize the Laplacian
    energy on the interior, and compare with LaplacianOp's output."""
    def laplop(m, n):
        # Sparse 2-D Laplacian on an m-by-n grid built from Kronecker
        # products of 1-D second-difference stencils [1, -2, 1].
        ddn = sp.spdiags(np.ones(n)*np.array([[1, -2, 1]]).T, [-1, 0, 1], n, n)
        ddm = sp.spdiags(np.ones(m)*np.array([[1, -2, 1]]).T, [-1, 0, 1], m, m)
        return sp.kron(ddm, sp.eye(n,n)) + sp.kron(sp.eye(m,m), ddn)
    imagedims = np.array((30, 40))
    data = np.random.rand(*imagedims)
    op = LaplacianOp(imagedims, 1, boundary="curvature")
    Dy_curv = op.y.new().reshape(imagedims)
    op(data, Dy_curv)
    # padded ("ghost") image: one extra cell on every side
    gimagedims = imagedims+2
    # NOTE(review): the dims are reversed here, presumably to match cvxpy's
    # vec/reshape ordering conventions -- confirm against the cvxpy docs.
    A = cp.Constant(laplop(*gimagedims[::-1]))
    y = cp.Variable(gimagedims)
    Dy = cp.reshape(A*cp.vec(y), gimagedims)
    cp.Problem(
        cp.Minimize(cp.sum_squares(Dy[1:-1,1:-1])),
        [y[1:-1,1:-1] == data]
    ).solve()
    Dy_ghost = Dy.value
    # the interior of the ghost solution must match the operator's output
    assert np.linalg.norm(Dy_curv - Dy_ghost[1:-1,1:-1], ord=np.inf) < 1e-12
if __name__ == "__main__":
    # Exercise the gradient operator at several resolutions, then the
    # Laplacian operator for every supported boundary condition.
    print("=> Testing gradient operator...")
    test_grad()
    for scale in (1, 2, 3):
        test_grad_fun(scale)
    for bdry in ("curvature", "neumann", "second-order"):
        print("=> Testing Laplacian operator with %s bc..." % bdry)
        test_lplcn(bdry)
        for scale in (1, 2, 3):
            test_lplcn_fun(bdry, scale)
    test_lplcn_ghost()
| [
"opymize.tools.tests.test_rowwise_lp",
"opymize.linear.diff.GradientOp",
"cvxpy.vec",
"opymize.tools.tests.test_gpu_op",
"opymize.linear.diff.LaplacianOp",
"opymize.tools.tests.test_adjoint",
"scipy.sparse.eye",
"numpy.ones",
"numpy.sin",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"cvx... | [((215, 259), 'numpy.array', 'np.array', (['[[-np.pi, np.pi], [-np.pi, np.pi]]'], {}), '([[-np.pi, np.pi], [-np.pi, np.pi]])\n', (223, 259), True, 'import numpy as np\n'), ((888, 908), 'numpy.array', 'np.array', (['[0.3, 0.4]'], {}), '([0.3, 0.4])\n', (896, 908), True, 'import numpy as np\n'), ((964, 1011), 'opymize.linear.diff.GradientOp', 'GradientOp', (['imagedims', 'nchannels'], {'imageh': 'imageh'}), '(imagedims, nchannels, imageh=imageh)\n', (974, 1011), False, 'from opymize.linear.diff import GradientOp, LaplacianOp\n'), ((1335, 1350), 'numpy.prod', 'np.prod', (['imageh'], {}), '(imageh)\n', (1342, 1350), True, 'import numpy as np\n'), ((1362, 1409), 'opymize.linear.diff.GradientOp', 'GradientOp', (['imagedims', 'nchannels'], {'imageh': 'imageh'}), '(imagedims, nchannels, imageh=imageh)\n', (1372, 1409), False, 'from opymize.linear.diff import GradientOp, LaplacianOp\n'), ((1800, 1848), 'opymize.linear.diff.LaplacianOp', 'LaplacianOp', (['imagedims', 'nchannels'], {'boundary': 'bdry'}), '(imagedims, nchannels, boundary=bdry)\n', (1811, 1848), False, 'from opymize.linear.diff import GradientOp, LaplacianOp\n'), ((2143, 2158), 'numpy.prod', 'np.prod', (['imageh'], {}), '(imageh)\n', (2150, 2158), True, 'import numpy as np\n'), ((2171, 2234), 'opymize.linear.diff.LaplacianOp', 'LaplacianOp', (['imagedims', 'nchannels'], {'imageh': 'imageh', 'boundary': 'bdry'}), '(imagedims, nchannels, imageh=imageh, boundary=bdry)\n', (2182, 2234), False, 'from opymize.linear.diff import GradientOp, LaplacianOp\n'), ((2793, 2811), 'numpy.array', 'np.array', (['(30, 40)'], {}), '((30, 40))\n', (2801, 2811), True, 'import numpy as np\n'), ((2823, 2849), 'numpy.random.rand', 'np.random.rand', (['*imagedims'], {}), '(*imagedims)\n', (2837, 2849), True, 'import numpy as np\n'), ((2860, 2907), 'opymize.linear.diff.LaplacianOp', 'LaplacianOp', (['imagedims', '(1)'], {'boundary': '"""curvature"""'}), "(imagedims, 1, boundary='curvature')\n", (2871, 2907), False, 'from 
opymize.linear.diff import GradientOp, LaplacianOp\n'), ((3059, 3082), 'cvxpy.Variable', 'cp.Variable', (['gimagedims'], {}), '(gimagedims)\n', (3070, 3082), True, 'import cvxpy as cp\n'), ((317, 326), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (323, 326), True, 'import numpy as np\n'), ((560, 569), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (566, 569), True, 'import numpy as np\n'), ((1055, 1071), 'opymize.tools.tests.test_adjoint', 'test_adjoint', (['op'], {}), '(op)\n', (1067, 1071), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((1080, 1099), 'opymize.tools.tests.test_rowwise_lp', 'test_rowwise_lp', (['op'], {}), '(op)\n', (1095, 1099), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((1108, 1123), 'opymize.tools.tests.test_gpu_op', 'test_gpu_op', (['op'], {}), '(op)\n', (1119, 1123), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((1894, 1910), 'opymize.tools.tests.test_adjoint', 'test_adjoint', (['op'], {}), '(op)\n', (1906, 1910), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((1919, 1938), 'opymize.tools.tests.test_rowwise_lp', 'test_rowwise_lp', (['op'], {}), '(op)\n', (1934, 1938), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((1947, 1962), 'opymize.tools.tests.test_gpu_op', 'test_gpu_op', (['op'], {}), '(op)\n', (1958, 1962), False, 'from opymize.tools.tests import test_adjoint, test_rowwise_lp, test_gpu_op\n'), ((3277, 3335), 'numpy.linalg.norm', 'np.linalg.norm', (['(Dy_curv - Dy_ghost[1:-1, 1:-1])'], {'ord': 'np.inf'}), '(Dy_curv - Dy_ghost[1:-1, 1:-1], ord=np.inf)\n', (3291, 3335), True, 'import numpy as np\n'), ((334, 343), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (340, 343), True, 'import numpy as np\n'), ((548, 557), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (554, 557), True, 'import numpy as np\n'), ((3105, 3114), 'cvxpy.vec', 
'cp.vec', (['y'], {}), '(y)\n', (3111, 3114), True, 'import cvxpy as cp\n'), ((2572, 2582), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (2579, 2582), True, 'import numpy as np\n'), ((2652, 2662), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2659, 2662), True, 'import numpy as np\n'), ((2735, 2747), 'scipy.sparse.eye', 'sp.eye', (['n', 'n'], {}), '(n, n)\n', (2741, 2747), True, 'import scipy.sparse as sp\n'), ((2758, 2770), 'scipy.sparse.eye', 'sp.eye', (['m', 'm'], {}), '(m, m)\n', (2764, 2770), True, 'import scipy.sparse as sp\n'), ((420, 429), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (426, 429), True, 'import numpy as np\n'), ((453, 462), 'numpy.sin', 'np.sin', (['y'], {}), '(y)\n', (459, 462), True, 'import numpy as np\n'), ((2583, 2605), 'numpy.array', 'np.array', (['[[1, -2, 1]]'], {}), '([[1, -2, 1]])\n', (2591, 2605), True, 'import numpy as np\n'), ((2663, 2685), 'numpy.array', 'np.array', (['[[1, -2, 1]]'], {}), '([[1, -2, 1]])\n', (2671, 2685), True, 'import numpy as np\n'), ((3164, 3194), 'cvxpy.sum_squares', 'cp.sum_squares', (['Dy[1:-1, 1:-1]'], {}), '(Dy[1:-1, 1:-1])\n', (3178, 3194), True, 'import cvxpy as cp\n'), ((436, 445), 'numpy.cos', 'np.cos', (['y'], {}), '(y)\n', (442, 445), True, 'import numpy as np\n')] |
#Xing @ 2018.12.05
# Hand-tracking demo: skin-colour segmentation + KNN background subtraction.
# Tracks both hand trajectories and plots them on exit.
import cv2
import numpy as np
import time
import matplotlib.pyplot as plt
# Initialize webcam input
cap = cv2.VideoCapture(1)
# Initialize video input
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Pronated Wrist/WATCH2.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Stomach/FARM.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Below Waist/LAP.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Restricted/L2n.mov")
# Tracking delay in number of frames, to wait for the KNN background subtraction to settle (Camera: 30; Video: 5)
DELAY= 5
# Minimum enclosing-circle radius for a contour to be tracked (denoising).
# Adjust depending on distance between tracked object and camera (Camera: 45-55; Video: 35)
RADIUS = 55
# Number of hand-free frames after which the trails are plotted and reset
FRAME = 100
# Initialize frame counter used for the trail reset above
frame_count = 0
# Range of skin color in HSV (works well with brown skin)
lower_thresh = np.array([0, 48, 80], dtype = "uint8")
upper_thresh = np.array([20, 255, 255], dtype = "uint8")
# Empty point lists accumulating each hand's trajectory
points_left = []
points_right = []
# Initialize K-Nearest Neighbors (KNN) background subtractor
kernel_bgsub = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
fgbg = cv2.createBackgroundSubtractorKNN()
# Sorting contour by area
def get_contour_areas(contours):
    """Return the area of every contour in `contours` as a list."""
    return [cv2.contourArea(cnt) for cnt in contours]
# Sorting contour by position
def x_cord_contour(contours):
    """Return the X coordinate of the contour's centroid.

    Used as a sort key for left-to-right hand ordering.  Degenerate
    contours (zero area, i.e. m00 == 0, e.g. a straight line of points)
    previously raised ZeroDivisionError; they now sort at x = 0.
    """
    M = cv2.moments(contours)
    if M['m00'] == 0:
        # no centroid is defined for a zero-area contour
        return 0
    return (int(M['m10']/M['m00']))
#Plot trajectores
def plot_trajectories(center,str, clr):
    """Plot one hand trajectory (a list of (x, y) centers) in image space."""
    plt.plot([pt[0] for pt in center], [pt[1] for pt in center], color= clr)
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.title(str + ' hand trajectores')
    # OpenCV's origin is the top-left corner, so flip Y to match image space
    plt.gca().invert_yaxis()
    # flip X as well so the trajectory appears as a mirror view
    plt.gca().invert_xaxis()
    plt.show()
    return None
# Main loop: read frames, segment skin colour, subtract background, find the
# two largest contours (the hands) and accumulate/draw their trajectories.
# Runs until the video ends or the Enter key is pressed.
while cap.isOpened():
    # Read webcam/video image
    ret, frame = cap.read()
    # when there is a video input
    if ret == True:
        # Get default camera/video window size
        Height, Width = frame.shape[:2]
        # Convert image from RBG/BGR to HSV
        hsv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # Face Detection Using HAAR CASCADE
        hc_face = cv2.CascadeClassifier("C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml")
        faces = hc_face.detectMultiScale(hsv_img)
        for (x,y,w,h) in faces:
            cv2.rectangle(hsv_img, (x,y), (x+w,y+h), 255, thickness=2)
            crop_img = frame[y+2:y+w, x+2:x+h]
            cv2.imshow('Face Detection', crop_img)
        # Use inRange to capture only the values between lower & upper_thresh for skin detection
        mask = cv2.inRange(hsv_img, lower_thresh, upper_thresh)
        # Adding morphology effects to denoise
        kernel_morphology =np.ones((5, 5), np.uint8)
        mask = cv2.erode(mask, kernel_morphology, iterations=1)
        #mask=cv2.morphologyEx(mask,cv2.MORPH_OPEN, kernel_morphology)
        mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel_morphology)
        mask = cv2.dilate(mask, kernel_morphology, iterations=1)
        cv2.imshow('Skin colour + Morpho Mask', mask)
        # Perform Bitwise AND on mask and original frame
        # rest1 is the results after applying morphology effects + skin filtering
        rest1 = cv2.bitwise_and(frame, frame, mask= mask)
        # Apply KNN background subtraction to refine the skin filtering result,
        # i.e. to further remove static skin-colour-related background
        # (the face fades out if it does not move)
        fgmask = fgbg.apply(rest1)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel_bgsub)
        cv2.imshow('Background subtraction + Skin colour + Morpho Mask',fgmask)
        # Perform Bitwise AND on fgmask and rest1 frame
        # rest2 is results after applying background subtraction + morphology effects + skin filtering
        rest2 = cv2.bitwise_and(rest1, rest1, mask= fgmask)
        # Find contours
        # cv2.RETR_EXTERNAL finds external contours only; cv2.CHAIN_APPROX_SIMPLE only provides start and end points of bounding contours, thus resulting in much more efficent storage of contour information.
        _, contours, _ = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        print ("Number of contours1 found = ", len(contours))
        #print(type(contours)) #The variable 'contours' are stored as a numpy array of (x,y) points that form the contour
        # Draw all Contours found
        #cv2.drawContours(rest2, contours, -1, (0,255,0), 3)
        #cv2.imshow('All Contours filtered by skin color and background subtraction', rest2)
        #cv2.imshow('Original', frame)
        # When both hands are detected
        if len(contours) >=2:
            # Get the largest two contours and its center (i.e. two hands)
            sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
            # Sort by reverse=True, using our x_cord_contour function (i.e. hands tracking from left to right)
            contours_left_to_right = sorted(sorted_contours, key = x_cord_contour, reverse = True)
            # Iterate over two contours and draw one at a time
            for (i,c) in enumerate(contours_left_to_right):
                # Draw Convex Hull Contour
                hull=cv2.convexHull(c)
                cv2.drawContours(rest2, [hull], -1, (0,0,255), 3)
                # Draw Normal Contour
                cv2.drawContours(rest2, [c], -1, (255,0,0), 3)
                # Show hands Contour
                cv2.imshow('Contours by area', rest2)
                # Tracking Left hand
                if i == 0:
                    (x, y), radius = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    # Draw circle and keep the past centers, creating a trail
                    cv2.circle(frame, (int(x), int(y)), int(radius),(0, 0, 255), 2)
                    # Only contours with radius > RADIUS are tracked (de-noise)
                    if radius > RADIUS:
                        points_left.append(center)
                        # loop over the set of tracked points to draw tracking lines (starts with frames delay- to wait for KNN background subtraction work)
                        for l in range(DELAY, len(points_left)):
                            try:
                                cv2.line(frame, points_left[l - 1], points_left[l], (0, 0, 255), 2)
                            except:
                                pass
                        frame_count = 0
                    else:
                        frame_count += 1
                        # If no hand is detected for FRAME frames, plot the trajectories before clearing the trails
                        if frame_count == FRAME:
                            print("frame_count",frame_count)
                            plot_trajectories(points_left,"Left", "red")
                            plot_trajectories(points_right, "Right", "green")
                            points_left = []
                            points_right = []
                            frame_count = 0
                # Tracking Right hand
                else:
                    (x, y), radius = cv2.minEnclosingCircle(c)
                    M = cv2.moments(c)
                    center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
                    # Draw circle and keep the past centers, creating a trail
                    cv2.circle(frame, (int(x), int(y)), int(radius),(0, 255, 0), 2)
                    # loop over the set of tracked points
                    if radius > RADIUS:
                        points_right.append(center)
                        for l in range(DELAY, len(points_right)):
                            try:
                                cv2.line(frame, points_right[l - 1], points_right[l], (0, 255, 0), 2)
                            except:
                                pass
                        frame_count = 0
                    else:
                        frame_count += 1
                        # If no hand is detected for FRAME frames, plot the trajectories before clearing the trails
                        if frame_count == FRAME:
                            print("frame_count",frame_count)
                            plot_trajectories(points_left, "Left", "red")
                            plot_trajectories(points_right, "Right", "green")
                            points_left = []
                            points_right = []
                            frame_count = 0
        else:
            pass
        # Display our object tracker (mirrored for a natural view)
        frame = cv2.flip(frame, 1)
        cv2.imshow("Object Tracker", frame)
        if cv2.waitKey(1) == 13: #13 is the Enter Key
            plot_trajectories(points_left, "Left", "red")
            plot_trajectories(points_right, "Right", "green")
            break
    else:
        if cv2.waitKey(1) == 13: #13 is the Enter Key
            plot_trajectories(points_left, "Left", "red")
            plot_trajectories(points_right, "Right", "green")
            break
cap.release()
cv2.destroyAllWindows()
| [
"matplotlib.pyplot.title",
"cv2.bitwise_and",
"numpy.ones",
"matplotlib.pyplot.gca",
"cv2.rectangle",
"cv2.erode",
"cv2.imshow",
"cv2.inRange",
"cv2.line",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"cv2.drawContours",
"cv2.destroyAllWindows",
"matplotlib.pyplot.show",
"cv2.minEn... | [((128, 147), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(1)'], {}), '(1)\n', (144, 147), False, 'import cv2\n'), ((1135, 1171), 'numpy.array', 'np.array', (['[0, 48, 80]'], {'dtype': '"""uint8"""'}), "([0, 48, 80], dtype='uint8')\n", (1143, 1171), True, 'import numpy as np\n'), ((1189, 1228), 'numpy.array', 'np.array', (['[20, 255, 255]'], {'dtype': '"""uint8"""'}), "([20, 255, 255], dtype='uint8')\n", (1197, 1228), True, 'import numpy as np\n'), ((1403, 1455), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv2.MORPH_ELLIPSE, (3, 3))\n', (1428, 1455), False, 'import cv2\n'), ((1461, 1496), 'cv2.createBackgroundSubtractorKNN', 'cv2.createBackgroundSubtractorKNN', ([], {}), '()\n', (1494, 1496), False, 'import cv2\n'), ((10363, 10386), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10384, 10386), False, 'import cv2\n'), ((1862, 1883), 'cv2.moments', 'cv2.moments', (['contours'], {}), '(contours)\n', (1873, 1883), False, 'import cv2\n'), ((2047, 2074), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'color': 'clr'}), '(xs, ys, color=clr)\n', (2055, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2080, 2095), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""X"""'], {}), "('X')\n", (2090, 2095), True, 'import matplotlib.pyplot as plt\n'), ((2100, 2115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Y"""'], {}), "('Y')\n", (2110, 2115), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2156), 'matplotlib.pyplot.title', 'plt.title', (["(str + ' hand trajectores')"], {}), "(str + ' hand trajectores')\n", (2129, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2393, 2403), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2401, 2403), True, 'import matplotlib.pyplot as plt\n'), ((1666, 1686), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (1681, 1686), False, 'import cv2\n'), ((2781, 2819), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 
'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2793, 2819), False, 'import cv2\n'), ((2892, 3030), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml"""'], {}), "(\n 'C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml'\n )\n", (2913, 3030), False, 'import cv2\n'), ((3392, 3440), 'cv2.inRange', 'cv2.inRange', (['hsv_img', 'lower_thresh', 'upper_thresh'], {}), '(hsv_img, lower_thresh, upper_thresh)\n', (3403, 3440), False, 'import cv2\n'), ((3525, 3550), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (3532, 3550), True, 'import numpy as np\n'), ((3566, 3614), 'cv2.erode', 'cv2.erode', (['mask', 'kernel_morphology'], {'iterations': '(1)'}), '(mask, kernel_morphology, iterations=1)\n', (3575, 3614), False, 'import cv2\n'), ((3699, 3757), 'cv2.morphologyEx', 'cv2.morphologyEx', (['mask', 'cv2.MORPH_CLOSE', 'kernel_morphology'], {}), '(mask, cv2.MORPH_CLOSE, kernel_morphology)\n', (3715, 3757), False, 'import cv2\n'), ((3771, 3820), 'cv2.dilate', 'cv2.dilate', (['mask', 'kernel_morphology'], {'iterations': '(1)'}), '(mask, kernel_morphology, iterations=1)\n', (3781, 3820), False, 'import cv2\n'), ((3829, 3874), 'cv2.imshow', 'cv2.imshow', (['"""Skin colour + Morpho Mask"""', 'mask'], {}), "('Skin colour + Morpho Mask', mask)\n", (3839, 3874), False, 'import cv2\n'), ((4031, 4071), 'cv2.bitwise_and', 'cv2.bitwise_and', (['frame', 'frame'], {'mask': 'mask'}), '(frame, frame, mask=mask)\n', (4046, 4071), False, 'import cv2\n'), ((4319, 4373), 'cv2.morphologyEx', 'cv2.morphologyEx', (['fgmask', 'cv2.MORPH_OPEN', 'kernel_bgsub'], {}), '(fgmask, cv2.MORPH_OPEN, kernel_bgsub)\n', (4335, 4373), False, 'import cv2\n'), ((4382, 4454), 'cv2.imshow', 'cv2.imshow', (['"""Background subtraction + Skin colour + Morpho Mask"""', 'fgmask'], {}), "('Background subtraction + Skin colour 
+ Morpho Mask', fgmask)\n", (4392, 4454), False, 'import cv2\n'), ((4632, 4674), 'cv2.bitwise_and', 'cv2.bitwise_and', (['rest1', 'rest1'], {'mask': 'fgmask'}), '(rest1, rest1, mask=fgmask)\n', (4647, 4674), False, 'import cv2\n'), ((9869, 9887), 'cv2.flip', 'cv2.flip', (['frame', '(1)'], {}), '(frame, 1)\n', (9877, 9887), False, 'import cv2\n'), ((9896, 9931), 'cv2.imshow', 'cv2.imshow', (['"""Object Tracker"""', 'frame'], {}), "('Object Tracker', frame)\n", (9906, 9931), False, 'import cv2\n'), ((2161, 2170), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2168, 2170), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2302), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2300, 2302), True, 'import matplotlib.pyplot as plt\n'), ((3115, 3179), 'cv2.rectangle', 'cv2.rectangle', (['hsv_img', '(x, y)', '(x + w, y + h)', '(255)'], {'thickness': '(2)'}), '(hsv_img, (x, y), (x + w, y + h), 255, thickness=2)\n', (3128, 3179), False, 'import cv2\n'), ((3233, 3271), 'cv2.imshow', 'cv2.imshow', (['"""Face Detection"""', 'crop_img'], {}), "('Face Detection', crop_img)\n", (3243, 3271), False, 'import cv2\n'), ((9953, 9967), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (9964, 9967), False, 'import cv2\n'), ((10156, 10170), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10167, 10170), False, 'import cv2\n'), ((6122, 6139), 'cv2.convexHull', 'cv2.convexHull', (['c'], {}), '(c)\n', (6136, 6139), False, 'import cv2\n'), ((6156, 6207), 'cv2.drawContours', 'cv2.drawContours', (['rest2', '[hull]', '(-1)', '(0, 0, 255)', '(3)'], {}), '(rest2, [hull], -1, (0, 0, 255), 3)\n', (6172, 6207), False, 'import cv2\n'), ((6283, 6331), 'cv2.drawContours', 'cv2.drawContours', (['rest2', '[c]', '(-1)', '(255, 0, 0)', '(3)'], {}), '(rest2, [c], -1, (255, 0, 0), 3)\n', (6299, 6331), False, 'import cv2\n'), ((6411, 6448), 'cv2.imshow', 'cv2.imshow', (['"""Contours by area"""', 'rest2'], {}), "('Contours by area', rest2)\n", (6421, 6448), False, 'import cv2\n'), 
((6555, 6580), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (6577, 6580), False, 'import cv2\n'), ((6605, 6619), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (6616, 6619), False, 'import cv2\n'), ((8360, 8385), 'cv2.minEnclosingCircle', 'cv2.minEnclosingCircle', (['c'], {}), '(c)\n', (8382, 8385), False, 'import cv2\n'), ((8410, 8424), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (8421, 8424), False, 'import cv2\n'), ((7369, 7436), 'cv2.line', 'cv2.line', (['frame', 'points_left[l - 1]', 'points_left[l]', '(0, 0, 255)', '(2)'], {}), '(frame, points_left[l - 1], points_left[l], (0, 0, 255), 2)\n', (7377, 7436), False, 'import cv2\n'), ((8970, 9039), 'cv2.line', 'cv2.line', (['frame', 'points_right[l - 1]', 'points_right[l]', '(0, 255, 0)', '(2)'], {}), '(frame, points_right[l - 1], points_right[l], (0, 255, 0), 2)\n', (8978, 9039), False, 'import cv2\n')] |
import numpy as np
from qutip import (sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN,
Qobj, tensor, snot)
from scipy import linalg
from scipy.sparse import csc_matrix
import copy
import itertools
from utils import generate_u3
class ProbDist:
    """Base class for probability distributions used in fidelity estimation.

    On construction it enumerates every length-`nqubits` Pauli index string
    over the characters '0'-'3'; subclasses implement `get_probabilities`.
    """
    def __init__(self, nqubits):
        """Store the qubit count and enumerate all Pauli index strings."""
        self.nqubits = nqubits
        self.pauli_strings = self.pauli_permutations()

    def get_probabilities(self):
        """Subclasses must return the sampling distribution."""
        raise NotImplementedError

    def pauli_permutations(self):
        # Cartesian product '0123' x ... x '0123', joined into strings
        return [''.join(chars)
                for chars in itertools.product('0123', repeat=self.nqubits)]
class ChiProbDist(ProbDist):
    """Probability distribution for estimating the 0-fidelity based on an
    adaptation of 10.1103/PhysRevLett.106.230501

    For every pair of Pauli index strings (input state k, observable k') it
    precomputes chi(k, k') = Tr[U rho_k U^dag W_k'] together with a sampling
    probability proportional to Re(chi)^2 / d^3.
    """
    def __init__(self, nqubits: int, U: Qobj):
        """Precompute operators, input states and the chi/probability tables
        for the target unitary `U` acting on `nqubits` qubits."""
        super().__init__(nqubits)
        self.tens_ops = self.get_tensored_ops()        # dense Pauli-string observables
        self.tens_states = self.get_tensored_states()  # dense state-preparation unitaries
        self.U = U.full()  # dense ndarray of the target unitary
        self.probabilities, self.chi_dict = self.get_probs_and_chis()
    def get_probs_and_chis(self):
        """Return (probabilities, chi_dict), both dicts keyed by
        (input-state string, observable string) tuples."""
        d = 2**self.nqubits
        input_states, observables = self.generate_states_observables()
        probabilities = {}
        chi_dict = {}
        for _state_idx in self.pauli_strings:
            for _obs_idx in self.pauli_strings:
                _state = input_states[_state_idx]
                _obs = observables[_obs_idx]
                # chi = Tr[U rho U^dag W]: build the product, then sum its diagonal.
                _trace = np.dot(self.U, _state)
                _trace = np.dot(_trace, np.conj(np.transpose(self.U)))
                _trace = np.dot(_trace, _obs)
                _trace = _trace.diagonal()
                chi = _trace.sum(axis=0)
                chi_dict[_state_idx, _obs_idx] = chi
                # sampling weight uses only the real part of chi
                probabilities[(_state_idx, _obs_idx)] = (
                    1/d**3)*np.real(chi)**2
        return probabilities, chi_dict
    def generate_states_observables(self):
        """Map each Pauli string to its input density matrix (the state prepared
        from |0...0> by the tensored rotation) and to its observable."""
        init_state = tensor([basis(2, 0)] * self.nqubits).full()
        input_states = {}
        observables = {}
        for i, _op in enumerate(self.tens_ops):
            _state = self.pauli_strings[i]
            _input_state = copy.deepcopy(self.tens_states[i])
            observables[_state] = copy.deepcopy(_op)
            _init_copy = copy.deepcopy(init_state)
            state = np.dot(_input_state, _init_copy)  # |psi> = S |0...0>
            # rho = |psi><psi|
            input_states[_state] = np.dot(state, np.conj(np.transpose(state)))
        return input_states, observables
    def get_tensored_ops(self):
        """Dense tensor products of single-qubit Paulis, one per index string
        ('0' -> I, '1' -> X, '2' -> Y, '3' -> Z)."""
        tens_ops = []
        for _state in self.pauli_strings:
            _ops = []
            for i in _state:
                if i == '0':
                    _ops.append(qeye(2))
                if i == '1':
                    _ops.append(sigmax())
                if i == '2':
                    _ops.append(sigmay())
                if i == '3':
                    _ops.append(sigmaz())
            _op = tensor(_ops)
            tens_ops.append(_op.full())
        return tens_ops
    def get_tensored_states(self):
        """Dense tensor products of state-preparation unitaries, one per index
        string.  Non-identity factors are u3 rotations with polar angle
        arccos(-1/3) and azimuths 2*pi/3 apart -- presumably preparing
        tetrahedral (SIC-like) single-qubit states; TODO confirm against
        `generate_u3` in utils."""
        tens_ops = []
        for _state in self.pauli_strings:
            _ops = []
            for i in _state:
                if i == '0':
                    _ops.append(qeye(2))
                if i == '1':
                    _ops.append(generate_u3(np.arccos(-1/3), 0, 0))
                if i == '2':
                    _ops.append(generate_u3(np.arccos(-1/3), 2*np.pi/3, 0))
                if i == '3':
                    _ops.append(generate_u3(np.arccos(-1/3), 4*np.pi/3, 0))
            _op = tensor(_ops)
            tens_ops.append(_op.full())
        return tens_ops
class FlammiaProbDist(ProbDist):
    """Probability distribution for estimating the process fidelity as in
    10.1103/PhysRevLett.106.230501

    Here the input "states" are the Pauli operators themselves, so
    chi(k, k') = Tr[U W_k U^dag W_k'] and the sampling probability is
    |chi^2| / d^4.
    """
    def __init__(self, nqubits: int, U: Qobj):
        """Precompute operators and the chi/probability tables for the
        target unitary `U` acting on `nqubits` qubits."""
        super().__init__(nqubits)
        self.tens_ops = self.get_tensored_ops()
        self.tens_states = self.get_tensored_states()
        self.U = U.full()  # dense ndarray of the target unitary
        self.probabilities, self.chi_dict = self.get_probs_and_chis()
    def get_probs_and_chis(self):
        """Return (probabilities, chi_dict), both dicts keyed by
        (input-state string, observable string) tuples."""
        d = 2**self.nqubits
        input_states, observables = self.generate_states_observables()
        probabilities = {}
        chi_dict = {}
        for _state_idx in self.pauli_strings:
            for _obs_idx in self.pauli_strings:
                _state = input_states[_state_idx]
                _obs = observables[_obs_idx]
                # chi = Tr[U W U^dag W']: build the product, then sum its diagonal.
                _trace = np.dot(self.U, _state)
                _trace = np.dot(_trace, np.conj(np.transpose(self.U)))
                _trace = np.dot(_trace, _obs)
                _trace = _trace.diagonal()
                chi = _trace.sum(axis=0)
                chi_dict[_state_idx, _obs_idx] = chi # np.real(chi)
                probabilities[(_state_idx, _obs_idx)] = np.abs((1/d**4)*chi**2)
        return probabilities, chi_dict
    def generate_states_observables(self):
        """Map each Pauli string to its input operator and observable.
        Both are the same tensored Pauli; copies keep them independent."""
        input_states = {}
        observables = {}
        for i, _op in enumerate(self.tens_ops):
            _state = self.pauli_strings[i]
            _input_state = copy.deepcopy(_op)
            observables[_state] = copy.deepcopy(_op)
            input_states[_state] = _input_state
        return input_states, observables
    def get_tensored_ops(self):
        """Dense tensor products of single-qubit Paulis, one per index string
        ('0' -> I, '1' -> X, '2' -> Y, '3' -> Z)."""
        tens_ops = []
        for _state in self.pauli_strings:
            _ops = []
            for i in _state:
                if i == '0':
                    _ops.append(qeye(2))
                if i == '1':
                    _ops.append(sigmax())
                if i == '2':
                    _ops.append(sigmay())
                if i == '3':
                    _ops.append(sigmaz())
            _op = tensor(_ops)
            tens_ops.append(_op.full())
        return tens_ops
    def get_tensored_states(self):
        """Identical to get_tensored_ops: in this scheme the "input states"
        are the Pauli operators themselves.  NOTE(review): exact duplicate
        of get_tensored_ops -- presumably intentional; could delegate."""
        tens_ops = []
        for _state in self.pauli_strings:
            _ops = []
            for i in _state:
                if i == '0':
                    _ops.append(qeye(2))
                if i == '1':
                    _ops.append(sigmax())
                if i == '2':
                    _ops.append(sigmay())
                if i == '3':
                    _ops.append(sigmaz())
            _op = tensor(_ops)
            tens_ops.append(_op.full())
        return tens_ops
| [
"copy.deepcopy",
"qutip.sigmaz",
"qutip.tensor",
"numpy.abs",
"numpy.transpose",
"qutip.qeye",
"qutip.sigmax",
"qutip.sigmay",
"numpy.real",
"itertools.product",
"qutip.basis",
"numpy.dot",
"numpy.arccos"
] | [((2219, 2253), 'copy.deepcopy', 'copy.deepcopy', (['self.tens_states[i]'], {}), '(self.tens_states[i])\n', (2232, 2253), False, 'import copy\n'), ((2288, 2306), 'copy.deepcopy', 'copy.deepcopy', (['_op'], {}), '(_op)\n', (2301, 2306), False, 'import copy\n'), ((2332, 2357), 'copy.deepcopy', 'copy.deepcopy', (['init_state'], {}), '(init_state)\n', (2345, 2357), False, 'import copy\n'), ((2378, 2410), 'numpy.dot', 'np.dot', (['_input_state', '_init_copy'], {}), '(_input_state, _init_copy)\n', (2384, 2410), True, 'import numpy as np\n'), ((2981, 2993), 'qutip.tensor', 'tensor', (['_ops'], {}), '(_ops)\n', (2987, 2993), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((3605, 3617), 'qutip.tensor', 'tensor', (['_ops'], {}), '(_ops)\n', (3611, 3617), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((5140, 5158), 'copy.deepcopy', 'copy.deepcopy', (['_op'], {}), '(_op)\n', (5153, 5158), False, 'import copy\n'), ((5193, 5211), 'copy.deepcopy', 'copy.deepcopy', (['_op'], {}), '(_op)\n', (5206, 5211), False, 'import copy\n'), ((5751, 5763), 'qutip.tensor', 'tensor', (['_ops'], {}), '(_ops)\n', (5757, 5763), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((6281, 6293), 'qutip.tensor', 'tensor', (['_ops'], {}), '(_ops)\n', (6287, 6293), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((636, 682), 'itertools.product', 'itertools.product', (['"""0123"""'], {'repeat': 'self.nqubits'}), "('0123', repeat=self.nqubits)\n", (653, 682), False, 'import itertools\n'), ((1523, 1545), 'numpy.dot', 'np.dot', (['self.U', '_state'], {}), '(self.U, _state)\n', (1529, 1545), True, 'import numpy as np\n'), ((1642, 1662), 'numpy.dot', 'np.dot', (['_trace', '_obs'], {}), '(_trace, _obs)\n', (1648, 1662), True, 'import numpy as np\n'), ((4515, 4537), 'numpy.dot', 
'np.dot', (['self.U', '_state'], {}), '(self.U, _state)\n', (4521, 4537), True, 'import numpy as np\n'), ((4634, 4654), 'numpy.dot', 'np.dot', (['_trace', '_obs'], {}), '(_trace, _obs)\n', (4640, 4654), True, 'import numpy as np\n'), ((4864, 4893), 'numpy.abs', 'np.abs', (['(1 / d ** 4 * chi ** 2)'], {}), '(1 / d ** 4 * chi ** 2)\n', (4870, 4893), True, 'import numpy as np\n'), ((2468, 2487), 'numpy.transpose', 'np.transpose', (['state'], {}), '(state)\n', (2480, 2487), True, 'import numpy as np\n'), ((1594, 1614), 'numpy.transpose', 'np.transpose', (['self.U'], {}), '(self.U)\n', (1606, 1614), True, 'import numpy as np\n'), ((1886, 1898), 'numpy.real', 'np.real', (['chi'], {}), '(chi)\n', (1893, 1898), True, 'import numpy as np\n'), ((2741, 2748), 'qutip.qeye', 'qeye', (['(2)'], {}), '(2)\n', (2745, 2748), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((2811, 2819), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (2817, 2819), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((2882, 2890), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (2888, 2890), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((2953, 2961), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (2959, 2961), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((3271, 3278), 'qutip.qeye', 'qeye', (['(2)'], {}), '(2)\n', (3275, 3278), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((4586, 4606), 'numpy.transpose', 'np.transpose', (['self.U'], {}), '(self.U)\n', (4598, 4606), True, 'import numpy as np\n'), ((5511, 5518), 'qutip.qeye', 'qeye', (['(2)'], {}), '(2)\n', (5515, 5518), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((5581, 5589), 'qutip.sigmax', 'sigmax', ([], {}), 
'()\n', (5587, 5589), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((5652, 5660), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (5658, 5660), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((5723, 5731), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (5729, 5731), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((6041, 6048), 'qutip.qeye', 'qeye', (['(2)'], {}), '(2)\n', (6045, 6048), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((6111, 6119), 'qutip.sigmax', 'sigmax', ([], {}), '()\n', (6117, 6119), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((6182, 6190), 'qutip.sigmay', 'sigmay', ([], {}), '()\n', (6188, 6190), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((6253, 6261), 'qutip.sigmaz', 'sigmaz', ([], {}), '()\n', (6259, 6261), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((2014, 2025), 'qutip.basis', 'basis', (['(2)', '(0)'], {}), '(2, 0)\n', (2019, 2025), False, 'from qutip import sigmax, sigmay, sigmaz, qeye, basis, gate_expand_1toN, Qobj, tensor, snot\n'), ((3353, 3370), 'numpy.arccos', 'np.arccos', (['(-1 / 3)'], {}), '(-1 / 3)\n', (3362, 3370), True, 'import numpy as np\n'), ((3450, 3467), 'numpy.arccos', 'np.arccos', (['(-1 / 3)'], {}), '(-1 / 3)\n', (3459, 3467), True, 'import numpy as np\n'), ((3555, 3572), 'numpy.arccos', 'np.arccos', (['(-1 / 3)'], {}), '(-1 / 3)\n', (3564, 3572), True, 'import numpy as np\n')] |
import numpy as np
from ..bath import PseudomodeBath
from .base import DynamicalModel, SystemOperator
from .liouville_space import matrix_to_ket_vec
class ZOFESpaceOperator(SystemOperator):
    """
    Parameters
    ----------
    operator : np.ndarray
        Matrix representation of the operator in the Hilbert subspace of
        `dynamical_model`.
    liouv_subspace_map : string
        String in the form 'eg->ee' indicating the mapping between
        Liouville subspaces on which the operator should act.
    dynamical_model : ZOFEModel
        ZOFE dynamical model on which this operator acts.
    """
    def __init__(self, operator, liouv_subspace_map, dynamical_model):
        # NOTE(review): liouv_subspace_map is accepted for interface
        # compatibility but never stored or used here -- confirm intentional.
        self.operator = operator
        self.dynamical_model = dynamical_model
    def left_multiply(self, state):
        """Left-multiply the packed state vector (density matrix rho plus the
        auxiliary O-operator) by this operator; returns a packed vector."""
        rho0, oop0 = self.dynamical_model.state_vec_to_operators(state)
        rho1 = self.operator.dot(rho0)
        #oop1 = np.einsum('cd,abde->abce', self.operator, oop0)
        # tensordot contracts the operator with axis 2 of the
        # (pseudomode, site, N, N) auxiliary operator; rollaxis restores
        # the contracted axis to its original position.
        oop1 = np.rollaxis(np.tensordot(self.operator, oop0, axes=[1, 2]), 0, 3) #faster than einsum
        return self.dynamical_model.operators_to_state_vec(rho1, oop1)
    def right_multiply(self, state):
        """Right-multiply the packed state vector by this operator."""
        rho0, oop0 = self.dynamical_model.state_vec_to_operators(state)
        rho1 = rho0.dot(self.operator)
        oop1 = oop0.dot(self.operator)
        return self.dynamical_model.operators_to_state_vec(rho1, oop1)
    def expectation_value(self, state):
        """Return Tr[M rho] using only the density-matrix part of `state`."""
        rho0, _ = self.dynamical_model.state_vec_to_operators(state)
        # Proof: tr M rho = \sum_{ij} M_ij rho_ji
        return np.tensordot(self.operator, rho0, axes=([0, 1], [1, 0])) # faster than einsum
class ZOFEModel(DynamicalModel):
    """
    DynamicalModel for ZOFE master equation
    Assumes that each pigment is coupled to an identical, independent bath
    Parameters
    ----------
    hamiltonian : hamiltonian.Hamiltonian
        Hamiltonian object specifying the system
    rw_freq : float, optional
        Rotating wave frequency at which to calculate dynamics. By default,
        the rotating wave frequency is chosen from the central frequency
        of the Hamiltonian.
    hilbert_subspace : container, default 'ge'
        Container of any or all of 'g', 'e' and 'f' indicating the desired
        Hilbert subspace
    unit_convert : number, optional
        Unit conversion from energy to time units (default 1).
    References
    ----------
    See references in method containing the ZOFE master equation
    """
    system_operator = ZOFESpaceOperator
    def __init__(self, hamiltonian, rw_freq=None, hilbert_subspace='gef',
                 unit_convert=1, ham_hermit=False, rho_hermit=False):
        super(ZOFEModel, self).__init__(
            hamiltonian, rw_freq, hilbert_subspace, unit_convert)
        # initial auxiliary operator for the ZOFE master equation
        if not isinstance(self.hamiltonian.bath, PseudomodeBath):
            raise NotImplementedError('ZOFE only implemented for baths of type '
                                      'PseudomodeBath')
        numb_pm = self.hamiltonian.bath.numb_pm
        n_sit = self.hamiltonian.n_sites
        n_stat = self.hamiltonian.n_states(self.hilbert_subspace)
        # auxiliary operator layout: (pseudomode, site, state, state)
        self.oop_shape = (numb_pm, n_sit, n_stat, n_stat)
        # flags that enable cheaper branches in rhodot_oopdot_vec
        self.ham_hermit = ham_hermit
        self.rho_hermit = rho_hermit
    def density_matrix_to_state_vector(self, rho0, liouville_subspace):
        # initial auxiliary operator for the ZOFE master equation
        # Pack [vec(rho0), vec(O=0)] into one flat (column-major) vector.
        state0 = rho0.reshape((-1), order='F')
        initial_oop = np.zeros(self.oop_shape, dtype=complex)
        return np.append(state0, initial_oop.reshape((-1), order='F'))
    def state_vector_to_density_matrix(self, rhos):
        """
        turn the diff eq trajectory (list of state vectors) into a
        list of density matrices
        """
        return np.array([self.state_vec_to_operators(k)[0] for k in rhos])
    def thermal_state(self, _):
        # Thermal density matrix, packed with a zero auxiliary operator.
        rho0 = self.hamiltonian.thermal_state(self.hilbert_subspace)
        return self.density_matrix_to_state_vector(rho0, None)
    def map_between_subspaces(self, state, from_subspace, to_subspace):
        # ZOFE keeps the full packed vector regardless of subspace labels.
        return state
    def state_vec_to_operators(self, rho_oop_vec):
        """Unpack a flat state vector into (rho, oop): the first n_stat**2
        entries are vec(rho), the rest is the auxiliary operator."""
        n_stat = self.hamiltonian.n_states(self.hilbert_subspace)
        n_stat_sq = n_stat ** 2
        rho = rho_oop_vec[:n_stat_sq].reshape((n_stat, n_stat), order='F')
        oop = rho_oop_vec[n_stat_sq:].reshape(self.oop_shape, order='F')
        return rho, oop
    def operators_to_state_vec(self, rho, oop):
        """Inverse of state_vec_to_operators: flatten and concatenate."""
        return np.append(rho.reshape((-1), order='F'),
                         oop.reshape((-1), order='F'))
    def rhodot_oopdot_vec(self, t, rho_oop_vec, oop_shape, ham, L_n, Gamma, w):
        """
        Calculates the time derivatives rhodot and oopdot,
        i.e., of the density matrix and the auxiliary operator
        (takes and gives them back in vector form) according to the
        ZOFE master equation.
        Does work for one-exciton AND two-exciton space
        (including ground state).
        Parameters
        ----------
        t: time
        rho_oop_vec: vector containing the density matrix and the
        auxiliary operator at time t
        oop_shape: shape of the auxiliary operator, i.e., highest
        indices for each dimension.
        oop_shape should be (n_pseudomodes, n_sites, n_states, n_states)
        ham: Hamiltonian of the system part.
        ham is a 2D array of the form ham[state, state]
        L_n: system operator for the system-bath coupling.
        L_n is a 3D array of the form L_n[site, state, state]
        Gamma: =Omeg**2*huang, corresponding to a bath correlation
        spectrum with Lorentzians centered at frequencies Omeg with
        prefactors huang.
        Gamma is a 2D array of the form Gamma[pseudomode, site]
        w: =1j*Omeg+gamma, corresponding to a bath correlation spectrum
        with Lorentzians centered at frequencies Omeg with widths gammma.
        w is a 2D array of the form w[pseudomode, site]
        The following two parameters are needed to make the calculation more
        efficient if possible.
        ham_hermit: True if system Hamiltonian is hermitian
        rho_hermit: True if rho is hermitian
        Returns
        -------
        np.append(vec(rhodot), vec(oopdot)): time derivatives rhodot and oopdot
        in vector form.
        References
        ----------
        .. [1] ZOFE master equation: Ritschel et. al., An efficient method to
        calculate excitation energy transfer in light-harvesting systems:
        application to the Fenna-Matthews-Olson complex, NJP 13 (2011) 113034
        (and references therein)
        .. [2] Extend ZOFE master equation to two-exciton space: unpublished
        .. [3] Speed up ZOFE master equation: unpublished
        """
        rho, oop = self.state_vec_to_operators(rho_oop_vec)
        sum_oop = oop.sum(axis=0) #sum over pseudomode index
        # a_op = sum_n L_n^dag O_n (contraction over site and one state index)
        a_op = np.tensordot(L_n.swapaxes(1, 2).conj(), sum_oop,
                            axes=([0, 2], [0, 1]))
        b_op = -1j * ham - a_op
        c_op = np.tensordot(np.tensordot(L_n, rho, axes=([2], [0])),
                            sum_oop.swapaxes(1,2).conj(), axes=([0, 2], [0, 1]))
        d_op = np.dot(b_op, rho) + c_op
        if not self.rho_hermit:
            # rho not hermitian: the "daggered" half must be built explicitly.
            big_operator = np.tensordot(np.tensordot(sum_oop, rho,
                                                      axes=([2], [0])),
                                         L_n.swapaxes(1, 2).conj(),
                                         axes=([0, 2], [0, 1]))
            if self.ham_hermit:
                f_op = rho.dot(b_op.T.conj()) + big_operator
            else:
                f_op = rho.dot(1j * ham - a_op.T.conj()) + big_operator
        else:
            # hermitian rho lets us reuse d_op (or c_op) via conjugation.
            if self.ham_hermit:
                f_op = d_op.T.conj()
            else:
                f_op = rho.dot(1j * ham - a_op.T.conj()) + c_op.T.conj()
        rhodot = d_op + f_op
        # O operator evolution equation (uses b_op from above)
        oopdot = (np.einsum('ij,jkl->ijkl', Gamma, L_n)
                  - np.einsum('ij,ijkl->ijkl', w, oop)
                  + np.rollaxis(np.tensordot(b_op, oop, axes=[1, 2]), 0, 3)
                  - oop.dot(b_op))
        return self.operators_to_state_vec(rhodot, oopdot)
    def equation_of_motion(self, liouville_subspace, heisenberg_picture=False):
        """
        Return the equation of motion for this dynamical model in the given
        subspace, a function which takes a state vector and returns its first
        time derivative, for use in a numerical integration routine
        """
        if heisenberg_picture:
            raise NotImplementedError('ZOFE not implemented in the Heisenberg '
                                      'picture')
        # NOTE THE MINUS SIGN!!
        L_n = -np.asanyarray(
            self.hamiltonian.system_bath_couplings(self.hilbert_subspace))
        # parameters of PseudomodeBath
        Omega = self.hamiltonian.bath.Omega
        gamma = self.hamiltonian.bath.gamma
        huang = self.hamiltonian.bath.huang
        # Lorentzian bath parameters (see rhodot_oopdot_vec docstring)
        Gamma = Omega ** 2 * huang
        w = 1j * Omega + gamma
        sys_ham = self.hamiltonian.H(self.hilbert_subspace)
        def eom(t, rho_oop_vec):
            # closure over the precomputed bath/system parameters
            return (self.unit_convert
                    * self.rhodot_oopdot_vec(t, rho_oop_vec,
                                             self.oop_shape, sys_ham, L_n,
                                             Gamma, w))
        return eom
| [
"numpy.tensordot",
"numpy.dot",
"numpy.zeros",
"numpy.einsum"
] | [((1585, 1641), 'numpy.tensordot', 'np.tensordot', (['self.operator', 'rho0'], {'axes': '([0, 1], [1, 0])'}), '(self.operator, rho0, axes=([0, 1], [1, 0]))\n', (1597, 1641), True, 'import numpy as np\n'), ((3566, 3605), 'numpy.zeros', 'np.zeros', (['self.oop_shape'], {'dtype': 'complex'}), '(self.oop_shape, dtype=complex)\n', (3574, 3605), True, 'import numpy as np\n'), ((1006, 1052), 'numpy.tensordot', 'np.tensordot', (['self.operator', 'oop0'], {'axes': '[1, 2]'}), '(self.operator, oop0, axes=[1, 2])\n', (1018, 1052), True, 'import numpy as np\n'), ((7205, 7244), 'numpy.tensordot', 'np.tensordot', (['L_n', 'rho'], {'axes': '([2], [0])'}), '(L_n, rho, axes=([2], [0]))\n', (7217, 7244), True, 'import numpy as np\n'), ((7342, 7359), 'numpy.dot', 'np.dot', (['b_op', 'rho'], {}), '(b_op, rho)\n', (7348, 7359), True, 'import numpy as np\n'), ((7440, 7483), 'numpy.tensordot', 'np.tensordot', (['sum_oop', 'rho'], {'axes': '([2], [0])'}), '(sum_oop, rho, axes=([2], [0]))\n', (7452, 7483), True, 'import numpy as np\n'), ((8137, 8174), 'numpy.einsum', 'np.einsum', (['"""ij,jkl->ijkl"""', 'Gamma', 'L_n'], {}), "('ij,jkl->ijkl', Gamma, L_n)\n", (8146, 8174), True, 'import numpy as np\n'), ((8195, 8229), 'numpy.einsum', 'np.einsum', (['"""ij,ijkl->ijkl"""', 'w', 'oop'], {}), "('ij,ijkl->ijkl', w, oop)\n", (8204, 8229), True, 'import numpy as np\n'), ((8262, 8298), 'numpy.tensordot', 'np.tensordot', (['b_op', 'oop'], {'axes': '[1, 2]'}), '(b_op, oop, axes=[1, 2])\n', (8274, 8298), True, 'import numpy as np\n')] |
import random
import numpy as np
import torch
from rdkit import Chem
def to_tensor(tensor):
    """Wrap an ndarray or torch tensor as an autograd Variable, moving it
    to the GPU when CUDA is available."""
    data = torch.from_numpy(tensor) if isinstance(tensor, np.ndarray) else tensor
    variable = torch.autograd.Variable(data)
    if torch.cuda.is_available():
        return variable.cuda()
    return variable
def get_indices_of_unique_smiles(smiles: [str]) -> np.array:
    """Return the positions of the first occurrence of each distinct
    SMILES string, sorted in ascending order."""
    _, first_seen = np.unique(smiles, return_index=True)
    return first_seen[np.argsort(first_seen)]
def set_default_device_cuda():
    """Sets the default device (cpu or cuda) used for all tensors.

    Returns
    -------
    bool
        True when a CUDA device is available (default tensor type becomes
        ``torch.cuda.FloatTensor``), False otherwise (CPU ``FloatTensor``).
    """
    # idiom fix: use `not ...` instead of comparing to False
    if not torch.cuda.is_available():
        torch.set_default_tensor_type(torch.FloatTensor)
        return False
    # CUDA available: make GPU float tensors the default type
    torch.set_default_tensor_type(torch.cuda.FloatTensor)  # pylint: disable=E1101
    return True
def fraction_valid_smiles(smiles):
    """Return the percentage (0-100) of entries in `smiles` that RDKit
    parses into a molecule."""
    n_valid = sum(1 for entry in smiles if Chem.MolFromSmiles(entry))
    return 100 * n_valid / len(smiles)
def set_seed(seed):
    """Seed the python, numpy and torch RNGs and force deterministic cuDNN."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
| [
"numpy.random.seed",
"torch.manual_seed",
"torch.autograd.Variable",
"torch.set_default_tensor_type",
"numpy.sort",
"torch.cuda.is_available",
"random.seed",
"rdkit.Chem.MolFromSmiles",
"numpy.unique",
"torch.from_numpy"
] | [((183, 208), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (206, 208), False, 'import torch\n'), ((275, 306), 'torch.autograd.Variable', 'torch.autograd.Variable', (['tensor'], {}), '(tensor)\n', (298, 306), False, 'import torch\n'), ((488, 524), 'numpy.unique', 'np.unique', (['smiles'], {'return_index': '(True)'}), '(smiles, return_index=True)\n', (497, 524), True, 'import numpy as np\n'), ((546, 559), 'numpy.sort', 'np.sort', (['idxs'], {}), '(idxs)\n', (553, 559), True, 'import numpy as np\n'), ((1213, 1236), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1230, 1236), False, 'import torch\n'), ((1330, 1350), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1344, 1350), True, 'import numpy as np\n'), ((1355, 1372), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (1366, 1372), False, 'import random\n'), ((151, 175), 'torch.from_numpy', 'torch.from_numpy', (['tensor'], {}), '(tensor)\n', (167, 175), False, 'import torch\n'), ((696, 721), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (719, 721), False, 'import torch\n'), ((775, 812), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['tensor'], {}), '(tensor)\n', (804, 812), False, 'import torch\n'), ((943, 980), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['tensor'], {}), '(tensor)\n', (972, 980), False, 'import torch\n'), ((1084, 1109), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['smile'], {}), '(smile)\n', (1102, 1109), False, 'from rdkit import Chem\n'), ((225, 256), 'torch.autograd.Variable', 'torch.autograd.Variable', (['tensor'], {}), '(tensor)\n', (248, 256), False, 'import torch\n')] |
#!python
# GL interoperability example, by <NAME>.
# Draws a rotating teapot, using cuda to invert the RGB value
# each frame
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL.ARB.vertex_buffer_object import *
from OpenGL.GL.ARB.pixel_buffer_object import *
import numpy, sys, time
import pycuda.driver as cuda_driver
import pycuda.gl as cuda_gl
from pycuda.compiler import SourceModule
#this is all munged together from the CUDA SDK postprocessGL example.
initial_size = 512,512        # window size at startup (width, height)
current_size = initial_size   # updated by resize(); drives viewport/PBO sizing
animate = True                # toggled with 'a' in keyPressed(): rotate the teapot
enable_cuda = True            # toggled with 'e' in keyPressed(): run the invert pass
window = None # Number of the glut window.
time_of_last_draw = 0.0
time_of_last_titleupdate = 0.0  # ms timestamp of the last FPS title refresh
frames_per_second = 0.0
frame_counter = 0
output_texture = None # pointer to offscreen render target
# GL buffer / CUDA handles, created in main() and create_PBOs()
(source_pbo, dest_pbo, cuda_module, invert,
 pycuda_source_pbo, pycuda_dest_pbo) = [None]*6
heading,pitch,bank = [0.0]*3  # teapot Euler angles, advanced in idle()
def create_PBOs(w,h):
    """Allocate source/destination pixel buffer objects of w*h RGBA texels
    and register both with PyCUDA for GL interop."""
    global source_pbo, dest_pbo, pycuda_source_pbo, pycuda_dest_pbo
    num_texels = w*h
    # zero-initialised RGBA backing store for both buffers
    data = numpy.zeros((num_texels,4),numpy.uint8)
    source_pbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, source_pbo)
    glBufferData(GL_ARRAY_BUFFER, data, GL_DYNAMIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    # hand the GL buffer to CUDA so the kernel can map it
    pycuda_source_pbo = cuda_gl.BufferObject(int(source_pbo))
    dest_pbo = glGenBuffers(1)
    glBindBuffer(GL_ARRAY_BUFFER, dest_pbo)
    glBufferData(GL_ARRAY_BUFFER, data, GL_DYNAMIC_DRAW)
    glBindBuffer(GL_ARRAY_BUFFER, 0)
    pycuda_dest_pbo = cuda_gl.BufferObject(int(dest_pbo))
def destroy_PBOs():
    """Delete both pixel buffer objects and clear all related handles.

    NOTE(review): the PyCUDA BufferObject registrations are not explicitly
    unregistered before the GL buffers are deleted -- confirm the teardown
    ordering is safe."""
    global source_pbo, dest_pbo, pycuda_source_pbo, pycuda_dest_pbo
    for pbo in [source_pbo, dest_pbo]:
        glBindBuffer(GL_ARRAY_BUFFER, int(pbo))
        glDeleteBuffers(1, int(pbo))
        glBindBuffer(GL_ARRAY_BUFFER, 0)
    source_pbo,dest_pbo,pycuda_source_pbo,pycuda_dest_pbo = [None]*4
def create_texture(w,h):
    """Create the w x h RGBA texture used for the full-screen blit."""
    global output_texture
    output_texture = glGenTextures(1)
    glBindTexture(GL_TEXTURE_2D, output_texture)
    # set basic parameters
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    # buffer data: allocate storage only, no initial pixel data
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
                 w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
def destroy_texture():
    """Delete the offscreen render-target texture and clear its handle."""
    global output_texture
    glDeleteTextures(output_texture)
    output_texture = None
def init_gl():
    """One-time GL setup: clear colour, projection, polygon mode, one light
    and a shiny red material for the teapot."""
    Width, Height = current_size
    glClearColor(0.1, 0.1, 0.5, 1.0)
    glDisable(GL_DEPTH_TEST)
    glViewport(0, 0, Width, Height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
    glEnable(GL_LIGHT0)
    red = ( 1.0, 0.1, 0.1, 1.0 )
    white = ( 1.0, 1.0, 1.0, 1.0 )
    glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, red )
    glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, white)
    glMaterialf( GL_FRONT_AND_BACK, GL_SHININESS, 60.0)
def resize(Width, Height):
    """GLUT reshape callback: record the new window size and rebuild the
    projection matrix to match the new aspect ratio."""
    global current_size
    current_size = Width, Height
    glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()
    gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
def do_tick():
    """Update the frames-per-second counter and refresh the window title.

    Called once per rendered frame; roughly once a second the accumulated
    frame count is published as the FPS figure in the window title.
    """
    global time_of_last_titleupdate, frame_counter, frames_per_second
    # Fix: time.clock() was deprecated and removed in Python 3.8.  Use the
    # monotonic wall-clock timer instead (converted to ms to match the
    # stored timestamps); wall time is also the correct basis for FPS.
    if (time.perf_counter() * 1000.0) - time_of_last_titleupdate >= 1000.:
        frames_per_second = frame_counter  # save the FPS
        frame_counter = 0  # reset the FPS counter
        szTitle = "%d FPS" % (frames_per_second )
        glutSetWindowTitle ( szTitle )
        time_of_last_titleupdate = time.perf_counter() * 1000.0
    frame_counter += 1
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
    """GLUT keyboard callback: ESC quits, 'a' toggles animation,
    'e' toggles the CUDA post-processing pass."""
    global animate, enable_cuda
    # Fix: under Python 3 GLUT delivers the key as `bytes`, so comparisons
    # against str literals ('\033', 'a', 'e') never matched.  Normalise to
    # str first; Python 2 str passes through unchanged.
    key = args[0]
    if isinstance(key, bytes):
        key = key.decode('latin-1')
    # If escape is pressed, kill everything.
    if key == '\033':
        print('Closing..')
        destroy_PBOs()
        destroy_texture()
        exit()
    elif key == 'a':
        print('toggling animation')
        animate = not animate
    elif key == 'e':
        print('toggling cuda')
        enable_cuda = not enable_cuda
def idle():
    """GLUT idle callback: advance the teapot rotation angles (when
    animation is enabled) and request a redraw."""
    global heading, pitch, bank
    if animate:
        heading += 0.2
        pitch += 0.6
        bank += 1.0
    glutPostRedisplay()
def display():
    """GLUT display callback: render the teapot, optionally run the CUDA
    invert pass and blit the result, then swap buffers."""
    try:
        render_scene()
        if enable_cuda:
            process_image()
            display_image()
        glutSwapBuffers()
    except:
        # Deliberately catch everything: report the traceback and hard-exit
        # so a failure inside GLUT's C callback doesn't leave the app wedged.
        from traceback import print_exc
        print_exc()
        from os import _exit
        _exit(0)
def process(width, height):
    """ Use PyCuda """
    # one 16x16 thread block per image tile
    grid_dimensions = (width//16,height//16)
    # map both GL buffers into CUDA's address space for the kernel call
    source_mapping = pycuda_source_pbo.map()
    dest_mapping = pycuda_dest_pbo.map()
    invert.prepared_call(grid_dimensions, (16, 16, 1),
                        source_mapping.device_ptr(),
                        dest_mapping.device_ptr())
    cuda_driver.Context.synchronize()
    # release the buffers back to OpenGL
    source_mapping.unmap()
    dest_mapping.unmap()
def process_image():
    """ copy image and process using CUDA """
    global pycuda_source_pbo,source_pbo,current_size, dest_pbo
    image_width, image_height = current_size
    assert source_pbo is not None
    # tell cuda we are going to get into these buffers
    pycuda_source_pbo.unregister()
    # bind the source PBO as the pixel-pack target for the readback
    glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, int(source_pbo))
    # read data into pbo. note: use BGRA format for optimal performance
    glReadPixels(
        0, #start x
        0, #start y
        image_width, #end x
        image_height, #end y
        GL_BGRA, #format
        GL_UNSIGNED_BYTE, #output type
        ctypes.c_void_p(0))
    # re-register with CUDA now that the GL transfer is done
    pycuda_source_pbo = cuda_gl.BufferObject(int(source_pbo))
    # run the Cuda kernel
    process(image_width, image_height)
    # blit convolved texture onto the screen
    # download texture from PBO
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, int(dest_pbo))
    glBindTexture(GL_TEXTURE_2D, output_texture)
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                    image_width, image_height,
                    GL_BGRA, GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
def display_image():
    """ render a screen sized quad """
    # draw the processed texture as an unlit full-screen quad
    glDisable(GL_DEPTH_TEST)
    glDisable(GL_LIGHTING)
    glEnable(GL_TEXTURE_2D)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
    # temporarily switch to an orthographic projection covering the window
    glMatrixMode(GL_PROJECTION)
    glPushMatrix()
    glLoadIdentity()
    glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
    glMatrixMode( GL_MODELVIEW)
    glLoadIdentity()
    glViewport(0, 0, current_size[0], current_size[1])
    glBegin(GL_QUADS)
    glTexCoord2f(0.0, 0.0)
    glVertex3f(-1.0, -1.0, 0.5)
    glTexCoord2f(1.0, 0.0)
    glVertex3f(1.0, -1.0, 0.5)
    glTexCoord2f(1.0, 1.0)
    glVertex3f(1.0, 1.0, 0.5)
    glTexCoord2f(0.0, 1.0)
    glVertex3f(-1.0, 1.0, 0.5)
    glEnd()
    # restore the perspective projection and unbind everything
    glMatrixMode(GL_PROJECTION)
    glPopMatrix()
    glDisable(GL_TEXTURE_2D)
    glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0)
    glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0)
def render_scene():
    """Draw the lit, rotating teapot using the current Euler angles."""
    glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)# Clear Screen And Depth Buffer
    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity () # Reset The Modelview Matrix
    glTranslatef(0.0, 0.0, -3.0)
    # apply the rotation angles advanced by idle()
    glRotatef(heading, 1.0, 0.0, 0.0)
    glRotatef(pitch , 0.0, 1.0, 0.0)
    glRotatef(bank , 0.0, 0.0, 1.0)
    glViewport(0, 0, current_size[0],current_size[1])
    glEnable(GL_LIGHTING)
    glEnable(GL_DEPTH_TEST)
    glDepthFunc(GL_LESS)
    glutSolidTeapot(1.0)
    do_tick()#just for fps display..
    return True
def main():
    """Create the GLUT window, register callbacks, set up GL/CUDA interop,
    compile the invert kernel and enter the GLUT main loop (never returns)."""
    global window, cuda_module, cuda_gl, cuda_driver, invert
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
    glutInitWindowSize(*initial_size)
    glutInitWindowPosition(0, 0)
    window = glutCreateWindow("PyCuda GL Interop Example")
    glutDisplayFunc(display)
    glutIdleFunc(idle)
    glutReshapeFunc(resize)
    glutKeyboardFunc(keyPressed)
    glutSpecialFunc(keyPressed)
    init_gl()
    # create texture for blitting to screen
    create_texture(*initial_size)
    #setup pycuda gl interop
    # NOTE: imported here (after the GL context exists) because
    # pycuda.gl.autoinit binds CUDA to the current GL context on import.
    import pycuda.gl.autoinit
    import pycuda.gl
    cuda_gl = pycuda.gl
    cuda_driver = pycuda.driver
    cuda_module = SourceModule("""
    __global__ void invert(unsigned char *source, unsigned char *dest)
    {
    int block_num = blockIdx.x + blockIdx.y * gridDim.x;
    int thread_num = threadIdx.y * blockDim.x + threadIdx.x;
    int threads_in_block = blockDim.x * blockDim.y;
    //Since the image is RGBA we multiply the index 4.
    //We'll only use the first 3 (RGB) channels though
    int idx = 4 * (threads_in_block * block_num + thread_num);
    dest[idx ] = 255 - source[idx ];
    dest[idx+1] = 255 - source[idx+1];
    dest[idx+2] = 255 - source[idx+2];
    }
    """)
    invert = cuda_module.get_function("invert")
    # The argument "PP" indicates that the invert function will take two PBOs as arguments
    invert.prepare("PP")
    # create source and destination pixel buffer objects for processing
    create_PBOs(*initial_size)
    glutMainLoop()
# Script entry point: print the key bindings, then hand control to GLUT.
if __name__ == "__main__":
    print("Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda")
    main()
| [
"pycuda.compiler.SourceModule",
"traceback.print_exc",
"pycuda.driver.Context.synchronize",
"numpy.zeros",
"time.clock",
"os._exit"
] | [((1035, 1076), 'numpy.zeros', 'numpy.zeros', (['(num_texels, 4)', 'numpy.uint8'], {}), '((num_texels, 4), numpy.uint8)\n', (1046, 1076), False, 'import numpy, sys, time\n'), ((5126, 5159), 'pycuda.driver.Context.synchronize', 'cuda_driver.Context.synchronize', ([], {}), '()\n', (5157, 5159), True, 'import pycuda.driver as cuda_driver\n'), ((8587, 9209), 'pycuda.compiler.SourceModule', 'SourceModule', (['"""\n __global__ void invert(unsigned char *source, unsigned char *dest)\n {\n int block_num = blockIdx.x + blockIdx.y * gridDim.x;\n int thread_num = threadIdx.y * blockDim.x + threadIdx.x;\n int threads_in_block = blockDim.x * blockDim.y;\n //Since the image is RGBA we multiply the index 4.\n //We\'ll only use the first 3 (RGB) channels though\n int idx = 4 * (threads_in_block * block_num + thread_num);\n dest[idx ] = 255 - source[idx ];\n dest[idx+1] = 255 - source[idx+1];\n dest[idx+2] = 255 - source[idx+2];\n }\n """'], {}), '(\n """\n __global__ void invert(unsigned char *source, unsigned char *dest)\n {\n int block_num = blockIdx.x + blockIdx.y * gridDim.x;\n int thread_num = threadIdx.y * blockDim.x + threadIdx.x;\n int threads_in_block = blockDim.x * blockDim.y;\n //Since the image is RGBA we multiply the index 4.\n //We\'ll only use the first 3 (RGB) channels though\n int idx = 4 * (threads_in_block * block_num + thread_num);\n dest[idx ] = 255 - source[idx ];\n dest[idx+1] = 255 - source[idx+1];\n dest[idx+2] = 255 - source[idx+2];\n }\n """\n )\n', (8599, 9209), False, 'from pycuda.compiler import SourceModule\n'), ((3818, 3830), 'time.clock', 'time.clock', ([], {}), '()\n', (3828, 3830), False, 'import numpy, sys, time\n'), ((4739, 4750), 'traceback.print_exc', 'print_exc', ([], {}), '()\n', (4748, 4750), False, 'from traceback import print_exc\n'), ((4788, 4796), 'os._exit', '_exit', (['(0)'], {}), '(0)\n', (4793, 4796), False, 'from os import _exit\n'), ((3506, 3518), 'time.clock', 'time.clock', ([], {}), '()\n', (3516, 3518), False, 'import 
numpy, sys, time\n')] |
#import numpy as np
from pylsl import StreamInlet, resolve_stream, local_clock
from DE_viewer_dialog import DialogDE
from qtpy import QtGui, QtCore, QtWidgets, uic
import numpy as np
import os
qtCreatorFile = "viewer.ui" # Qt Designer layout for the main window, loaded from this module's directory.
Ui_MainWindow, QtBaseClass = uic.loadUiType(os.path.join(os.path.dirname(__file__),qtCreatorFile))
def classifyStreamInlet(streams):
    """Return the name of every LSL stream in *streams*.

    Parameters
    ----------
    streams : list
        StreamInfo objects as returned by ``pylsl.resolve_stream()``.

    Returns
    -------
    list of str
        One stream name per input stream, in the same order.
    """
    # Comprehension replaces the manual append loop (same order, same values).
    return [StreamInlet(stream).info().name() for stream in streams]
class Viewer(QtWidgets.QMainWindow, Ui_MainWindow):
    """Main window that displays events pulled from the selected LSL streams.

    Events are buffered per stream (plus one combined "all sources" list)
    and shown in a table that can be filtered by source via a combo box.
    """
    def __init__(self, selectedStreams, streams, update_rate):
        """Open inlets for the chosen streams and wire up the UI.

        Parameters
        ----------
        selectedStreams : list of int
            Indices into *streams* chosen by the user.
        streams : list
            StreamInfo objects as returned by ``pylsl.resolve_stream()``.
        update_rate : int
            Data-polling frequency in Hz.
        """
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.comboBox_src.currentIndexChanged.connect(self.changeDisplayedStream)
        self.inlet = []
        self.storedData = []
        self.storedData.append([]) # All events list
        self.selectedStream = 0 # By default events from all sources are displayed
        self.dataAcquisition = True # By default data acquisition is active
        self.update_rate = update_rate
        for num in selectedStreams:
            self.comboBox_src.addItem(StreamInlet(streams[num]).info().name())
            self.inlet.append(StreamInlet(streams[num], max_buflen=10))
            self.storedData.append([]) # Single event list
        self.configureTable()
        self.action_buffering_time.triggered.connect(self.changeBufferingTime)
        self.bufferTime = 100  # seconds before events are dropped from the buffer
        self.StopButton.setFocus()
        self.StopButton.clicked.connect(self.setStopResume)
        self.CloseButton.clicked.connect(self.stopAndClose)
        self.configureTimers()
    def configureTimers(self):
        """Start the periodic data-poll and buffer-cleanup timers."""
        self.timerData = QtCore.QTimer()
        self.timerData.timeout.connect(self.updateData)
        # QTimer.start() takes milliseconds as an int; the explicit int()
        # avoids a TypeError on bindings that reject float arguments.
        self.timerData.start(int(1000 / self.update_rate))
        self.timerBuffer = QtCore.QTimer()
        self.timerBuffer.timeout.connect(self.updateBuffer)
        self.timerBuffer.start(1000)
    def configureTable(self):
        """Set the event-table column widths and the per-source row colours."""
        self.eventTable.horizontalHeader().setStretchLastSection(True)
        self.eventTable.setColumnWidth(0, 100)
        self.eventTable.setColumnWidth(1, 140)
        # Colour i is used for events from source i % 6.
        self.tableColorList = [QtCore.Qt.white, QtCore.Qt.yellow, QtCore.Qt.red,
                  QtCore.Qt.green, QtCore.Qt.magenta, QtCore.Qt.cyan]
    def changeBufferingTime(self,q):
        """Ask the user for a new maximum buffering time in seconds."""
        n, ok = QtWidgets.QInputDialog().getInt(self, "Maximum data buffering time",
                "Time to remove data from buffer [s]:", self.bufferTime, 1, 9999, 1)
        if ok:
            self.bufferTime = n
    def changeDisplayedStream(self, i):
        """Switch the table to show events from source *i* (0 = all sources)."""
        # Fix: '!=' instead of 'is not' — identity comparison on ints is
        # unreliable outside CPython's small-integer cache.
        if self.selectedStream != i:
            self.selectedStream = i
            self.eventTable.setRowCount(0) # delete all rows
            for event in self.storedData[i]:
                self.updateTable(event)
    def updateData(self):
        """Pull pending chunks from every inlet and store/display new events."""
        for stream in self.inlet:
            # Always pull so stale samples are discarded even when paused.
            chunk, timestamps = stream.pull_chunk(timeout=0.0)
            if self.dataAcquisition and timestamps:
                ts = np.asarray(timestamps)
                y = np.asarray(chunk)
                # list.index() is O(n); look it up once per stream,
                # not three times per sample.
                idx = self.inlet.index(stream)
                name = stream.info().name()
                for elem in range(len(ts)):
                    # Event layout: [source name, timestamp, values, colour index]
                    event = [name, float(ts[elem]), str(y[elem,:]), idx % 6]
                    self.storedData[0].insert(0,event)
                    self.storedData[idx + 1].insert(0,event)
                    if self.selectedStream == 0 or idx + 1 == self.selectedStream:
                        self.updateTable(event)
    def updateTable(self, event):
        """Prepend *event* to the table; its last element picks the row colour."""
        self.eventTable.insertRow(0)
        color = self.tableColorList[event[-1]]
        for n in range(len(event)-1):
            elem = QtWidgets.QTableWidgetItem(str(event[n]))
            elem.setFlags(elem.flags() & ~QtCore.Qt.ItemIsEditable)
            elem.setBackground(color)
            self.eventTable.setItem(0, n, elem)
    def updateBuffer(self):
        """Drop events older than bufferTime seconds from the lists and table.

        Lists are kept newest-first, so iterating them reversed visits the
        oldest events first and we can stop at the first one young enough.
        """
        for s in self.storedData:
            for l in reversed(s):
                if (l[1] - local_clock()) <= - self.bufferTime:
                    s.remove(l)
                else:
                    break
        for rowIndex in reversed(range(self.eventTable.rowCount())):
            ts = self.eventTable.item(rowIndex, 1).text()
            if (float(ts) - local_clock()) <= - self.bufferTime:
                self.eventTable.removeRow(rowIndex)
            else:
                break
    def setStopResume(self):
        """Toggle data acquisition and relabel the Stop/Resume button."""
        self.dataAcquisition = not self.dataAcquisition
        if self.dataAcquisition:
            self.StopButton.setText("Stop")
        else:
            self.StopButton.setText("Resume")
    def stopAndClose(self):
        """Close the window (connected to the Close button)."""
        self.close()
def Start():
    """Discover LSL streams, prompt the user, and build a Viewer window.

    Returns the Viewer instance on success, or None when no streams exist,
    nothing was selected, or the dialog was cancelled.
    """
    viewer = None
    streams = resolve_stream()
    names = classifyStreamInlet(streams)
    dialog = DialogDE(names)
    if not names:
        dialog.showErrorNoStreams()
    elif dialog.exec_() and dialog.checkLineEditPattern():
        chosen, rate = dialog.returnWindowParameters()
        if not chosen:
            dialog.showErrorNoStreamSelected()
        else:
            viewer = Viewer(chosen, streams, int(rate))
            viewer.show()
    else:
        print("Window was not created.")
    return viewer
def main():
    """Launch the viewer and, outside interactive sessions, run the Qt loop."""
    viewer = Start()
    import sys
    if sys.flags.interactive != 1 or not hasattr(QtCore, 'PYQT_VERSION'):
        QtWidgets.QApplication.instance().exec_()
    return viewer
if __name__ == '__main__':
    import sys
    # A QApplication must exist before any widget is created.
    app = QtWidgets.QApplication(sys.argv)
    app.setQuitOnLastWindowClosed(True)
    v = Start()
    # Only enter the Qt event loop when not running inside an interactive
    # interpreter (which provides its own loop).
    if (((sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION')) ):
        app.instance().exec_()
    sys.exit(0)
| [
"pylsl.local_clock",
"DE_viewer_dialog.DialogDE",
"os.path.dirname",
"qtpy.QtCore.QTimer",
"qtpy.QtWidgets.QApplication.instance",
"numpy.asarray",
"pylsl.resolve_stream",
"pylsl.StreamInlet",
"qtpy.QtWidgets.QMainWindow.__init__",
"qtpy.QtWidgets.QInputDialog",
"qtpy.QtWidgets.QApplication",
... | [((5200, 5216), 'pylsl.resolve_stream', 'resolve_stream', ([], {}), '()\n', (5214, 5216), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n'), ((5282, 5303), 'DE_viewer_dialog.DialogDE', 'DialogDE', (['listStreams'], {}), '(listStreams)\n', (5290, 5303), False, 'from DE_viewer_dialog import DialogDE\n'), ((6064, 6096), 'qtpy.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (6086, 6096), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((6269, 6280), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (6277, 6280), False, 'import sys\n'), ((299, 324), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (314, 324), False, 'import os\n'), ((633, 669), 'qtpy.QtWidgets.QMainWindow.__init__', 'QtWidgets.QMainWindow.__init__', (['self'], {}), '(self)\n', (663, 669), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((1757, 1772), 'qtpy.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (1770, 1772), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((1927, 1942), 'qtpy.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (1940, 1942), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((1266, 1306), 'pylsl.StreamInlet', 'StreamInlet', (['streams[num]'], {'max_buflen': '(10)'}), '(streams[num], max_buflen=10)\n', (1277, 1306), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n'), ((2467, 2491), 'qtpy.QtWidgets.QInputDialog', 'QtWidgets.QInputDialog', ([], {}), '()\n', (2489, 2491), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((5956, 5989), 'qtpy.QtWidgets.QApplication.instance', 'QtWidgets.QApplication.instance', ([], {}), '()\n', (5987, 5989), False, 'from qtpy import QtGui, QtCore, QtWidgets, uic\n'), ((3198, 3220), 'numpy.asarray', 'np.asarray', (['timestamps'], {}), '(timestamps)\n', (3208, 3220), True, 'import numpy as np\n'), ((3245, 3262), 'numpy.asarray', 'np.asarray', (['chunk'], {}), '(chunk)\n', (3255, 3262), 
True, 'import numpy as np\n'), ((4715, 4728), 'pylsl.local_clock', 'local_clock', ([], {}), '()\n', (4726, 4728), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n'), ((4430, 4443), 'pylsl.local_clock', 'local_clock', ([], {}), '()\n', (4441, 4443), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n'), ((451, 470), 'pylsl.StreamInlet', 'StreamInlet', (['stream'], {}), '(stream)\n', (462, 470), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n'), ((1195, 1220), 'pylsl.StreamInlet', 'StreamInlet', (['streams[num]'], {}), '(streams[num])\n', (1206, 1220), False, 'from pylsl import StreamInlet, resolve_stream, local_clock\n')] |
import seaborn as sns
import datetime
import seaborn as sns
import pandas as pd
import pickle as pickle
from scipy.spatial.distance import cdist, pdist, squareform
#import backspinpy
import pandas as pd
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.model_selection import StratifiedShuffleSplit
from collections import defaultdict
from sklearn import preprocessing
import matplotlib.patches as mpatches
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as mpatches
from matplotlib.legend_handler import HandlerPatch
#from numpy import in1d
# Timestamp string (e.g. "2024-01-31-07:45PM"), handy for naming output files.
today=f"{datetime.datetime.now():%Y-%m-%d-%I:%M%p}"
def AccuracyPlot( Xhigh, acc, accCutoff=0.95, Xlow=-1,Ylow=0.5, Yhigh=1,):
    """Plot an accuracy-per-epoch curve with a horizontal cutoff line.

    Parameters
    ----------
    Xhigh : int
        One past the last epoch index; typically ``len(acc) + 1``.
    acc : sequence of float
        Accuracy score per epoch (must have ``Xhigh - 1`` entries).
    accCutoff : float, optional
        Accuracy level marked with a horizontal blue line.
    Xlow, Ylow, Yhigh : float, optional
        Axis limits.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the curve was drawn on.
    """
    fig_args = {'figsize': (6, 3), 'facecolor': 'white', 'edgecolor': 'white'}
    fig = plt.figure(**fig_args)
    ax = fig.add_subplot(111)
    # np.arange replaces the abs()-comprehension: range() never yields
    # negative values, so abs() was a no-op.
    ax.plot(np.arange(Xhigh - 1), np.array( acc ), c='k', lw=2 )
    ax.axhline( accCutoff, c='b' )
    plt.ylabel('Accuracy Score', fontsize=15)
    plt.xlabel('Epoches', fontsize=15)
    plt.xlim( Xlow, Xhigh)
    plt.ylim(Ylow, Yhigh)
    plt.grid(False)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    return ax
####NOT READY!!!
####NOT READY!!!
####NOT READY!!!
def MVplot(mu, cv, mu_sorted,cv_sorted, thrs,
           Xlow=-8.5, Xhigh=6.5, Ylow=-2, Yhigh=6.5,alphaValue=0.2, sValue=10,
           fig_args=None):
    """Scatter log2 CV against log2 mean for entries at index *thrs* and above.

    Parameters
    ----------
    mu, cv : array-like
        Unsorted mean/CV values (currently unused; kept so existing callers
        keep working).
    mu_sorted, cv_sorted : array-like
        Sorted mean and CV values; entries from *thrs* onward are plotted.
    thrs : int
        Index of the first sorted entry to include.
    Xlow, Xhigh, Ylow, Yhigh : float, optional
        Axis limits.
    alphaValue, sValue : float, optional
        Marker transparency and size.
    fig_args : dict, optional
        Keyword arguments forwarded to ``plt.figure()``; defaults to an
        8x8-inch white figure.

    Returns
    -------
    matplotlib.axes.Axes
        The axes the scatter was drawn on.
    """
    # None sentinel avoids a shared mutable default argument.
    if fig_args is None:
        fig_args = {'figsize': (8, 8), 'facecolor': 'white', 'edgecolor': 'white'}
    fig = plt.figure(**fig_args)
    # Fix: 'ax' was used below without ever being created (NameError).
    ax = fig.add_subplot(111)
    ax.scatter(np.log2(mu_sorted[thrs:]), np.log2(cv_sorted[thrs:]), marker='o', edgecolor='none', alpha=alphaValue, s=sValue,
               c='r')
    plt.ylabel('log2 CV')
    plt.xlabel('log2 mean')
    ax.grid(alpha=0.3)
    # Fix: honour the axis-limit parameters instead of hard-coded values.
    plt.xlim(Xlow, Xhigh)
    plt.ylim(Ylow, Yhigh)
    ax.legend(loc=1, fontsize=15)
    plt.gca().set_aspect(1.2)
    plt.grid(False)
    return ax
| [
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"numpy.log2",
"datetime.datetime.now",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] | [((1000, 1022), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '(**fig_args)\n', (1010, 1022), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy Score"""'], {'fontsize': '(15)'}), "('Accuracy Score', fontsize=15)\n", (1221, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1258, 1292), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoches"""'], {'fontsize': '(15)'}), "('Epoches', fontsize=15)\n", (1268, 1292), True, 'import matplotlib.pyplot as plt\n'), ((1298, 1319), 'matplotlib.pyplot.xlim', 'plt.xlim', (['Xlow', 'Xhigh'], {}), '(Xlow, Xhigh)\n', (1306, 1319), True, 'import matplotlib.pyplot as plt\n'), ((1326, 1347), 'matplotlib.pyplot.ylim', 'plt.ylim', (['Ylow', 'Yhigh'], {}), '(Ylow, Yhigh)\n', (1334, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1353, 1368), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (1361, 1368), True, 'import matplotlib.pyplot as plt\n'), ((1971, 1993), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '(**fig_args)\n', (1981, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2305, 2326), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""log2 CV"""'], {}), "('log2 CV')\n", (2315, 2326), True, 'import matplotlib.pyplot as plt\n'), ((2332, 2355), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""log2 mean"""'], {}), "('log2 mean')\n", (2342, 2355), True, 'import matplotlib.pyplot as plt\n'), ((2385, 2404), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(-8.6)', '(6.5)'], {}), '(-8.6, 6.5)\n', (2393, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2410, 2427), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-2)', '(6.5)'], {}), '(-2, 6.5)\n', (2418, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2499, 2514), 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), '(False)\n', (2507, 2514), True, 'import matplotlib.pyplot as plt\n'), ((662, 685), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (683, 685), 
False, 'import datetime\n'), ((1110, 1123), 'numpy.array', 'np.array', (['acc'], {}), '(acc)\n', (1118, 1123), True, 'import numpy as np\n'), ((2014, 2039), 'numpy.log2', 'np.log2', (['mu_sorted[thrs:]'], {}), '(mu_sorted[thrs:])\n', (2021, 2039), True, 'import numpy as np\n'), ((2041, 2066), 'numpy.log2', 'np.log2', (['cv_sorted[thrs:]'], {}), '(cv_sorted[thrs:])\n', (2048, 2066), True, 'import numpy as np\n'), ((2468, 2477), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2475, 2477), True, 'import matplotlib.pyplot as plt\n')] |
#Data to fit to for each galaxy to be used in workshop
###############################
########## Imports ############
###############################
import sys
sys.path.append('../python/')
import dataPython as dp
import numpy as np
import scipy.interpolate as inter
import matplotlib.pyplot as plt
###############################
########## Helpers ############
###############################
def _spline_component(raw):
    """Turn a raw {'xx','yy'} rotation-curve table into a component dict.

    Returns a dict with radius/velocity arrays ('r', 'v'), the B-spline
    representation ('t', 'c', 'k'), and a callable smoothing 'spline' —
    exactly the layout the fitting notebooks expect.
    """
    comp = {'r': np.asarray(raw['xx']),
            'v': np.asarray(raw['yy'])}
    comp['t'], comp['c'], comp['k'] = inter.splrep(comp['r'], comp['v'])
    comp['spline'] = inter.BSpline(comp['t'], comp['c'], comp['k'])
    return comp
def _attach_measured(galaxy, with_r_errors=True):
    """Unpack the galaxy's 'measured_data' table into flat numpy arrays.

    Sets 'm_radii', 'm_velocities', 'm_v_errors' and — when the data file
    provides x-errors — 'm_r_errors'.
    """
    data = galaxy['measured_data']
    galaxy['m_radii'] = np.asarray(data['xx'])
    galaxy['m_velocities'] = np.asarray(data['yy'])
    if with_r_errors:
        galaxy['m_r_errors'] = np.asarray(data['ex'])
    galaxy['m_v_errors'] = np.asarray(data['ey'])
def _data_only_galaxy(name, path):
    """Build a galaxy dict with measured data only (y-errors, no x-errors)."""
    galaxy = {'measured_data': dp.getXYdata_wYerr(path)}
    _attach_measured(galaxy, with_r_errors=False)
    galaxy['galaxyname'] = name
    return galaxy
###############################
########### NGC5533 ###########
###############################
NGC5533 = {
    # Noordermeer's fitted component curves and uncertainty band
    'raw_total'     : dp.getXYdata('data/NGC5533/noord-120kpc-total.txt'),
    'raw_blackhole' : dp.getXYdata('data/NGC5533/noord-120kpc-blackhole.txt'),
    'raw_bulge'     : dp.getXYdata('data/NGC5533/noord-120kpc-bulge.txt'),
    'raw_disk'      : dp.getXYdata('data/NGC5533/noord-120kpc-disk.txt'),
    'raw_halo'      : dp.getXYdata('data/NGC5533/noord-120kpc-halo.txt'),
    'raw_gas'       : dp.getXYdata('data/NGC5533/noord-120kpc-gas.txt'),
    'raw_band_btm'  : dp.getXYdata('data/NGC5533/noord-120kpc-bottomband.txt'),
    'raw_band_top'  : dp.getXYdata('data/NGC5533/noord-120kpc-topband.txt'),
    # Measured rotation curve out to 100 kpc
    'measured_data' : dp.getXYdata_wXYerr('data/NGC5533/100kpc_data.txt')
}
# Parameters, Source: Noordermeer (2007)
NGC5533['galaxyname'] = 'NGC 5533' # NGC catalog number of the galaxy
NGC5533['rho0'] = 0.31e9           # central mass density (in solar mass/kpc^3)
NGC5533['rc'] = 1.4                # core radius (in kpc)
NGC5533['massbh'] = 2.7e9          # mass of central black hole (in solar masses)
_attach_measured(NGC5533)
# Band data
NGC5533['n_r_btmband'] = np.asarray(NGC5533['raw_band_btm']['xx'])
NGC5533['n_v_btmband'] = np.asarray(NGC5533['raw_band_btm']['yy'])
NGC5533['n_r_topband'] = np.asarray(NGC5533['raw_band_top']['xx'])
NGC5533['n_v_topband'] = np.asarray(NGC5533['raw_band_top']['yy'])
NGC5533['n_v_bandwidth'] = NGC5533['n_v_topband'] - NGC5533['n_v_btmband']
NGC5533['n_v_bandwidth'] = NGC5533['n_v_bandwidth'][0::28] #For weights, v_errors and band must line up.
NGC5533['n_v_bandwidth'] = NGC5533['n_v_bandwidth'][1:]
# Smoothing splines for the uncertainty band edges
NGC5533['n_tb'], NGC5533['n_cb'], NGC5533['n_kb'] = inter.splrep(NGC5533['n_r_btmband'],NGC5533['n_v_btmband'])
NGC5533['n_tt'], NGC5533['n_ct'], NGC5533['n_kt'] = inter.splrep(NGC5533['n_r_topband'],NGC5533['n_v_topband'])
NGC5533['n_band_btm'] = inter.BSpline(NGC5533['n_tb'], NGC5533['n_cb'], NGC5533['n_kb'])
NGC5533['n_band_top'] = inter.BSpline(NGC5533['n_tt'], NGC5533['n_ct'], NGC5533['n_kt'])
# Component curves (total, black hole, bulge, disk, halo, gas)
for _comp in ('total', 'blackhole', 'bulge', 'disk', 'halo', 'gas'):
    NGC5533[_comp] = _spline_component(NGC5533['raw_' + _comp])
###############################
########### NGC0891 ###########
###############################
NGC0891 = {
    'raw_bulge'     : dp.getXYdata('data/NGC0891/891_dtBulge.dat'),
    'raw_disk'      : dp.getXYdata('data/NGC0891/891_dtDisk.dat'),
    'raw_gas'       : dp.getXYdata('data/NGC0891/891_dtGas.dat'),
    'measured_data' : dp.getXYdata_wXYerr('data/NGC0891/891_data')
}
# Parameters, Source: Richards et al. (2015)
NGC0891['galaxyname'] = 'NGC 891' # NGC catalog number of the galaxy
NGC0891['rho0'] = 3.31e7          # central mass density (in solar mass/kpc^3)
NGC0891['rc'] = 1.9               # core radius (in kpc)
NGC0891['massbh'] = 0             # central black hole is included in the bulge curve
_attach_measured(NGC0891)
for _comp in ('bulge', 'disk', 'gas'):
    NGC0891[_comp] = _spline_component(NGC0891['raw_' + _comp])
NGC891 = NGC0891 # Considering when someone forgets to type 0
###############################
########### NGC7814 ###########
###############################
NGC7814 = {
    'raw_bulge'     : dp.getXYdata('data/NGC7814/7814reallybulge.dat'),
    'raw_disk'      : dp.getXYdata('data/NGC7814/7814reallydisk.dat'),
    'raw_gas'       : dp.getXYdata('data/NGC7814/7814reallygas.dat'),
    'measured_data' : dp.getXYdata_wXYerr('data/NGC7814/ngc7814data')
}
# Parameters, Source: Richards et al. (2015)
NGC7814['galaxyname'] = 'NGC 7814' # NGC catalog number of the galaxy
NGC7814['rho0'] = 1.52e8           # central mass density (in solar mass/kpc^3)
NGC7814['rc'] = 2.1                # core radius (in kpc)
NGC7814['massbh'] = 0              # central black hole is included in the bulge curve
_attach_measured(NGC7814)
for _comp in ('bulge', 'disk', 'gas'):
    NGC7814[_comp] = _spline_component(NGC7814['raw_' + _comp])
###############################
########### NGC5005 ###########
###############################
NGC5005 = {
    'raw_bulge'     : dp.getXYdata('data/NGC5005/ngc5005_bulge.txt'),
    'raw_disk'      : dp.getXYdata('data/NGC5005/ngc5005_disk.txt'),
    'raw_halo'      : dp.getXYdata('data/NGC5005/ngc5005_halo.txt'),
    'raw_gas'       : dp.getXYdata('data/NGC5005/ngc5005_gas.txt'),
    'measured_data' : dp.getXYdata_wXYerr('data/NGC5005/ngc5005_data.txt')
}
_attach_measured(NGC5005)
for _comp in ('bulge', 'disk', 'halo', 'gas'):
    NGC5005[_comp] = _spline_component(NGC5005['raw_' + _comp])
# Parameters
NGC5005['galaxyname'] = 'NGC 5005' # NGC catalog number of the galaxy
NGC5005['rho0'] = 1e8              # central mass density (in solar mass/kpc^3), guess!
NGC5005['rc'] = 2.5                # core radius (in kpc), Source: Richards et al. (2015)
NGC5005['massbh'] = 0              # central black hole is included in the bulge curve
###############################
####### Other Galaxies ########
###############################
# These galaxies have measured rotation curves (with y-errors) only.
NGC3198  = _data_only_galaxy('NGC 3198',  'data/othergalaxies/NGC3198.txt')
UGC89    = _data_only_galaxy('UGC 89',    'data/othergalaxies/UGC89.txt')
UGC477   = _data_only_galaxy('UGC 477',   'data/othergalaxies/UGC477.txt')
UGC1281  = _data_only_galaxy('UGC 1281',  'data/othergalaxies/UGC1281.txt')
UGC1437  = _data_only_galaxy('UGC 1437',  'data/othergalaxies/UGC1437.txt')
UGC2953  = _data_only_galaxy('UGC 2953',  'data/othergalaxies/UGC2953.txt')
UGC4325  = _data_only_galaxy('UGC 4325',  'data/othergalaxies/UGC4325.txt')
UGC5253  = _data_only_galaxy('UGC 5253',  'data/othergalaxies/UGC5253.txt')
UGC6787  = _data_only_galaxy('UGC 6787',  'data/othergalaxies/UGC6787.txt')
UGC10075 = _data_only_galaxy('UGC 10075', 'data/othergalaxies/UGC10075.txt')
"sys.path.append",
"scipy.interpolate.BSpline",
"numpy.asarray",
"dataPython.getXYdata",
"dataPython.getXYdata_wYerr",
"dataPython.getXYdata_wXYerr",
"scipy.interpolate.splrep"
] | [((163, 192), 'sys.path.append', 'sys.path.append', (['"""../python/"""'], {}), "('../python/')\n", (178, 192), False, 'import sys\n'), ((1857, 1899), 'numpy.asarray', 'np.asarray', (["NGC5533['measured_data']['xx']"], {}), "(NGC5533['measured_data']['xx'])\n", (1867, 1899), True, 'import numpy as np\n'), ((1926, 1968), 'numpy.asarray', 'np.asarray', (["NGC5533['measured_data']['yy']"], {}), "(NGC5533['measured_data']['yy'])\n", (1936, 1968), True, 'import numpy as np\n'), ((1995, 2037), 'numpy.asarray', 'np.asarray', (["NGC5533['measured_data']['ex']"], {}), "(NGC5533['measured_data']['ex'])\n", (2005, 2037), True, 'import numpy as np\n'), ((2064, 2106), 'numpy.asarray', 'np.asarray', (["NGC5533['measured_data']['ey']"], {}), "(NGC5533['measured_data']['ey'])\n", (2074, 2106), True, 'import numpy as np\n'), ((2155, 2196), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_band_btm']['xx']"], {}), "(NGC5533['raw_band_btm']['xx'])\n", (2165, 2196), True, 'import numpy as np\n'), ((2224, 2265), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_band_btm']['yy']"], {}), "(NGC5533['raw_band_btm']['yy'])\n", (2234, 2265), True, 'import numpy as np\n'), ((2293, 2334), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_band_top']['xx']"], {}), "(NGC5533['raw_band_top']['xx'])\n", (2303, 2334), True, 'import numpy as np\n'), ((2362, 2403), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_band_top']['yy']"], {}), "(NGC5533['raw_band_top']['yy'])\n", (2372, 2403), True, 'import numpy as np\n'), ((2709, 2769), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['n_r_btmband']", "NGC5533['n_v_btmband']"], {}), "(NGC5533['n_r_btmband'], NGC5533['n_v_btmband'])\n", (2721, 2769), True, 'import scipy.interpolate as inter\n'), ((2821, 2881), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['n_r_topband']", "NGC5533['n_v_topband']"], {}), "(NGC5533['n_r_topband'], NGC5533['n_v_topband'])\n", (2833, 2881), True, 'import scipy.interpolate as inter\n'), ((2905, 2969), 
'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['n_tb']", "NGC5533['n_cb']", "NGC5533['n_kb']"], {}), "(NGC5533['n_tb'], NGC5533['n_cb'], NGC5533['n_kb'])\n", (2918, 2969), True, 'import scipy.interpolate as inter\n'), ((2994, 3058), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['n_tt']", "NGC5533['n_ct']", "NGC5533['n_kt']"], {}), "(NGC5533['n_tt'], NGC5533['n_ct'], NGC5533['n_kt'])\n", (3007, 3058), True, 'import scipy.interpolate as inter\n'), ((3290, 3348), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['total']['r']", "NGC5533['total']['v']"], {}), "(NGC5533['total']['r'], NGC5533['total']['v'])\n", (3302, 3348), True, 'import scipy.interpolate as inter\n'), ((3378, 3465), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['total']['t']", "NGC5533['total']['c']", "NGC5533['total']['k']"], {}), "(NGC5533['total']['t'], NGC5533['total']['c'], NGC5533['total'\n ]['k'])\n", (3391, 3465), True, 'import scipy.interpolate as inter\n'), ((3716, 3782), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['blackhole']['r']", "NGC5533['blackhole']['v']"], {}), "(NGC5533['blackhole']['r'], NGC5533['blackhole']['v'])\n", (3728, 3782), True, 'import scipy.interpolate as inter\n'), ((3816, 3915), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['blackhole']['t']", "NGC5533['blackhole']['c']", "NGC5533['blackhole']['k']"], {}), "(NGC5533['blackhole']['t'], NGC5533['blackhole']['c'], NGC5533\n ['blackhole']['k'])\n", (3829, 3915), True, 'import scipy.interpolate as inter\n'), ((4142, 4200), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['bulge']['r']", "NGC5533['bulge']['v']"], {}), "(NGC5533['bulge']['r'], NGC5533['bulge']['v'])\n", (4154, 4200), True, 'import scipy.interpolate as inter\n'), ((4230, 4317), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['bulge']['t']", "NGC5533['bulge']['c']", "NGC5533['bulge']['k']"], {}), "(NGC5533['bulge']['t'], NGC5533['bulge']['c'], NGC5533['bulge'\n ]['k'])\n", (4243, 4317), 
True, 'import scipy.interpolate as inter\n'), ((4538, 4594), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['disk']['r']", "NGC5533['disk']['v']"], {}), "(NGC5533['disk']['r'], NGC5533['disk']['v'])\n", (4550, 4594), True, 'import scipy.interpolate as inter\n'), ((4623, 4702), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['disk']['t']", "NGC5533['disk']['c']", "NGC5533['disk']['k']"], {}), "(NGC5533['disk']['t'], NGC5533['disk']['c'], NGC5533['disk']['k'])\n", (4636, 4702), True, 'import scipy.interpolate as inter\n'), ((4928, 4984), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['halo']['r']", "NGC5533['halo']['v']"], {}), "(NGC5533['halo']['r'], NGC5533['halo']['v'])\n", (4940, 4984), True, 'import scipy.interpolate as inter\n'), ((5013, 5092), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['halo']['t']", "NGC5533['halo']['c']", "NGC5533['halo']['k']"], {}), "(NGC5533['halo']['t'], NGC5533['halo']['c'], NGC5533['halo']['k'])\n", (5026, 5092), True, 'import scipy.interpolate as inter\n'), ((5312, 5366), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5533['gas']['r']", "NGC5533['gas']['v']"], {}), "(NGC5533['gas']['r'], NGC5533['gas']['v'])\n", (5324, 5366), True, 'import scipy.interpolate as inter\n'), ((5394, 5470), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5533['gas']['t']", "NGC5533['gas']['c']", "NGC5533['gas']['k']"], {}), "(NGC5533['gas']['t'], NGC5533['gas']['c'], NGC5533['gas']['k'])\n", (5407, 5470), True, 'import scipy.interpolate as inter\n'), ((6346, 6388), 'numpy.asarray', 'np.asarray', (["NGC0891['measured_data']['xx']"], {}), "(NGC0891['measured_data']['xx'])\n", (6356, 6388), True, 'import numpy as np\n'), ((6415, 6457), 'numpy.asarray', 'np.asarray', (["NGC0891['measured_data']['yy']"], {}), "(NGC0891['measured_data']['yy'])\n", (6425, 6457), True, 'import numpy as np\n'), ((6484, 6526), 'numpy.asarray', 'np.asarray', (["NGC0891['measured_data']['ex']"], {}), "(NGC0891['measured_data']['ex'])\n", 
(6494, 6526), True, 'import numpy as np\n'), ((6553, 6595), 'numpy.asarray', 'np.asarray', (["NGC0891['measured_data']['ey']"], {}), "(NGC0891['measured_data']['ey'])\n", (6563, 6595), True, 'import numpy as np\n'), ((6827, 6885), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC0891['bulge']['r']", "NGC0891['bulge']['v']"], {}), "(NGC0891['bulge']['r'], NGC0891['bulge']['v'])\n", (6839, 6885), True, 'import scipy.interpolate as inter\n'), ((6915, 7002), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC0891['bulge']['t']", "NGC0891['bulge']['c']", "NGC0891['bulge']['k']"], {}), "(NGC0891['bulge']['t'], NGC0891['bulge']['c'], NGC0891['bulge'\n ]['k'])\n", (6928, 7002), True, 'import scipy.interpolate as inter\n'), ((7223, 7279), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC0891['disk']['r']", "NGC0891['disk']['v']"], {}), "(NGC0891['disk']['r'], NGC0891['disk']['v'])\n", (7235, 7279), True, 'import scipy.interpolate as inter\n'), ((7308, 7387), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC0891['disk']['t']", "NGC0891['disk']['c']", "NGC0891['disk']['k']"], {}), "(NGC0891['disk']['t'], NGC0891['disk']['c'], NGC0891['disk']['k'])\n", (7321, 7387), True, 'import scipy.interpolate as inter\n'), ((7607, 7661), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC0891['gas']['r']", "NGC0891['gas']['v']"], {}), "(NGC0891['gas']['r'], NGC0891['gas']['v'])\n", (7619, 7661), True, 'import scipy.interpolate as inter\n'), ((7689, 7765), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC0891['gas']['t']", "NGC0891['gas']['c']", "NGC0891['gas']['k']"], {}), "(NGC0891['gas']['t'], NGC0891['gas']['c'], NGC0891['gas']['k'])\n", (7702, 7765), True, 'import scipy.interpolate as inter\n'), ((8717, 8759), 'numpy.asarray', 'np.asarray', (["NGC7814['measured_data']['xx']"], {}), "(NGC7814['measured_data']['xx'])\n", (8727, 8759), True, 'import numpy as np\n'), ((8786, 8828), 'numpy.asarray', 'np.asarray', (["NGC7814['measured_data']['yy']"], {}), 
"(NGC7814['measured_data']['yy'])\n", (8796, 8828), True, 'import numpy as np\n'), ((8855, 8897), 'numpy.asarray', 'np.asarray', (["NGC7814['measured_data']['ex']"], {}), "(NGC7814['measured_data']['ex'])\n", (8865, 8897), True, 'import numpy as np\n'), ((8924, 8966), 'numpy.asarray', 'np.asarray', (["NGC7814['measured_data']['ey']"], {}), "(NGC7814['measured_data']['ey'])\n", (8934, 8966), True, 'import numpy as np\n'), ((9198, 9256), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC7814['bulge']['r']", "NGC7814['bulge']['v']"], {}), "(NGC7814['bulge']['r'], NGC7814['bulge']['v'])\n", (9210, 9256), True, 'import scipy.interpolate as inter\n'), ((9286, 9373), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC7814['bulge']['t']", "NGC7814['bulge']['c']", "NGC7814['bulge']['k']"], {}), "(NGC7814['bulge']['t'], NGC7814['bulge']['c'], NGC7814['bulge'\n ]['k'])\n", (9299, 9373), True, 'import scipy.interpolate as inter\n'), ((9594, 9650), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC7814['disk']['r']", "NGC7814['disk']['v']"], {}), "(NGC7814['disk']['r'], NGC7814['disk']['v'])\n", (9606, 9650), True, 'import scipy.interpolate as inter\n'), ((9679, 9758), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC7814['disk']['t']", "NGC7814['disk']['c']", "NGC7814['disk']['k']"], {}), "(NGC7814['disk']['t'], NGC7814['disk']['c'], NGC7814['disk']['k'])\n", (9692, 9758), True, 'import scipy.interpolate as inter\n'), ((9978, 10032), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC7814['gas']['r']", "NGC7814['gas']['v']"], {}), "(NGC7814['gas']['r'], NGC7814['gas']['v'])\n", (9990, 10032), True, 'import scipy.interpolate as inter\n'), ((10060, 10136), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC7814['gas']['t']", "NGC7814['gas']['c']", "NGC7814['gas']['k']"], {}), "(NGC7814['gas']['t'], NGC7814['gas']['c'], NGC7814['gas']['k'])\n", (10073, 10136), True, 'import scipy.interpolate as inter\n'), ((10712, 10754), 'numpy.asarray', 'np.asarray', 
(["NGC5005['measured_data']['xx']"], {}), "(NGC5005['measured_data']['xx'])\n", (10722, 10754), True, 'import numpy as np\n'), ((10781, 10823), 'numpy.asarray', 'np.asarray', (["NGC5005['measured_data']['yy']"], {}), "(NGC5005['measured_data']['yy'])\n", (10791, 10823), True, 'import numpy as np\n'), ((10850, 10892), 'numpy.asarray', 'np.asarray', (["NGC5005['measured_data']['ex']"], {}), "(NGC5005['measured_data']['ex'])\n", (10860, 10892), True, 'import numpy as np\n'), ((10919, 10961), 'numpy.asarray', 'np.asarray', (["NGC5005['measured_data']['ey']"], {}), "(NGC5005['measured_data']['ey'])\n", (10929, 10961), True, 'import numpy as np\n'), ((11193, 11251), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5005['bulge']['r']", "NGC5005['bulge']['v']"], {}), "(NGC5005['bulge']['r'], NGC5005['bulge']['v'])\n", (11205, 11251), True, 'import scipy.interpolate as inter\n'), ((11281, 11368), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5005['bulge']['t']", "NGC5005['bulge']['c']", "NGC5005['bulge']['k']"], {}), "(NGC5005['bulge']['t'], NGC5005['bulge']['c'], NGC5005['bulge'\n ]['k'])\n", (11294, 11368), True, 'import scipy.interpolate as inter\n'), ((11589, 11645), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5005['disk']['r']", "NGC5005['disk']['v']"], {}), "(NGC5005['disk']['r'], NGC5005['disk']['v'])\n", (11601, 11645), True, 'import scipy.interpolate as inter\n'), ((11674, 11753), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5005['disk']['t']", "NGC5005['disk']['c']", "NGC5005['disk']['k']"], {}), "(NGC5005['disk']['t'], NGC5005['disk']['c'], NGC5005['disk']['k'])\n", (11687, 11753), True, 'import scipy.interpolate as inter\n'), ((11979, 12035), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5005['halo']['r']", "NGC5005['halo']['v']"], {}), "(NGC5005['halo']['r'], NGC5005['halo']['v'])\n", (11991, 12035), True, 'import scipy.interpolate as inter\n'), ((12064, 12143), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5005['halo']['t']", 
"NGC5005['halo']['c']", "NGC5005['halo']['k']"], {}), "(NGC5005['halo']['t'], NGC5005['halo']['c'], NGC5005['halo']['k'])\n", (12077, 12143), True, 'import scipy.interpolate as inter\n'), ((12363, 12417), 'scipy.interpolate.splrep', 'inter.splrep', (["NGC5005['gas']['r']", "NGC5005['gas']['v']"], {}), "(NGC5005['gas']['r'], NGC5005['gas']['v'])\n", (12375, 12417), True, 'import scipy.interpolate as inter\n'), ((12445, 12521), 'scipy.interpolate.BSpline', 'inter.BSpline', (["NGC5005['gas']['t']", "NGC5005['gas']['c']", "NGC5005['gas']['k']"], {}), "(NGC5005['gas']['t'], NGC5005['gas']['c'], NGC5005['gas']['k'])\n", (12458, 12521), True, 'import scipy.interpolate as inter\n'), ((13100, 13142), 'numpy.asarray', 'np.asarray', (["NGC3198['measured_data']['xx']"], {}), "(NGC3198['measured_data']['xx'])\n", (13110, 13142), True, 'import numpy as np\n'), ((13169, 13211), 'numpy.asarray', 'np.asarray', (["NGC3198['measured_data']['yy']"], {}), "(NGC3198['measured_data']['yy'])\n", (13179, 13211), True, 'import numpy as np\n'), ((13238, 13280), 'numpy.asarray', 'np.asarray', (["NGC3198['measured_data']['ey']"], {}), "(NGC3198['measured_data']['ey'])\n", (13248, 13280), True, 'import numpy as np\n'), ((13430, 13470), 'numpy.asarray', 'np.asarray', (["UGC89['measured_data']['xx']"], {}), "(UGC89['measured_data']['xx'])\n", (13440, 13470), True, 'import numpy as np\n'), ((13495, 13535), 'numpy.asarray', 'np.asarray', (["UGC89['measured_data']['yy']"], {}), "(UGC89['measured_data']['yy'])\n", (13505, 13535), True, 'import numpy as np\n'), ((13560, 13600), 'numpy.asarray', 'np.asarray', (["UGC89['measured_data']['ey']"], {}), "(UGC89['measured_data']['ey'])\n", (13570, 13600), True, 'import numpy as np\n'), ((13750, 13791), 'numpy.asarray', 'np.asarray', (["UGC477['measured_data']['xx']"], {}), "(UGC477['measured_data']['xx'])\n", (13760, 13791), True, 'import numpy as np\n'), ((13817, 13858), 'numpy.asarray', 'np.asarray', (["UGC477['measured_data']['yy']"], {}), 
"(UGC477['measured_data']['yy'])\n", (13827, 13858), True, 'import numpy as np\n'), ((13884, 13925), 'numpy.asarray', 'np.asarray', (["UGC477['measured_data']['ey']"], {}), "(UGC477['measured_data']['ey'])\n", (13894, 13925), True, 'import numpy as np\n'), ((14080, 14122), 'numpy.asarray', 'np.asarray', (["UGC1281['measured_data']['xx']"], {}), "(UGC1281['measured_data']['xx'])\n", (14090, 14122), True, 'import numpy as np\n'), ((14149, 14191), 'numpy.asarray', 'np.asarray', (["UGC1281['measured_data']['yy']"], {}), "(UGC1281['measured_data']['yy'])\n", (14159, 14191), True, 'import numpy as np\n'), ((14218, 14260), 'numpy.asarray', 'np.asarray', (["UGC1281['measured_data']['ey']"], {}), "(UGC1281['measured_data']['ey'])\n", (14228, 14260), True, 'import numpy as np\n'), ((14418, 14460), 'numpy.asarray', 'np.asarray', (["UGC1437['measured_data']['xx']"], {}), "(UGC1437['measured_data']['xx'])\n", (14428, 14460), True, 'import numpy as np\n'), ((14487, 14529), 'numpy.asarray', 'np.asarray', (["UGC1437['measured_data']['yy']"], {}), "(UGC1437['measured_data']['yy'])\n", (14497, 14529), True, 'import numpy as np\n'), ((14556, 14598), 'numpy.asarray', 'np.asarray', (["UGC1437['measured_data']['ey']"], {}), "(UGC1437['measured_data']['ey'])\n", (14566, 14598), True, 'import numpy as np\n'), ((14756, 14798), 'numpy.asarray', 'np.asarray', (["UGC2953['measured_data']['xx']"], {}), "(UGC2953['measured_data']['xx'])\n", (14766, 14798), True, 'import numpy as np\n'), ((14825, 14867), 'numpy.asarray', 'np.asarray', (["UGC2953['measured_data']['yy']"], {}), "(UGC2953['measured_data']['yy'])\n", (14835, 14867), True, 'import numpy as np\n'), ((14894, 14936), 'numpy.asarray', 'np.asarray', (["UGC2953['measured_data']['ey']"], {}), "(UGC2953['measured_data']['ey'])\n", (14904, 14936), True, 'import numpy as np\n'), ((15093, 15135), 'numpy.asarray', 'np.asarray', (["UGC4325['measured_data']['xx']"], {}), "(UGC4325['measured_data']['xx'])\n", (15103, 15135), True, 'import numpy as 
np\n'), ((15162, 15204), 'numpy.asarray', 'np.asarray', (["UGC4325['measured_data']['yy']"], {}), "(UGC4325['measured_data']['yy'])\n", (15172, 15204), True, 'import numpy as np\n'), ((15231, 15273), 'numpy.asarray', 'np.asarray', (["UGC4325['measured_data']['ey']"], {}), "(UGC4325['measured_data']['ey'])\n", (15241, 15273), True, 'import numpy as np\n'), ((15430, 15472), 'numpy.asarray', 'np.asarray', (["UGC5253['measured_data']['xx']"], {}), "(UGC5253['measured_data']['xx'])\n", (15440, 15472), True, 'import numpy as np\n'), ((15499, 15541), 'numpy.asarray', 'np.asarray', (["UGC5253['measured_data']['yy']"], {}), "(UGC5253['measured_data']['yy'])\n", (15509, 15541), True, 'import numpy as np\n'), ((15568, 15610), 'numpy.asarray', 'np.asarray', (["UGC5253['measured_data']['ey']"], {}), "(UGC5253['measured_data']['ey'])\n", (15578, 15610), True, 'import numpy as np\n'), ((15770, 15812), 'numpy.asarray', 'np.asarray', (["UGC6787['measured_data']['xx']"], {}), "(UGC6787['measured_data']['xx'])\n", (15780, 15812), True, 'import numpy as np\n'), ((15839, 15881), 'numpy.asarray', 'np.asarray', (["UGC6787['measured_data']['yy']"], {}), "(UGC6787['measured_data']['yy'])\n", (15849, 15881), True, 'import numpy as np\n'), ((15908, 15950), 'numpy.asarray', 'np.asarray', (["UGC6787['measured_data']['ey']"], {}), "(UGC6787['measured_data']['ey'])\n", (15918, 15950), True, 'import numpy as np\n'), ((16114, 16157), 'numpy.asarray', 'np.asarray', (["UGC10075['measured_data']['xx']"], {}), "(UGC10075['measured_data']['xx'])\n", (16124, 16157), True, 'import numpy as np\n'), ((16185, 16228), 'numpy.asarray', 'np.asarray', (["UGC10075['measured_data']['yy']"], {}), "(UGC10075['measured_data']['yy'])\n", (16195, 16228), True, 'import numpy as np\n'), ((16256, 16299), 'numpy.asarray', 'np.asarray', (["UGC10075['measured_data']['ey']"], {}), "(UGC10075['measured_data']['ey'])\n", (16266, 16299), True, 'import numpy as np\n'), ((640, 691), 'dataPython.getXYdata', 'dp.getXYdata', 
(['"""data/NGC5533/noord-120kpc-total.txt"""'], {}), "('data/NGC5533/noord-120kpc-total.txt')\n", (652, 691), True, 'import dataPython as dp\n'), ((723, 778), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-blackhole.txt"""'], {}), "('data/NGC5533/noord-120kpc-blackhole.txt')\n", (735, 778), True, 'import dataPython as dp\n'), ((806, 857), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-bulge.txt"""'], {}), "('data/NGC5533/noord-120kpc-bulge.txt')\n", (818, 857), True, 'import dataPython as dp\n'), ((889, 939), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-disk.txt"""'], {}), "('data/NGC5533/noord-120kpc-disk.txt')\n", (901, 939), True, 'import dataPython as dp\n'), ((972, 1022), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-halo.txt"""'], {}), "('data/NGC5533/noord-120kpc-halo.txt')\n", (984, 1022), True, 'import dataPython as dp\n'), ((1055, 1104), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-gas.txt"""'], {}), "('data/NGC5533/noord-120kpc-gas.txt')\n", (1067, 1104), True, 'import dataPython as dp\n'), ((1138, 1194), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-bottomband.txt"""'], {}), "('data/NGC5533/noord-120kpc-bottomband.txt')\n", (1150, 1194), True, 'import dataPython as dp\n'), ((1221, 1274), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5533/noord-120kpc-topband.txt"""'], {}), "('data/NGC5533/noord-120kpc-topband.txt')\n", (1233, 1274), True, 'import dataPython as dp\n'), ((1337, 1388), 'dataPython.getXYdata_wXYerr', 'dp.getXYdata_wXYerr', (['"""data/NGC5533/100kpc_data.txt"""'], {}), "('data/NGC5533/100kpc_data.txt')\n", (1356, 1388), True, 'import dataPython as dp\n'), ((3129, 3167), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_total']['xx']"], {}), "(NGC5533['raw_total']['xx'])\n", (3139, 3167), True, 'import numpy as np\n'), ((3179, 3217), 'numpy.asarray', 'np.asarray', 
(["NGC5533['raw_total']['yy']"], {}), "(NGC5533['raw_total']['yy'])\n", (3189, 3217), True, 'import numpy as np\n'), ((3535, 3577), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_blackhole']['xx']"], {}), "(NGC5533['raw_blackhole']['xx'])\n", (3545, 3577), True, 'import numpy as np\n'), ((3589, 3631), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_blackhole']['yy']"], {}), "(NGC5533['raw_blackhole']['yy'])\n", (3599, 3631), True, 'import numpy as np\n'), ((3981, 4019), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_bulge']['xx']"], {}), "(NGC5533['raw_bulge']['xx'])\n", (3991, 4019), True, 'import numpy as np\n'), ((4031, 4069), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_bulge']['yy']"], {}), "(NGC5533['raw_bulge']['yy'])\n", (4041, 4069), True, 'import numpy as np\n'), ((4382, 4419), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_disk']['xx']"], {}), "(NGC5533['raw_disk']['xx'])\n", (4392, 4419), True, 'import numpy as np\n'), ((4431, 4468), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_disk']['yy']"], {}), "(NGC5533['raw_disk']['yy'])\n", (4441, 4468), True, 'import numpy as np\n'), ((4772, 4809), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_halo']['xx']"], {}), "(NGC5533['raw_halo']['xx'])\n", (4782, 4809), True, 'import numpy as np\n'), ((4821, 4858), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_halo']['yy']"], {}), "(NGC5533['raw_halo']['yy'])\n", (4831, 4858), True, 'import numpy as np\n'), ((5161, 5197), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_gas']['xx']"], {}), "(NGC5533['raw_gas']['xx'])\n", (5171, 5197), True, 'import numpy as np\n'), ((5209, 5245), 'numpy.asarray', 'np.asarray', (["NGC5533['raw_gas']['yy']"], {}), "(NGC5533['raw_gas']['yy'])\n", (5219, 5245), True, 'import numpy as np\n'), ((5612, 5656), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC0891/891_dtBulge.dat"""'], {}), "('data/NGC0891/891_dtBulge.dat')\n", (5624, 5656), True, 'import dataPython as dp\n'), ((5688, 5731), 'dataPython.getXYdata', 'dp.getXYdata', 
(['"""data/NGC0891/891_dtDisk.dat"""'], {}), "('data/NGC0891/891_dtDisk.dat')\n", (5700, 5731), True, 'import dataPython as dp\n'), ((5764, 5806), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC0891/891_dtGas.dat"""'], {}), "('data/NGC0891/891_dtGas.dat')\n", (5776, 5806), True, 'import dataPython as dp\n'), ((5856, 5900), 'dataPython.getXYdata_wXYerr', 'dp.getXYdata_wXYerr', (['"""data/NGC0891/891_data"""'], {}), "('data/NGC0891/891_data')\n", (5875, 5900), True, 'import dataPython as dp\n'), ((6666, 6704), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_bulge']['xx']"], {}), "(NGC0891['raw_bulge']['xx'])\n", (6676, 6704), True, 'import numpy as np\n'), ((6716, 6754), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_bulge']['yy']"], {}), "(NGC0891['raw_bulge']['yy'])\n", (6726, 6754), True, 'import numpy as np\n'), ((7067, 7104), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_disk']['xx']"], {}), "(NGC0891['raw_disk']['xx'])\n", (7077, 7104), True, 'import numpy as np\n'), ((7116, 7153), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_disk']['yy']"], {}), "(NGC0891['raw_disk']['yy'])\n", (7126, 7153), True, 'import numpy as np\n'), ((7456, 7492), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_gas']['xx']"], {}), "(NGC0891['raw_gas']['xx'])\n", (7466, 7492), True, 'import numpy as np\n'), ((7504, 7540), 'numpy.asarray', 'np.asarray', (["NGC0891['raw_gas']['yy']"], {}), "(NGC0891['raw_gas']['yy'])\n", (7514, 7540), True, 'import numpy as np\n'), ((7972, 8020), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC7814/7814reallybulge.dat"""'], {}), "('data/NGC7814/7814reallybulge.dat')\n", (7984, 8020), True, 'import dataPython as dp\n'), ((8052, 8099), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC7814/7814reallydisk.dat"""'], {}), "('data/NGC7814/7814reallydisk.dat')\n", (8064, 8099), True, 'import dataPython as dp\n'), ((8132, 8178), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC7814/7814reallygas.dat"""'], {}), 
"('data/NGC7814/7814reallygas.dat')\n", (8144, 8178), True, 'import dataPython as dp\n'), ((8228, 8275), 'dataPython.getXYdata_wXYerr', 'dp.getXYdata_wXYerr', (['"""data/NGC7814/ngc7814data"""'], {}), "('data/NGC7814/ngc7814data')\n", (8247, 8275), True, 'import dataPython as dp\n'), ((9037, 9075), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_bulge']['xx']"], {}), "(NGC7814['raw_bulge']['xx'])\n", (9047, 9075), True, 'import numpy as np\n'), ((9087, 9125), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_bulge']['yy']"], {}), "(NGC7814['raw_bulge']['yy'])\n", (9097, 9125), True, 'import numpy as np\n'), ((9438, 9475), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_disk']['xx']"], {}), "(NGC7814['raw_disk']['xx'])\n", (9448, 9475), True, 'import numpy as np\n'), ((9487, 9524), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_disk']['yy']"], {}), "(NGC7814['raw_disk']['yy'])\n", (9497, 9524), True, 'import numpy as np\n'), ((9827, 9863), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_gas']['xx']"], {}), "(NGC7814['raw_gas']['xx'])\n", (9837, 9863), True, 'import numpy as np\n'), ((9875, 9911), 'numpy.asarray', 'np.asarray', (["NGC7814['raw_gas']['yy']"], {}), "(NGC7814['raw_gas']['yy'])\n", (9885, 9911), True, 'import numpy as np\n'), ((10278, 10324), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5005/ngc5005_bulge.txt"""'], {}), "('data/NGC5005/ngc5005_bulge.txt')\n", (10290, 10324), True, 'import dataPython as dp\n'), ((10356, 10401), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5005/ngc5005_disk.txt"""'], {}), "('data/NGC5005/ngc5005_disk.txt')\n", (10368, 10401), True, 'import dataPython as dp\n'), ((10434, 10479), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5005/ngc5005_halo.txt"""'], {}), "('data/NGC5005/ngc5005_halo.txt')\n", (10446, 10479), True, 'import dataPython as dp\n'), ((10512, 10556), 'dataPython.getXYdata', 'dp.getXYdata', (['"""data/NGC5005/ngc5005_gas.txt"""'], {}), "('data/NGC5005/ngc5005_gas.txt')\n", (10524, 10556), 
True, 'import dataPython as dp\n'), ((10606, 10658), 'dataPython.getXYdata_wXYerr', 'dp.getXYdata_wXYerr', (['"""data/NGC5005/ngc5005_data.txt"""'], {}), "('data/NGC5005/ngc5005_data.txt')\n", (10625, 10658), True, 'import dataPython as dp\n'), ((11032, 11070), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_bulge']['xx']"], {}), "(NGC5005['raw_bulge']['xx'])\n", (11042, 11070), True, 'import numpy as np\n'), ((11082, 11120), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_bulge']['yy']"], {}), "(NGC5005['raw_bulge']['yy'])\n", (11092, 11120), True, 'import numpy as np\n'), ((11433, 11470), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_disk']['xx']"], {}), "(NGC5005['raw_disk']['xx'])\n", (11443, 11470), True, 'import numpy as np\n'), ((11482, 11519), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_disk']['yy']"], {}), "(NGC5005['raw_disk']['yy'])\n", (11492, 11519), True, 'import numpy as np\n'), ((11823, 11860), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_halo']['xx']"], {}), "(NGC5005['raw_halo']['xx'])\n", (11833, 11860), True, 'import numpy as np\n'), ((11872, 11909), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_halo']['yy']"], {}), "(NGC5005['raw_halo']['yy'])\n", (11882, 11909), True, 'import numpy as np\n'), ((12212, 12248), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_gas']['xx']"], {}), "(NGC5005['raw_gas']['xx'])\n", (12222, 12248), True, 'import numpy as np\n'), ((12260, 12296), 'numpy.asarray', 'np.asarray', (["NGC5005['raw_gas']['yy']"], {}), "(NGC5005['raw_gas']['yy'])\n", (12270, 12296), True, 'import numpy as np\n'), ((13020, 13072), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/NGC3198.txt"""'], {}), "('data/othergalaxies/NGC3198.txt')\n", (13038, 13072), True, 'import dataPython as dp\n'), ((13354, 13404), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC89.txt"""'], {}), "('data/othergalaxies/UGC89.txt')\n", (13372, 13404), True, 'import dataPython as dp\n'), ((13672, 13723), 
'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC477.txt"""'], {}), "('data/othergalaxies/UGC477.txt')\n", (13690, 13723), True, 'import dataPython as dp\n'), ((14000, 14052), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC1281.txt"""'], {}), "('data/othergalaxies/UGC1281.txt')\n", (14018, 14052), True, 'import dataPython as dp\n'), ((14338, 14390), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC1437.txt"""'], {}), "('data/othergalaxies/UGC1437.txt')\n", (14356, 14390), True, 'import dataPython as dp\n'), ((14676, 14728), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC2953.txt"""'], {}), "('data/othergalaxies/UGC2953.txt')\n", (14694, 14728), True, 'import dataPython as dp\n'), ((15013, 15065), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC4325.txt"""'], {}), "('data/othergalaxies/UGC4325.txt')\n", (15031, 15065), True, 'import dataPython as dp\n'), ((15350, 15402), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC5253.txt"""'], {}), "('data/othergalaxies/UGC5253.txt')\n", (15368, 15402), True, 'import dataPython as dp\n'), ((15690, 15742), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC6787.txt"""'], {}), "('data/othergalaxies/UGC6787.txt')\n", (15708, 15742), True, 'import dataPython as dp\n'), ((16032, 16085), 'dataPython.getXYdata_wYerr', 'dp.getXYdata_wYerr', (['"""data/othergalaxies/UGC10075.txt"""'], {}), "('data/othergalaxies/UGC10075.txt')\n", (16050, 16085), True, 'import dataPython as dp\n')] |
# -*- coding: utf-8 -*-
"""
@FileName : convert_to_pb.py
@Description : None
@Author : 齐鲁桐
@Email : <EMAIL>
@Time : 2019-04-01 19:25
@Modify : None
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
model = 'model.pb'
output_graph_def = tf.GraphDef()
def load_graph(model_file):
graph = tf.Graph()
graph_def = tf.GraphDef()
with open(model_file, "rb") as f:
graph_def.ParseFromString(f.read())
with graph.as_default():
tf.import_graph_def(graph_def)
return graph
import numpy as np
np.random.randn()
tf.gfile.GFile(model, "rb") | [
"numpy.random.randn",
"tensorflow.GraphDef",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.import_graph_def"
] | [((308, 321), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (319, 321), True, 'import tensorflow as tf\n'), ((596, 613), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (611, 613), True, 'import numpy as np\n'), ((614, 641), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model', '"""rb"""'], {}), "(model, 'rb')\n", (628, 641), True, 'import tensorflow as tf\n'), ((364, 374), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (372, 374), True, 'import tensorflow as tf\n'), ((391, 404), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (402, 404), True, 'import tensorflow as tf\n'), ((525, 555), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {}), '(graph_def)\n', (544, 555), True, 'import tensorflow as tf\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 07:53:25 2022
@author: Administrator
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 1 15:18:39 2022
@author: NeoChen
"""
from pathlib import Path
import scipy.io.wavfile
from scipy import signal
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
# read file #
data_dir = Path(__file__).parent.parent / 'audio'
# =============================================================================
ref_path = data_dir / 'speech.wav'
deg_path = data_dir / 'speech_bab_0dB.wav'
degtest_path = data_dir / 'mixed_01081_jackhammer.wav'
# =============================================================================
#ref_path = data_dir / 'vocal_01081.wav'
#deg_path = data_dir / 'mixed_01081_jackhammer.wav'
sample_rate1, ref = scipy.io.wavfile.read(ref_path)
sample_rate2, deg = scipy.io.wavfile.read(deg_path)
sample_rate3, degtest = scipy.io.wavfile.read(degtest_path)
f1, t1, Zxx1 = signal.stft(ref, sample_rate1, nperseg=1000)
f2, t2, Zxx2 = signal.stft(deg, sample_rate2, nperseg=1000)
f3, t3, Zxx3 = signal.stft(degtest, sample_rate2, nperseg=1000)
# deep ML CNN#
# =============================================================================
# from keras.models import Sequential
# from keras.layers.core import Dense, Dropout, Activation,Flatten
# from keras.layers.embeddings import Embedding
#
# model = Sequential()
# model.add(Embedding(output_dim=32,
# input_dim=2000,
# input_length=100))
# model.add(Dropout(0.2))
# model.add(Flatten())
# model.add(Dense(units=256,
# activation='relu' ))
# model.add(Dropout(0.2))
# model.add(Dense(units=1,
# activation='sigmoid' ))
# model.summary()
#
# model.compile(loss='binary_crossentropy',metrics=['accuracy'])
# #進行訓練
# #batch_size:每一批次訓練100筆資料
# #epochs:執行10個訓練週期
# #verbose:顯示每次的訓練過程
# #validation_split:測試資料的比例
# train_history =model.fit(x_train, y_train,batch_size=100,
# epochs=10,verbose=2,validation_split=0.25)
#
# #評估訓練模型的準確率
# acu = model.evaluate(x_test, y_test, verbose=1)
# acu[1]
# =============================================================================
# ML LinearRegression #
# =============================================================================
# from sklearn.linear_model import LinearRegression
# model = LinearRegression(fit_intercept=True)
#
# model.fit(x[:, np.newaxis], y)
#
# xfit = np.linspace(0, 10, 1000)
# yfit = model.predict(xfit[:, np.newaxis])
#
# plt.scatter(x, y)
# plt.plot(xfit, yfit);
# =============================================================================
# =============================================================================
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler(feature_range=(0, 1))#
# normalized_stft = scaler.transform(stft)
# scaler.fit(stft)
# features_convolution = np.reshape(normalized_stft,(400,1025, -1,1))
#
# model = Sequential()
#
# model.add(Conv2D(16, (3, 3), input_shape=features_convolution.shape[1:]))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# #'''
# #model.add(Dropout(0.2))
#
# model.add(Conv2D(32, (3, 3)))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
#
# #model.add(Dropout(0.2))
#
# #'''
# #'''
# model.add(Conv2D(64, (3, 3),padding='same'))
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# #'''
#
#
# model.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors
#
# #model.add(Dense(1000))#input_shape=features.shape[1:]
# model.add(Dense(64))#input_shape=features.shape[1:]
#
# model.add(Dense(10))
# model.add(Activation('softmax'))
# sgd = optimizers.SGD(lr=0.0000001, decay=1e-6, momentum=0.9, nesterov=True)
#
# model.compile(loss='categorical_crossentropy',
# optimizer='adam',
# metrics=['accuracy'])
# =============================================================================
# --- Prepare CNN input: scale the STFT magnitudes into [0, 1] and reshape into
# (samples, height, width, channels) as expected by Conv2D. ---
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler(feature_range=(0, 1))
# Zxx2 is the STFT matrix computed earlier in the file -- TODO confirm shape is (501*?, 101)
scaler.fit(Zxx2.astype(float))
normalized_stft = scaler.transform(Zxx2.astype(float))
features_convolution = np.reshape(normalized_stft, (501, 101, -1, 1))
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from keras import optimizers
import keras
model = Sequential()
features_convolution_float = features_convolution/1000
# Bug fix: the first layer must be a Conv2D that declares the input shape.
# Previously the Conv2D line was commented out, so the model started with a
# bare Activation and had no input layer at all.
model.add(Conv2D(16, (3, 3), input_shape=features_convolution.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())  # this converts our 3D feature maps to 1D feature vectors
model.add(Dense(1000))
model.add(Dense(64))
model.add(Dense(10))
model.add(Activation('softmax'))
#sgd = optimizers.SGD(lr=0.0000001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Bug fix: `model.fit` was called with labels=None, which cannot train a
# supervised model. One-hot encode the integer labels instead.
# NOTE(review): assumes `labels` is defined earlier in the file -- confirm.
y = keras.utils.to_categorical(labels, num_classes=10, dtype='float32')
history = model.fit(features_convolution, y, batch_size=8, epochs=40, validation_split=0.2)
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"sklearn.preprocessing.MinMaxScaler",
"pathlib.Path",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Sequential",
"numpy.reshape",
"scipy.si... | [((978, 1022), 'scipy.signal.stft', 'signal.stft', (['ref', 'sample_rate1'], {'nperseg': '(1000)'}), '(ref, sample_rate1, nperseg=1000)\n', (989, 1022), False, 'from scipy import signal\n'), ((1038, 1082), 'scipy.signal.stft', 'signal.stft', (['deg', 'sample_rate2'], {'nperseg': '(1000)'}), '(deg, sample_rate2, nperseg=1000)\n', (1049, 1082), False, 'from scipy import signal\n'), ((1099, 1147), 'scipy.signal.stft', 'signal.stft', (['degtest', 'sample_rate2'], {'nperseg': '(1000)'}), '(degtest, sample_rate2, nperseg=1000)\n', (1110, 1147), False, 'from scipy import signal\n'), ((4114, 4148), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (4126, 4148), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((4258, 4304), 'numpy.reshape', 'np.reshape', (['normalized_stft', '(501, 101, -1, 1)'], {}), '(normalized_stft, (501, 101, -1, 1))\n', (4268, 4304), True, 'import numpy as np\n'), ((4530, 4542), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (4540, 4542), False, 'from tensorflow.keras.models import Sequential\n'), ((4781, 4799), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (4791, 4799), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((4811, 4841), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4823, 4841), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((4858, 4870), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4865, 4870), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((4883, 4901), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (4889, 4901), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((4913, 4931), 'tensorflow.keras.layers.Activation', 'Activation', 
(['"""relu"""'], {}), "('relu')\n", (4923, 4931), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((4943, 4973), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4955, 4973), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((4986, 4998), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (4993, 4998), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5011, 5045), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (5017, 5045), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((5056, 5074), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (5066, 5074), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5086, 5116), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5098, 5116), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((5129, 5138), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (5136, 5138), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5210, 5221), 'tensorflow.keras.layers.Dense', 'Dense', (['(1000)'], {}), '(1000)\n', (5215, 5221), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5264, 5273), 'tensorflow.keras.layers.Dense', 'Dense', (['(64)'], {}), '(64)\n', (5269, 5273), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5317, 5326), 'tensorflow.keras.layers.Dense', 'Dense', (['(10)'], {}), '(10)\n', (5322, 5326), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((5338, 5359), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), 
"('softmax')\n", (5348, 5359), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((372, 386), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (376, 386), False, 'from pathlib import Path\n')] |
import itertools
from collections import Counter
from typing import List, Optional, Dict, Tuple
import copy
import numpy as np
from hotpot.data_handling.dataset import ListBatcher, Dataset, QuestionAndParagraphsSpec, QuestionAndParagraphsDataset, \
Preprocessor, SampleFilter, TrainingDataHandler
from hotpot.data_handling.hotpot.hotpot_data import HotpotQuestion, HotpotQuestions
from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, \
IterativeQuestionAndParagraphs
from hotpot.utils import flatten_iterable
def get_segments_from_sentences(sentences: List[List[str]]):
    """Map every token to the index of its (non-empty) sentence.

    Empty sentences are skipped entirely and do not consume an index, so the
    produced segment ids are contiguous over the non-empty sentences only.

    :param sentences: tokenized sentences (a list of token lists)
    :return: a flat list with one segment id per token
    """
    non_empty = (sent for sent in sentences if sent)
    segment_ids = []
    for seg_idx, sent in enumerate(non_empty):
        segment_ids.extend([seg_idx] * len(sent))
    return segment_ids
class HotpotStratifiedBinaryQuestionParagraphPairsDataset(QuestionAndParagraphsDataset):
    """ A class for handling a binary classification dataset for hotpot:
        - each sample is a question and two paragraphs
        - each sample is labeled with 0 or 1 - is the pair the gold one or not
        - in each epoch, each question will appear 4 times: with both gold paragraphs,
        with one gold and one false (for each gold), and one without gold at all.
        The distractors will be chosen at random each epoch

        All randomness is drawn from a single seeded np.random.RandomState so that the
        sampled epochs are reproducible for a given sample_seed.
    """

    def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher, fixed_dataset=False, sample_seed=18,
                 add_gold_distractor=True):
        """
        :param questions: the hotpot questions to sample from
        :param batcher: groups the generated samples into batches
        :param fixed_dataset: if True, the same epoch (samples and order) is reused every call
        :param sample_seed: seed for the internal RandomState, for reproducible sampling
        :param add_gold_distractor: if True, also add a negative built from another question's gold pair
        """
        self.questions = questions
        self.batcher = batcher
        self.fixed_dataset = fixed_dataset
        self.add_gold_distractor = add_gold_distractor
        self.random = np.random.RandomState(seed=sample_seed)
        self.gold_samples = self._build_gold_samples()
        self.epoch_samples = None

    def _build_gold_samples(self):
        """Create one positive (label 1) sample per question from its two supporting facts."""
        gold_samples = []
        for question in self.questions:
            pars = [flatten_iterable(question.supporting_facts[0].sentences),
                    flatten_iterable(question.supporting_facts[1].sentences)]
            self.random.shuffle(pars)
            gold_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, pars, 1, num_distractors=0,
                                                            question_id=question.question_id, q_type=question.q_type))
        return gold_samples

    def get_batches(self, n_batches):
        """Return the first n_batches batches of the current epoch (without resampling)."""
        if len(self) < n_batches:
            raise ValueError()
        return itertools.islice(self.get_epoch(new_epoch=False), n_batches)

    def get_samples(self, n_samples: int):
        """Resample an epoch and return batches over n_samples randomly chosen samples."""
        n_batches = self.batcher.epoch_size(n_samples)
        self.get_epoch()
        return self.batcher.get_epoch(self.random.choice(self.epoch_samples, n_samples, replace=False)), n_batches

    def get_epoch(self, new_epoch=True):
        """Build (or reuse) an epoch of positive and negative samples and return its batches.

        Negatives per question: one gold+distractor pair per supporting fact, one
        all-distractor pair, and optionally the gold pair of a different question.
        """
        if self.fixed_dataset:
            new_epoch = False
        if not new_epoch and self.epoch_samples is not None:
            return self.batcher.get_epoch(self.epoch_samples)
        false_samples = []
        for question in self.questions:
            two_distractors = [flatten_iterable(x.sentences) for x in self.random.choice(question.distractors, size=2,
                                                                                         replace=False)]
            true_and_false_1 = [flatten_iterable(question.supporting_facts[0].sentences), two_distractors[0]]
            true_and_false_2 = [flatten_iterable(question.supporting_facts[1].sentences), two_distractors[1]]
            self.random.shuffle(true_and_false_1)
            self.random.shuffle(true_and_false_2)
            false_samples.append(
                BinaryQuestionAndParagraphs(question.question_tokens, true_and_false_1, 0, num_distractors=1,
                                            question_id=question.question_id, q_type=question.q_type))
            false_samples.append(
                BinaryQuestionAndParagraphs(question.question_tokens, true_and_false_2, 0, num_distractors=1,
                                            question_id=question.question_id, q_type=question.q_type))
            # negative made of two random distractors only
            distractors_only = [flatten_iterable(x.sentences)
                                for x in self.random.choice(question.distractors, size=2, replace=False)]
            false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens, distractors_only, 0,
                                                            num_distractors=2, question_id=question.question_id,
                                                            q_type=question.q_type))
            if self.add_gold_distractor:
                # negative made of the gold paragraph pair of a *different* question
                rand_q_idx = self.random.randint(len(self.gold_samples))
                while self.gold_samples[rand_q_idx].question_id == question.question_id:
                    rand_q_idx = self.random.randint(len(self.gold_samples))
                selected_q = self.gold_samples[rand_q_idx]
                self.random.shuffle(selected_q.paragraphs)
                false_samples.append(BinaryQuestionAndParagraphs(question.question_tokens,
                                                                selected_q.paragraphs,
                                                                label=0, num_distractors=2,
                                                                question_id=question.question_id,
                                                                q_type=question.q_type))
        for gold in self.gold_samples:
            self.random.shuffle(gold.paragraphs)
        self.epoch_samples = self.gold_samples + false_samples
        # Bug fix: use the seeded RandomState rather than the global numpy RNG --
        # otherwise epoch order is not reproducible for a fixed sample_seed.
        self.random.shuffle(self.epoch_samples)
        return self.batcher.get_epoch(self.epoch_samples)

    def get_spec(self):
        """Describe the maximal batch dimensions of this dataset."""
        batch_size = self.batcher.get_fixed_batch_size()
        num_contexts = 2
        max_q_words = max(len(q.question_tokens) for q in self.questions)
        max_c_words = max(max(c.num_tokens for c in (q.distractors + q.supporting_facts)) for q in self.questions)
        return QuestionAndParagraphsSpec(batch_size=batch_size, max_num_contexts=num_contexts,
                                         max_num_question_words=max_q_words, max_num_context_words=max_c_words)

    def get_vocab(self):
        """Return the set of all tokens appearing in questions and paragraphs."""
        voc = set()
        for q in self.questions:
            voc.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                voc.update(flatten_iterable(para.sentences))
        return voc

    def get_word_counts(self):
        """Return a Counter over all tokens appearing in questions and paragraphs."""
        count = Counter()
        for q in self.questions:
            count.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                count.update(flatten_iterable(para.sentences))
        return count

    def __len__(self):
        # 1 gold sample + 3 negatives per question, +1 negative when add_gold_distractor
        len_mult = 5 if self.add_gold_distractor else 4
        return self.batcher.epoch_size(len(self.gold_samples) * len_mult)
class HotpotIterativeRetrievalDataset(QuestionAndParagraphsDataset):
    """ A class for handling an iterative retrieval dataset for hotpot.
    This dataset is far more complex than HotpotStratifiedBinaryQuestionParagraphPairsDataset, for the following reasons:
    - The order of the paragraphs matters:
        * The two paragraphs are supposed to act as paragraphs that have been retrieved in
            two iterations one after the other
        * In gold bridge examples, the first paragraph is the one with a higher tfidf score, as it is more likely to be
            the first supporting fact in the bridge
            We probably shouldn't use the opposite direction, as there might be glitches. So the lower ranked supporting
            fact should never be the first one, no matter the label
        * In comparison questions, both orders are fine
    - Each sample has two labels - one for each iteration. The second is 1 iff both paragraphs are gold.
    Sampling methods:
    - Comparison questions:
        * Gold: the two gold paragraphs, in both possible orders
        * first gold, second false: first is either one of the supporting facts, second is one of the distractors
          (or the same paragraphs twice)
    - Bridge questions:
        * Gold: the two gold paragraphs, with the one with higher score first
        * first gold, second false: the higher scored paragraphs with some distractor (or the same paragraphs twice)
    - General:
        * False 1: all distractors
        * False 2: first distractor, second one of supporting facts
        * False 3: a gold example from another question
    """

    def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher, fixed_dataset=False, sample_seed=18,
                 bridge_as_comparison=False, group_pairs_in_batches=False, label_by_span=False,
                 num_distractors_in_batch=1):
        # bridge_as_comparison: treat bridge questions like comparison ones (either gold order valid)
        # group_pairs_in_batches: emit each gold with its own negatives in the same batch (for ranking losses)
        # label_by_span: order the gold paragraphs by answer-span presence instead of gold_scores
        # num_distractors_in_batch: negatives sampled per gold when grouping pairs in batches
        self.questions = questions
        self.batcher = batcher
        self.fixed_dataset = fixed_dataset
        self.bridge_as_comparison = bridge_as_comparison
        self.group_pairs_in_batches = group_pairs_in_batches
        self.label_by_span = label_by_span
        self.num_distractors_in_batch = num_distractors_in_batch
        # grouped batches are resampled every epoch, so a fixed dataset is contradictory
        if fixed_dataset and group_pairs_in_batches:
            raise NotImplementedError()
        # span-based ordering is meaningless when order is ignored (comparison-like)
        if label_by_span and bridge_as_comparison:
            raise ValueError()
        self.qid2question = {q.question_id: q for q in questions}
        self.random = np.random.RandomState(seed=sample_seed)
        self.gold_samples = self._build_gold_samples()
        self.epoch_samples = None

    def _get_no_span_containing_golds(self, qid):
        """ we assume that a passage not containing the answer should be the first in the hops.
        If both contain (or not), they are regarded equal """
        return [idx for idx, span in enumerate(self.qid2question[qid].gold_spans) if len(span) == 0]

    def _build_gold_samples(self):
        """Create one positive sample per question, ordering the supporting facts per the class rules."""
        gold_samples = []
        for question in self.questions:
            if question.q_type == 'comparison' or self.bridge_as_comparison:
                # order does not matter -- pick a random one
                pars_order = [0, 1]
                self.random.shuffle(pars_order)
            else:
                if not self.label_by_span:
                    # higher-scored supporting fact goes first
                    pars_order = [0, 1] if question.gold_scores[0] > question.gold_scores[1] else [1, 0]
                else:
                    gold_idxs = self._get_no_span_containing_golds(question.question_id)
                    pars_order = [0, 1] if 0 in gold_idxs else [1, 0]
                    if len(gold_idxs) != 1:  # either both contain the answer or both don't contain, so regarded equal
                        self.random.shuffle(pars_order)
            pars = [flatten_iterable(question.supporting_facts[i].sentences) for i in pars_order]
            sentence_segs = [get_segments_from_sentences(question.supporting_facts[i].sentences) for i in pars_order]
            gold_samples.append(IterativeQuestionAndParagraphs(question.question_tokens, pars,
                                                               first_label=1, second_label=1,
                                                               question_id=question.question_id, q_type=question.q_type,
                                                               sentence_segments=sentence_segs))
        return gold_samples

    def get_batches(self, n_batches):
        """Return the first n_batches batches of the current epoch (without resampling)."""
        if len(self) < n_batches:
            raise ValueError()
        return itertools.islice(self.get_epoch(new_epoch=False), n_batches)

    def get_samples(self, n_samples: int):
        """Return batches built from n_samples randomly chosen gold samples (plus their negatives)."""
        # each gold expands to 5 samples in regular mode (1 gold + 4 negatives)
        n_batches = self.batcher.epoch_size(n_samples * (5 if not self.group_pairs_in_batches else 1))
        if not self.group_pairs_in_batches:
            return self._build_regular_batches(self.random.choice(self.gold_samples, n_samples, replace=False).tolist()), n_batches
        return self._build_pair_batches(self.random.choice(self.gold_samples, n_samples, replace=False).tolist()), n_batches

    def _sample_rand_par_other_q(self, qid):
        """Sample a random paragraph (gold or distractor) belonging to a different question."""
        rand_q_idx = self.random.randint(len(self.questions))
        while self.questions[rand_q_idx].question_id == qid:
            rand_q_idx = self.random.randint(len(self.questions))
        return self.random.choice(self.questions[rand_q_idx].supporting_facts + self.questions[rand_q_idx].distractors)

    def _sample_first_gold_second_false(self, qid):
        """Negative sample: a valid first gold paragraph followed by a non-gold second paragraph."""
        question = self.qid2question[qid]
        rand_par_other_q = self._sample_rand_par_other_q(qid)
        if question.q_type == 'comparison' or self.bridge_as_comparison:
            first_gold_par = question.supporting_facts[self.random.randint(2)]
        else:
            if not self.label_by_span:
                first_gold_idx = 0 if question.gold_scores[0] > question.gold_scores[1] else 1
                first_gold_par = question.supporting_facts[first_gold_idx]
            else:
                gold_idxs = self._get_no_span_containing_golds(question.question_id)
                if len(gold_idxs) == 1:
                    first_gold_par = question.supporting_facts[gold_idxs[0]]
                else:
                    first_gold_par = question.supporting_facts[self.random.randint(2)]
        # second paragraph: mostly this question's distractors, sometimes the same gold
        # again or a paragraph from another question
        rand_par = self.random.choice([rand_par_other_q, first_gold_par, self.random.choice(question.distractors)],
                                      p=[0.05, 0.1, 0.85])
        pars = [flatten_iterable(first_gold_par.sentences), flatten_iterable(rand_par.sentences)]
        segs = [get_segments_from_sentences(first_gold_par.sentences),
                get_segments_from_sentences(rand_par.sentences)]
        return IterativeQuestionAndParagraphs(question=question.question_tokens, paragraphs=pars,
                                              first_label=1, second_label=0,
                                              question_id=question.question_id,
                                              q_type=question.q_type,
                                              sentence_segments=segs)

    def _sample_false_1(self, qid):
        """ False sample of type 1: all distractors.
        No sampling from other question here, as I think it's less effective in this case"""
        question = self.qid2question[qid]
        two_distractors = self.random.choice(question.distractors, size=2, replace=False)
        pars = [flatten_iterable(x.sentences) for x in two_distractors]
        segs = [get_segments_from_sentences(x.sentences) for x in two_distractors]
        return IterativeQuestionAndParagraphs(question=question.question_tokens, paragraphs=pars,
                                              first_label=0, second_label=0,
                                              question_id=question.question_id,
                                              q_type=question.q_type,
                                              sentence_segments=segs)

    def _sample_false_2(self, qid):
        """ False sample of type 2: first distractor, second one of supporting facts """
        question = self.qid2question[qid]
        rand_par_other_q = self._sample_rand_par_other_q(qid)
        distractor = self.random.choice([self.random.choice(question.distractors), rand_par_other_q], p=[0.9, 0.1])
        gold = self.random.choice(question.supporting_facts)
        pars = [flatten_iterable(x.sentences) for x in [distractor, gold]]
        segs = [get_segments_from_sentences(x.sentences) for x in [distractor, gold]]
        return IterativeQuestionAndParagraphs(question=question.question_tokens, paragraphs=pars,
                                              first_label=0, second_label=0,
                                              question_id=question.question_id,
                                              q_type=question.q_type,
                                              sentence_segments=segs)

    def _sample_false_3(self, qid):
        """ False sample of type 3: gold from other question """
        question = self.qid2question[qid]
        rand_q_idx = self.random.randint(len(self.gold_samples))
        while self.gold_samples[rand_q_idx].question_id == question.question_id:
            rand_q_idx = self.random.randint(len(self.gold_samples))
        selected_q = self.gold_samples[rand_q_idx]
        # copy the paragraph/segment lists so later in-place shuffles of the gold
        # sample do not affect this negative
        return IterativeQuestionAndParagraphs(question=question.question_tokens,
                                              paragraphs=[x for x in selected_q.paragraphs],
                                              first_label=0, second_label=0,
                                              question_id=question.question_id,
                                              q_type=question.q_type,
                                              sentence_segments=[x for x in
                                                                 selected_q.sentence_segments])

    def get_epoch(self, new_epoch=True):
        """Build (or reuse) an epoch; grouped-pair mode yields gold+negatives batches instead."""
        if self.group_pairs_in_batches:
            return self._build_pair_batches(self.gold_samples)
        if self.fixed_dataset:
            new_epoch = False
        if not new_epoch and self.epoch_samples is not None:
            return self.batcher.get_epoch(self.epoch_samples)
        return self._build_regular_batches(self.gold_samples, set_epoch_samples=True)
        # false_samples = []
        # for question in self.questions:
        #     # false_samples.append(self._sample_first_gold_second_false(question.question_id))
        #     # false_samples.append(self._sample_false_1(question.question_id))
        #     # false_samples.append(self._sample_false_2(question.question_id))
        #     # false_samples.append(self._sample_false_3(question.question_id))
        #     for _ in range(4):
        #         false_samples.append(self.random.choice([self._sample_first_gold_second_false,
        #                                                  self._sample_false_1, self._sample_false_2,
        #                                                  self._sample_false_3],
        #                                                 p=[0.35, 0.25, 0.35, 0.05])(question.question_id))
        # for gold in self.gold_samples:
        #     if gold.q_type == 'comparison' or self.bridge_as_comparison or \
        #             (self.label_by_span and len(self._get_no_span_containing_golds(gold.question_id)) != 1):
        #         # shuffling order when we can
        #         gold.paragraphs = [gold.paragraphs[1], gold.paragraphs[0]]
        #         gold.sentence_segments = [gold.sentence_segments[1], gold.sentence_segments[0]]
        # self.epoch_samples = self.gold_samples + false_samples
        # np.random.shuffle(self.epoch_samples)
        # return self.batcher.get_epoch(self.epoch_samples)

    def _build_regular_batches(self, gold_questions, set_epoch_samples=False):
        """Expand each gold sample with 4 sampled negatives and return shuffled batches."""
        false_samples = []
        for question in gold_questions:
            for _ in range(4):
                false_samples.append(self.random.choice([self._sample_first_gold_second_false,
                                                         self._sample_false_1, self._sample_false_2,
                                                         self._sample_false_3],
                                                        p=[0.35, 0.25, 0.35, 0.05])(question.question_id))
        for gold in gold_questions:
            if gold.q_type == 'comparison' or self.bridge_as_comparison or \
                    (self.label_by_span and len(self._get_no_span_containing_golds(gold.question_id)) != 1):
                # shuffling order when we can
                gold.paragraphs = [gold.paragraphs[1], gold.paragraphs[0]]
                gold.sentence_segments = [gold.sentence_segments[1], gold.sentence_segments[0]]
        epoch_samples = gold_questions + false_samples
        # NOTE(review): this uses the *global* numpy RNG, not self.random, so epoch
        # order is not reproducible under sample_seed -- confirm whether intended.
        np.random.shuffle(epoch_samples)
        if set_epoch_samples:
            self.epoch_samples = epoch_samples
        return self.batcher.get_epoch(epoch_samples)

    def _build_pair_batches(self, gold_questions):
        """Yield batches where each gold sample is grouped with its own sampled negatives."""
        # NOTE(review): global numpy RNG here as well -- see _build_regular_batches.
        np.random.shuffle(gold_questions)
        for q in gold_questions:
            if q.q_type == 'comparison' or self.bridge_as_comparison or \
                    (self.label_by_span and len(self._get_no_span_containing_golds(q.question_id)) != 1):
                # shuffling order when we can
                q.paragraphs = [q.paragraphs[1], q.paragraphs[0]]
                q.sentence_segments = [q.sentence_segments[1], q.sentence_segments[0]]
        for batch_golds in self.batcher.get_epoch(gold_questions):
            batch = []
            for gold in batch_golds:
                batch.append(gold)
                for _ in range(self.num_distractors_in_batch):
                    batch.append(self.random.choice([self._sample_first_gold_second_false,
                                                     self._sample_false_1, self._sample_false_2, self._sample_false_3],
                                                    p=[0.35, 0.25, 0.35, 0.05])(gold.question_id))
            yield batch

    def get_spec(self):
        """Describe the maximal batch dimensions of this dataset."""
        batch_size = self.batcher.get_fixed_batch_size()
        num_contexts = 2
        max_q_words = max(len(q.question_tokens) for q in self.questions)
        max_c_words = max(max(c.num_tokens for c in (q.distractors + q.supporting_facts)) for q in self.questions)
        return QuestionAndParagraphsSpec(batch_size=batch_size, max_num_contexts=num_contexts,
                                         max_num_question_words=max_q_words, max_num_context_words=max_c_words)

    def get_vocab(self):
        """Return the set of all tokens appearing in questions and paragraphs."""
        voc = set()
        for q in self.questions:
            voc.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                voc.update(flatten_iterable(para.sentences))
        return voc

    def get_word_counts(self):
        """Return a Counter over all tokens appearing in questions and paragraphs."""
        count = Counter()
        for q in self.questions:
            count.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                count.update(flatten_iterable(para.sentences))
        return count

    def __len__(self):
        # regular mode: 1 gold + 4 negatives per question; grouped mode batches golds only
        return self.batcher.epoch_size(len(self.gold_samples) * (5 if not self.group_pairs_in_batches else 1))
class HotpotFullIterativeDataset(QuestionAndParagraphsDataset):
    """Deterministic dataset of *all* ordered paragraph pairs per question, labeled for iterative retrieval.

    For each question, every ordered pair (p1, p2) drawn from its supporting facts plus
    distractors becomes one sample with two labels (one per retrieval iteration).
    Intended for exhaustive evaluation rather than sampled training.
    """

    def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher, bridge_as_comparison=False):
        # bridge_as_comparison: treat bridge questions like comparison ones (either gold order valid)
        self.questions = questions
        self.batcher = batcher
        self.bridge_as_comparison = bridge_as_comparison
        self.samples = self._build_full_dataset()

    def _get_labels(self, is_gold1: bool, is_gold2: bool, q_type: str, are_same: bool,
                    is_first_higher: bool) -> Tuple[int, int]:
        """Compute (first_label, second_label) for an ordered paragraph pair.

        The second label is 1 only when both paragraphs are gold and distinct; for
        bridge questions (unless treated as comparison) the first paragraph must also
        be the higher-scored gold.
        """
        if not is_gold1:
            return 0, 0
        if q_type == 'comparison' or self.bridge_as_comparison:
            return int(is_gold1), int(is_gold1 and is_gold2 and not are_same)
        else:
            return int(is_gold1 and is_first_higher), int(is_gold1 and is_first_higher and is_gold2 and not are_same)

    def _build_full_dataset(self):
        """Enumerate every ordered paragraph pair of every question as a labeled sample."""
        samples = []
        for question in self.questions:
            pars_and_scores = list(zip(question.supporting_facts + question.distractors,
                                       question.gold_scores + question.distractor_scores))
            # ties resolve to the first supporting fact
            higher_gold = question.supporting_facts[0] \
                if question.gold_scores[0] >= question.gold_scores[1] else question.supporting_facts[1]
            for p1, score1 in pars_and_scores:
                for p2, score2 in pars_and_scores:
                    first_label, second_label = self._get_labels(is_gold1=p1 in question.supporting_facts,
                                                                 is_gold2=p2 in question.supporting_facts,
                                                                 q_type=question.q_type,
                                                                 are_same=p1 == p2,
                                                                 is_first_higher=higher_gold == p1)
                    samples.append(IterativeQuestionAndParagraphs(question=question.question_tokens,
                                                                  paragraphs=[flatten_iterable(p1.sentences),
                                                                              flatten_iterable(p2.sentences)],
                                                                  first_label=first_label, second_label=second_label,
                                                                  question_id=question.question_id,
                                                                  q_type=question.q_type,
                                                                  sentence_segments=[get_segments_from_sentences(s)
                                                                                     for s in
                                                                                     [p1.sentences, p2.sentences]]))
        return samples

    def get_batches(self, n_batches):
        """Return the first n_batches batches over the full sample list."""
        if len(self) < n_batches:
            raise ValueError()
        return itertools.islice(self.get_epoch(), n_batches)

    def get_samples(self, n_samples: int):
        """Return batches over n_samples samples chosen at random (global numpy RNG)."""
        n_batches = self.batcher.epoch_size(n_samples)
        return self.batcher.get_epoch(np.random.choice(self.samples, n_samples, replace=False)), n_batches

    def get_epoch(self):
        """Return batches over all samples, in construction order (no shuffling)."""
        return self.batcher.get_epoch(self.samples)

    def get_spec(self):
        """Describe the maximal batch dimensions of this dataset."""
        batch_size = self.batcher.get_fixed_batch_size()
        num_contexts = 2
        max_q_words = max(len(q.question_tokens) for q in self.questions)
        max_c_words = max(max(c.num_tokens for c in (q.distractors + q.supporting_facts)) for q in self.questions)
        return QuestionAndParagraphsSpec(batch_size=batch_size, max_num_contexts=num_contexts,
                                         max_num_question_words=max_q_words, max_num_context_words=max_c_words)

    def get_vocab(self):
        """Return the set of all tokens appearing in questions and paragraphs."""
        voc = set()
        for q in self.questions:
            voc.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                voc.update(flatten_iterable(para.sentences))
        return voc

    def get_word_counts(self):
        """Return a Counter over all tokens appearing in questions and paragraphs."""
        count = Counter()
        for q in self.questions:
            count.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                count.update(flatten_iterable(para.sentences))
        return count

    def __len__(self):
        return self.batcher.epoch_size(len(self.samples))
class HotpotFullQuestionParagraphPairsDataset(QuestionAndParagraphsDataset):
    """Deterministic binary dataset of *all* unordered paragraph pairs per question.

    Each unique pair of paragraphs (gold + distractors) of a question becomes one
    sample, labeled 1 only when both paragraphs are supporting facts.
    """

    def __init__(self, questions: List[HotpotQuestion], batcher: ListBatcher):
        self.questions = questions
        self.batcher = batcher
        self.samples = self._build_full_dataset()

    def _build_full_dataset(self):
        """Enumerate every unordered paragraph pair of every question as a labeled sample."""
        samples = []
        for question in self.questions:
            # j > i enumeration: each unordered pair appears exactly once
            for i, p1 in enumerate(question.distractors + question.supporting_facts):
                for p2 in (question.distractors + question.supporting_facts)[i + 1:]:
                    label = 1 if ((p1 in question.supporting_facts) and (p2 in question.supporting_facts)) else 0
                    num_distractors = sum([p1 in question.distractors, p2 in question.distractors])
                    samples.append(BinaryQuestionAndParagraphs(question.question_tokens, [flatten_iterable(
                        p1.sentences),
                        flatten_iterable(
                            p2.sentences)], label, num_distractors=num_distractors, question_id=question.question_id,
                        q_type=question.q_type))
        return samples

    def get_batches(self, n_batches):
        """Return the first n_batches batches over the full sample list."""
        if len(self) < n_batches:
            raise ValueError()
        return itertools.islice(self.get_epoch(), n_batches)

    def get_samples(self, n_samples: int):
        """Return batches over n_samples samples chosen at random (global numpy RNG)."""
        n_batches = self.batcher.epoch_size(n_samples)
        return self.batcher.get_epoch(np.random.choice(self.samples, n_samples, replace=False)), n_batches

    def get_epoch(self):
        """Return batches over all samples, in construction order (no shuffling)."""
        return self.batcher.get_epoch(self.samples)

    def get_spec(self):
        """Describe the maximal batch dimensions of this dataset."""
        batch_size = self.batcher.get_fixed_batch_size()
        num_contexts = 2
        max_q_words = max(len(q.question_tokens) for q in self.questions)
        max_c_words = max(max(c.num_tokens for c in (q.distractors + q.supporting_facts)) for q in self.questions)
        return QuestionAndParagraphsSpec(batch_size=batch_size, max_num_contexts=num_contexts,
                                         max_num_question_words=max_q_words, max_num_context_words=max_c_words)

    def get_vocab(self):
        """Return the set of all tokens appearing in questions and paragraphs."""
        voc = set()
        for q in self.questions:
            voc.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                voc.update(flatten_iterable(para.sentences))
        return voc

    def get_word_counts(self):
        """Return a Counter over all tokens appearing in questions and paragraphs."""
        count = Counter()
        for q in self.questions:
            count.update(q.question_tokens)
            for para in (q.distractors + q.supporting_facts):
                count.update(flatten_iterable(para.sentences))
        return count

    def __len__(self):
        return self.batcher.epoch_size(len(self.samples))
class HotpotTextLengthPreprocessor(Preprocessor):
    """Truncates paragraphs (sentence by sentence, from the end) to a maximal token count.

    Questions whose truncation would remove a supporting-fact sentence are dropped
    (preprocess returns None for them).
    """

    def __init__(self, num_tokens_th):
        # maximal number of tokens allowed per paragraph
        self.num_tokens_th = num_tokens_th

    def preprocess(self, question: HotpotQuestion):
        """Truncate the question's paragraphs in place.

        :param question: the question to preprocess (mutated in place)
        :return: the question, or None if a supporting sentence had to be removed
        """
        for par in question.distractors:
            # track the token count incrementally instead of re-flattening the whole
            # paragraph on every truncation step (was accidentally O(n^2))
            num_tokens = sum(len(sent) for sent in par.sentences)
            while num_tokens > self.num_tokens_th:
                num_tokens -= len(par.sentences[-1])
                par.sentences = par.sentences[:-1]
        for par in question.supporting_facts:
            num_tokens = sum(len(sent) for sent in par.sentences)
            while num_tokens > self.num_tokens_th:
                # never silently drop a supporting sentence -- discard the sample instead
                if (len(par.sentences) - 1) in par.supporting_sentence_ids:
                    print("Warning: supporting fact above threshold. removing sample")
                    return None
                num_tokens -= len(par.sentences[-1])
                par.sentences = par.sentences[:-1]
        return question
class HotpotQuestionFilter(SampleFilter):
    """Keeps only questions that come with at least a minimum number of distractor paragraphs."""

    def __init__(self, num_distractors_th):
        # threshold: minimal distractor count required to keep a question
        self.num_distractors_th = num_distractors_th

    def keep(self, question: HotpotQuestion) -> bool:
        """Return True iff the question has enough distractor paragraphs."""
        distractor_count = len(question.distractors)
        return distractor_count >= self.num_distractors_th
class HotpotBinaryRelevanceTrainingData(TrainingDataHandler):
    """Training-data handler that builds stratified binary relevance datasets for hotpot."""

    def __init__(self, corpus: HotpotQuestions, train_batcher: ListBatcher, dev_batcher: ListBatcher,
                 sample_filter: Optional[SampleFilter] = None, preprocessor: Optional[Preprocessor] = None,
                 sample_train=None, sample_dev=None, sample_seed=18, add_gold_distractors=True):
        super().__init__(train_batcher, dev_batcher, sample_filter, preprocessor, sample_train, sample_dev, sample_seed)
        self.corpus = corpus
        self.add_gold_distractors = add_gold_distractors
        # lazily populated caches of the loaded question splits
        self._train = None
        self._dev = None

    def get_train(self) -> Dataset:
        """Lazily load the corpus and return the training dataset."""
        self._load_data()
        return HotpotStratifiedBinaryQuestionParagraphPairsDataset(self._train, self.train_batcher, fixed_dataset=False,
                                                                    add_gold_distractor=self.add_gold_distractors)

    def get_eval(self) -> Dict[str, Dataset]:  # TODO: are we sure we don't want to use fixed datasets for evaluation?
        """Lazily load the corpus and return evaluation datasets for both splits."""
        self._load_data()
        eval_sets = dict(
            train=HotpotStratifiedBinaryQuestionParagraphPairsDataset(self._train,
                                                                      self.dev_batcher,
                                                                      fixed_dataset=False,
                                                                      add_gold_distractor=self.add_gold_distractors),
            dev=HotpotStratifiedBinaryQuestionParagraphPairsDataset(self._dev, self.dev_batcher,
                                                                    fixed_dataset=False,
                                                                    add_gold_distractor=self.add_gold_distractors))
        return eval_sets

    def __getstate__(self):
        # Bug fix: copy the dict before clearing the cached datasets. Mutating
        # self.__dict__ directly wiped the loaded data from the *live* object as a
        # side effect of pickling it.
        state = self.__dict__.copy()
        state["_train"] = None
        state["_dev"] = None
        return state

    def __setstate__(self, state):
        self.__dict__ = state
class HotpotIterativeRelevanceTrainingData(TrainingDataHandler):
    """Training-data handler that builds iterative retrieval datasets for hotpot."""

    def __init__(self, corpus: HotpotQuestions, train_batcher: ListBatcher, dev_batcher: ListBatcher,
                 sample_filter: Optional[SampleFilter] = None, preprocessor: Optional[Preprocessor] = None,
                 sample_train=None, sample_dev=None, sample_seed=18, bridge_as_comparison=False,
                 group_pairs_in_batches=False, label_by_span=False, num_distractors_in_batch=1,
                 max_batch_size=None):
        super().__init__(train_batcher, dev_batcher, sample_filter, preprocessor, sample_train, sample_dev, sample_seed)
        self.corpus = corpus
        self.bridge_as_comparison = bridge_as_comparison
        self.group_pairs_in_batches = group_pairs_in_batches
        self.label_by_span = label_by_span
        self.num_distractors_in_batch = num_distractors_in_batch
        # cap on the (enlarged) evaluation batch size; None means no cap
        self.max_batch_size = max_batch_size
        # lazily populated caches of the loaded question splits
        self._train = None
        self._dev = None
        if self.label_by_span:
            print("Labeling first golds by spans")
        if self.bridge_as_comparison:
            print("Considering comparison and bridge alike")
        if self.group_pairs_in_batches:
            print("Grouping positives and negatives, for ranking loss")

    def get_train(self) -> Dataset:
        """Lazily load the corpus and return the training dataset."""
        self._load_data()
        return HotpotIterativeRetrievalDataset(self._train, self.train_batcher, fixed_dataset=False,
                                               bridge_as_comparison=self.bridge_as_comparison,
                                               group_pairs_in_batches=self.group_pairs_in_batches,
                                               label_by_span=self.label_by_span,
                                               num_distractors_in_batch=self.num_distractors_in_batch)

    def get_eval(self) -> Dict[str, Dataset]:  # TODO: are we sure we don't want to use fixed datasets for evaluation?
        """Lazily load the corpus and return evaluation datasets for both splits.

        When training groups pairs in batches, evaluation uses an enlarged (copied)
        batcher for the ungrouped sets, plus extra grouped train/dev sets.
        """
        self._load_data()
        bigger_batcher = self.dev_batcher
        if self.group_pairs_in_batches:
            # copy so that the shared dev batcher is not mutated
            bigger_batcher = copy.deepcopy(self.dev_batcher)
            bigger_batcher.batch_size *= (self.num_distractors_in_batch + 1)
            if self.max_batch_size is not None:
                bigger_batcher.batch_size = min(bigger_batcher.batch_size, int(self.max_batch_size / 2))
        eval_sets = dict(
            train=HotpotIterativeRetrievalDataset(self._train,
                                                  bigger_batcher,
                                                  fixed_dataset=False,
                                                  bridge_as_comparison=self.bridge_as_comparison,
                                                  label_by_span=self.label_by_span),
            dev=HotpotIterativeRetrievalDataset(self._dev,
                                                bigger_batcher,
                                                fixed_dataset=False, bridge_as_comparison=self.bridge_as_comparison,
                                                label_by_span=self.label_by_span))
        if self.group_pairs_in_batches:
            eval_sets.update(dict(
                train_grouped=HotpotIterativeRetrievalDataset(self._train,
                                                              self.dev_batcher,
                                                              fixed_dataset=False,
                                                              bridge_as_comparison=self.bridge_as_comparison,
                                                              group_pairs_in_batches=True,
                                                              label_by_span=self.label_by_span,
                                                              num_distractors_in_batch=self.num_distractors_in_batch),
                dev_grouped=HotpotIterativeRetrievalDataset(self._dev, self.dev_batcher,
                                                            fixed_dataset=False,
                                                            bridge_as_comparison=self.bridge_as_comparison,
                                                            group_pairs_in_batches=True,
                                                            label_by_span=self.label_by_span,
                                                            num_distractors_in_batch=self.num_distractors_in_batch)))
        return eval_sets

    def __getstate__(self):
        # Bug fix: copy the dict before clearing the cached datasets. Mutating
        # self.__dict__ directly wiped the loaded data from the *live* object as a
        # side effect of pickling it.
        state = self.__dict__.copy()
        state["_train"] = None
        state["_dev"] = None
        return state

    def __setstate__(self, state):
        # backward compatibility with pickles created before these options existed
        if "bridge_as_comparison" not in state:
            state["bridge_as_comparison"] = False
        if "group_pairs_in_batches" not in state:
            state["group_pairs_in_batches"] = False
        if "label_by_span" not in state:
            state["label_by_span"] = False
        if "num_distractors_in_batch" not in state:
            state["num_distractors_in_batch"] = 1
        if "max_batch_size" not in state:
            state["max_batch_size"] = None
        self.__dict__ = state
| [
"copy.deepcopy",
"hotpot.data_handling.dataset.QuestionAndParagraphsSpec",
"numpy.random.RandomState",
"hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs",
"numpy.random.choice",
"collections.Counter",
"hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs",
... | [((1741, 1780), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'sample_seed'}), '(seed=sample_seed)\n', (1762, 1780), True, 'import numpy as np\n'), ((5599, 5636), 'numpy.random.shuffle', 'np.random.shuffle', (['self.epoch_samples'], {}), '(self.epoch_samples)\n', (5616, 5636), True, 'import numpy as np\n'), ((6006, 6166), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'batch_size', 'max_num_contexts': 'num_contexts', 'max_num_question_words': 'max_q_words', 'max_num_context_words': 'max_c_words'}), '(batch_size=batch_size, max_num_contexts=\n num_contexts, max_num_question_words=max_q_words, max_num_context_words\n =max_c_words)\n', (6031, 6166), False, 'from hotpot.data_handling.dataset import ListBatcher, Dataset, QuestionAndParagraphsSpec, QuestionAndParagraphsDataset, Preprocessor, SampleFilter, TrainingDataHandler\n'), ((6509, 6518), 'collections.Counter', 'Counter', ([], {}), '()\n', (6516, 6518), False, 'from collections import Counter\n'), ((9388, 9427), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': 'sample_seed'}), '(seed=sample_seed)\n', (9409, 9427), True, 'import numpy as np\n'), ((13534, 13738), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', ([], {'question': 'question.question_tokens', 'paragraphs': 'pars', 'first_label': '(1)', 'second_label': '(0)', 'question_id': 'question.question_id', 'q_type': 'question.q_type', 'sentence_segments': 'segs'}), '(question=question.question_tokens,\n paragraphs=pars, first_label=1, second_label=0, question_id=question.\n question_id, q_type=question.q_type, sentence_segments=segs)\n', (13564, 13738), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((14399, 14603), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 
'IterativeQuestionAndParagraphs', ([], {'question': 'question.question_tokens', 'paragraphs': 'pars', 'first_label': '(0)', 'second_label': '(0)', 'question_id': 'question.question_id', 'q_type': 'question.q_type', 'sentence_segments': 'segs'}), '(question=question.question_tokens,\n paragraphs=pars, first_label=0, second_label=0, question_id=question.\n question_id, q_type=question.q_type, sentence_segments=segs)\n', (14429, 14603), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((15362, 15566), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', ([], {'question': 'question.question_tokens', 'paragraphs': 'pars', 'first_label': '(0)', 'second_label': '(0)', 'question_id': 'question.question_id', 'q_type': 'question.q_type', 'sentence_segments': 'segs'}), '(question=question.question_tokens,\n paragraphs=pars, first_label=0, second_label=0, question_id=question.\n question_id, q_type=question.q_type, sentence_segments=segs)\n', (15392, 15566), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((16167, 16442), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', ([], {'question': 'question.question_tokens', 'paragraphs': '[x for x in selected_q.paragraphs]', 'first_label': '(0)', 'second_label': '(0)', 'question_id': 'question.question_id', 'q_type': 'question.q_type', 'sentence_segments': '[x for x in selected_q.sentence_segments]'}), '(question=question.question_tokens,\n paragraphs=[x for x in selected_q.paragraphs], first_label=0,\n second_label=0, question_id=question.question_id, q_type=question.\n q_type, sentence_segments=[x for x in selected_q.sentence_segments])\n', (16197, 16442), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, 
IterativeQuestionAndParagraphs\n'), ((19666, 19698), 'numpy.random.shuffle', 'np.random.shuffle', (['epoch_samples'], {}), '(epoch_samples)\n', (19683, 19698), True, 'import numpy as np\n'), ((19889, 19922), 'numpy.random.shuffle', 'np.random.shuffle', (['gold_questions'], {}), '(gold_questions)\n', (19906, 19922), True, 'import numpy as np\n'), ((21206, 21366), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'batch_size', 'max_num_contexts': 'num_contexts', 'max_num_question_words': 'max_q_words', 'max_num_context_words': 'max_c_words'}), '(batch_size=batch_size, max_num_contexts=\n num_contexts, max_num_question_words=max_q_words, max_num_context_words\n =max_c_words)\n', (21231, 21366), False, 'from hotpot.data_handling.dataset import ListBatcher, Dataset, QuestionAndParagraphsSpec, QuestionAndParagraphsDataset, Preprocessor, SampleFilter, TrainingDataHandler\n'), ((21709, 21718), 'collections.Counter', 'Counter', ([], {}), '()\n', (21716, 21718), False, 'from collections import Counter\n'), ((25661, 25821), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'batch_size', 'max_num_contexts': 'num_contexts', 'max_num_question_words': 'max_q_words', 'max_num_context_words': 'max_c_words'}), '(batch_size=batch_size, max_num_contexts=\n num_contexts, max_num_question_words=max_q_words, max_num_context_words\n =max_c_words)\n', (25686, 25821), False, 'from hotpot.data_handling.dataset import ListBatcher, Dataset, QuestionAndParagraphsSpec, QuestionAndParagraphsDataset, Preprocessor, SampleFilter, TrainingDataHandler\n'), ((26164, 26173), 'collections.Counter', 'Counter', ([], {}), '()\n', (26171, 26173), False, 'from collections import Counter\n'), ((28415, 28575), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'batch_size', 'max_num_contexts': 'num_contexts', 'max_num_question_words': 
'max_q_words', 'max_num_context_words': 'max_c_words'}), '(batch_size=batch_size, max_num_contexts=\n num_contexts, max_num_question_words=max_q_words, max_num_context_words\n =max_c_words)\n', (28440, 28575), False, 'from hotpot.data_handling.dataset import ListBatcher, Dataset, QuestionAndParagraphsSpec, QuestionAndParagraphsDataset, Preprocessor, SampleFilter, TrainingDataHandler\n'), ((28918, 28927), 'collections.Counter', 'Counter', ([], {}), '()\n', (28925, 28927), False, 'from collections import Counter\n'), ((13301, 13343), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['first_gold_par.sentences'], {}), '(first_gold_par.sentences)\n', (13317, 13343), False, 'from hotpot.utils import flatten_iterable\n'), ((13345, 13381), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['rand_par.sentences'], {}), '(rand_par.sentences)\n', (13361, 13381), False, 'from hotpot.utils import flatten_iterable\n'), ((14245, 14274), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['x.sentences'], {}), '(x.sentences)\n', (14261, 14274), False, 'from hotpot.utils import flatten_iterable\n'), ((15202, 15231), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['x.sentences'], {}), '(x.sentences)\n', (15218, 15231), False, 'from hotpot.utils import flatten_iterable\n'), ((34333, 34364), 'copy.deepcopy', 'copy.deepcopy', (['self.dev_batcher'], {}), '(self.dev_batcher)\n', (34346, 34364), False, 'import copy\n'), ((1993, 2049), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['question.supporting_facts[0].sentences'], {}), '(question.supporting_facts[0].sentences)\n', (2009, 2049), False, 'from hotpot.utils import flatten_iterable\n'), ((2071, 2127), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['question.supporting_facts[1].sentences'], {}), '(question.supporting_facts[1].sentences)\n', (2087, 2127), False, 'from hotpot.utils import flatten_iterable\n'), ((2199, 2347), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 
'BinaryQuestionAndParagraphs', (['question.question_tokens', 'pars', '(1)'], {'num_distractors': '(0)', 'question_id': 'question.question_id', 'q_type': 'question.q_type'}), '(question.question_tokens, pars, 1,\n num_distractors=0, question_id=question.question_id, q_type=question.q_type\n )\n', (2226, 2347), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((3171, 3200), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['x.sentences'], {}), '(x.sentences)\n', (3187, 3200), False, 'from hotpot.utils import flatten_iterable\n'), ((3396, 3452), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['question.supporting_facts[0].sentences'], {}), '(question.supporting_facts[0].sentences)\n', (3412, 3452), False, 'from hotpot.utils import flatten_iterable\n'), ((3506, 3562), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['question.supporting_facts[1].sentences'], {}), '(question.supporting_facts[1].sentences)\n', (3522, 3562), False, 'from hotpot.utils import flatten_iterable\n'), ((3734, 3894), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 'BinaryQuestionAndParagraphs', (['question.question_tokens', 'true_and_false_1', '(0)'], {'num_distractors': '(1)', 'question_id': 'question.question_id', 'q_type': 'question.q_type'}), '(question.question_tokens, true_and_false_1, 0,\n num_distractors=1, question_id=question.question_id, q_type=question.q_type\n )\n', (3761, 3894), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((3981, 4141), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 'BinaryQuestionAndParagraphs', (['question.question_tokens', 'true_and_false_2', '(0)'], {'num_distractors': '(1)', 'question_id': 'question.question_id', 'q_type': 'question.q_type'}), '(question.question_tokens, true_and_false_2, 0,\n num_distractors=1, 
question_id=question.question_id, q_type=question.q_type\n )\n', (4008, 4141), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((10633, 10689), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['question.supporting_facts[i].sentences'], {}), '(question.supporting_facts[i].sentences)\n', (10649, 10689), False, 'from hotpot.utils import flatten_iterable\n'), ((10861, 11055), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', (['question.question_tokens', 'pars'], {'first_label': '(1)', 'second_label': '(1)', 'question_id': 'question.question_id', 'q_type': 'question.q_type', 'sentence_segments': 'sentence_segs'}), '(question.question_tokens, pars, first_label=\n 1, second_label=1, question_id=question.question_id, q_type=question.\n q_type, sentence_segments=sentence_segs)\n', (10891, 11055), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((25203, 25259), 'numpy.random.choice', 'np.random.choice', (['self.samples', 'n_samples'], {'replace': '(False)'}), '(self.samples, n_samples, replace=False)\n', (25219, 25259), True, 'import numpy as np\n'), ((27957, 28013), 'numpy.random.choice', 'np.random.choice', (['self.samples', 'n_samples'], {'replace': '(False)'}), '(self.samples, n_samples, replace=False)\n', (27973, 28013), True, 'import numpy as np\n'), ((5016, 5187), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 'BinaryQuestionAndParagraphs', (['question.question_tokens', 'selected_q.paragraphs'], {'label': '(0)', 'num_distractors': '(2)', 'question_id': 'question.question_id', 'q_type': 'question.q_type'}), '(question.question_tokens, selected_q.paragraphs,\n label=0, num_distractors=2, question_id=question.question_id, q_type=\n question.q_type)\n', (5043, 5187), False, 'from 
hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((6408, 6440), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (6424, 6440), False, 'from hotpot.utils import flatten_iterable\n'), ((6687, 6719), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (6703, 6719), False, 'from hotpot.utils import flatten_iterable\n'), ((21608, 21640), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (21624, 21640), False, 'from hotpot.utils import flatten_iterable\n'), ((21887, 21919), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (21903, 21919), False, 'from hotpot.utils import flatten_iterable\n'), ((26063, 26095), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (26079, 26095), False, 'from hotpot.utils import flatten_iterable\n'), ((26342, 26374), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (26358, 26374), False, 'from hotpot.utils import flatten_iterable\n'), ((28817, 28849), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (28833, 28849), False, 'from hotpot.utils import flatten_iterable\n'), ((29096, 29128), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['para.sentences'], {}), '(para.sentences)\n', (29112, 29128), False, 'from hotpot.utils import flatten_iterable\n'), ((29483, 29514), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['par.sentences'], {}), '(par.sentences)\n', (29499, 29514), False, 'from hotpot.utils import flatten_iterable\n'), ((29657, 29688), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['par.sentences'], {}), '(par.sentences)\n', (29673, 29688), False, 'from hotpot.utils import flatten_iterable\n'), 
((4266, 4295), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['x.sentences'], {}), '(x.sentences)\n', (4282, 4295), False, 'from hotpot.utils import flatten_iterable\n'), ((27327, 27357), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['p1.sentences'], {}), '(p1.sentences)\n', (27343, 27357), False, 'from hotpot.utils import flatten_iterable\n'), ((27408, 27438), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['p2.sentences'], {}), '(p2.sentences)\n', (27424, 27438), False, 'from hotpot.utils import flatten_iterable\n'), ((24100, 24130), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['p1.sentences'], {}), '(p1.sentences)\n', (24116, 24130), False, 'from hotpot.utils import flatten_iterable\n'), ((24210, 24240), 'hotpot.utils.flatten_iterable', 'flatten_iterable', (['p2.sentences'], {}), '(p2.sentences)\n', (24226, 24240), False, 'from hotpot.utils import flatten_iterable\n')] |
import numpy as np
def machine_learning_TI(x_train, y_train, x_test, y_test, mode, TI_test):
    """Train a regression model on (x_train, y_train) and predict TI on x_test.

    Parameters
    ----------
    x_train, x_test : array-like
        Feature arrays. 1-D inputs (or 2-D single-column inputs) are
        normalized to shape (n, 1); multi-column inputs are used as-is.
    y_train, y_test : array-like
        Target values; flattened to 1-D.
    mode : str
        Selects the model; must contain one of "RF", "SVR", "MARS", "NN".
        If several are named, the last matching branch wins.
    TI_test : array-like
        Reference TI values for the test set. Reshaped alongside x for
        interface compatibility but not used in the computation.

    Returns
    -------
    numpy.ndarray
        Predicted TI values for ``x_test``.

    Raises
    ------
    ValueError
        If ``mode`` names no supported model (the original raised an
        opaque NameError in this case).
    """
    x_train = np.asarray(x_train)
    x_test = np.asarray(x_test)
    TI_test = np.asarray(TI_test)
    # Targets are always flattened to 1-D, which is what sklearn expects.
    y_train = np.asarray(y_train).reshape(-1, 1).ravel()
    y_test = np.asarray(y_test).reshape(-1, 1).ravel()
    # A single feature may arrive 1-D or as an (n, 1) column; normalize to
    # (n, 1). (The original duplicated this reshape in two if-blocks.)
    if x_train.ndim == 1 or x_train.shape[1] == 1:
        x_train = x_train.reshape(-1, 1)
        x_test = x_test.reshape(-1, 1)
        TI_test = TI_test.reshape(-1, 1)

    TI_pred = None
    if "RF" in mode:
        from sklearn.ensemble import RandomForestRegressor
        # rfc = RandomForestRegressor(random_state=42, max_features=2, n_estimators=100)
        rfc = RandomForestRegressor(random_state=42, n_estimators=100)
        rfc.fit(x_train, y_train)
        TI_pred = rfc.predict(x_test)
    if "SVR" in mode:
        from sklearn.svm import SVR
        clf = SVR(C=1.0, epsilon=0.2, kernel="poly", degree=2)
        clf.fit(x_train, y_train)
        TI_pred = clf.predict(x_test)
    if "MARS" in mode:
        from pyearth import Earth
        # MARS_model = Earth(max_terms=8, max_degree=2)
        MARS_model = Earth()
        # BUGFIX: the original fit on (x_test, y_test) -- leaking test labels --
        # and assigned TI_pred[mask2] with `mask2` undefined (NameError).
        # Fit on the training data and predict the whole test set instead.
        MARS_model.fit(x_train, y_train)
        TI_pred = MARS_model.predict(x_test)
        print(MARS_model.summary())
    if "NN" in mode:
        # TODO: neural-network model not implemented yet; if another mode
        # matched above, its prediction is returned unchanged.
        pass

    if TI_pred is None:
        raise ValueError("Unsupported mode: {!r}".format(mode))
    return TI_pred
| [
"pyearth.Earth",
"sklearn.svm.SVR",
"numpy.array",
"sklearn.ensemble.RandomForestRegressor"
] | [((748, 765), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (756, 765), True, 'import numpy as np\n'), ((783, 799), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (791, 799), True, 'import numpy as np\n'), ((818, 835), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (826, 835), True, 'import numpy as np\n'), ((853, 869), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (861, 869), True, 'import numpy as np\n'), ((888, 905), 'numpy.array', 'np.array', (['TI_test'], {}), '(TI_test)\n', (896, 905), True, 'import numpy as np\n'), ((1006, 1062), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'random_state': '(42)', 'n_estimators': '(100)'}), '(random_state=42, n_estimators=100)\n', (1027, 1062), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((1326, 1374), 'sklearn.svm.SVR', 'SVR', ([], {'C': '(1.0)', 'epsilon': '(0.2)', 'kernel': '"""poly"""', 'degree': '(2)'}), "(C=1.0, epsilon=0.2, kernel='poly', degree=2)\n", (1329, 1374), False, 'from sklearn.svm import SVR\n'), ((1527, 1534), 'pyearth.Earth', 'Earth', ([], {}), '()\n', (1532, 1534), False, 'from pyearth import Earth\n'), ((262, 279), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (270, 279), True, 'import numpy as np\n'), ((312, 328), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (320, 328), True, 'import numpy as np\n'), ((362, 379), 'numpy.array', 'np.array', (['TI_test'], {}), '(TI_test)\n', (370, 379), True, 'import numpy as np\n'), ((587, 604), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (595, 604), True, 'import numpy as np\n'), ((637, 653), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (645, 653), True, 'import numpy as np\n'), ((687, 704), 'numpy.array', 'np.array', (['TI_test'], {}), '(TI_test)\n', (695, 704), True, 'import numpy as np\n'), ((146, 163), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (154, 163), True, 'import numpy as 
np\n'), ((204, 220), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (212, 220), True, 'import numpy as np\n'), ((471, 488), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (479, 488), True, 'import numpy as np\n'), ((529, 545), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (537, 545), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import os, sys, shutil
import tempfile
import subprocess
import numpy as np
from .linelists import TSLineList, get_default_linelist
from .marcs import MARCSModel, interp_atmosphere
from . import utils
_lpoint_max = 1000000 # hardcoded into turbospectrum spectrum.inc file, adopt this to match
_ERASESTR= " "
_TURBO_DIR_ = '/home/alexji/lib/Turbospectrum2019/exec-v19.1' #46s on a 5000-5500A test
#_TURBO_DIR_ = '/home/alexji/lib/Turbospectrum2019/exec-gf-v19.1' #77s on a 5000-5500A test
# NOTE(review): the assignment below overrides the one above, so the "-ie"
# build is the one actually used; the first assignment is dead code kept for
# timing reference. Hard-coded, machine-specific path -- consider an env var.
_TURBO_DIR_ = '/home/alexji/lib/Turbospectrum2019-ie/exec-v19.1-ie' #47s on a 5000-5500A test
def run_synth(wmin, wmax, dwl, *args,
              linelist=None,
              atmosphere=None,
              Teff=None, logg=None, MH=None, vt=None,
              aFe=None, CFe=None, NFe=None, rFe=None, sFe=None,
              modelopac=None,
              outfname=None, twd=None, verbose=False,
              costheta=1.0,isotopes={}, marcsfile=True,
              Hlinelist=None,debug=False
              ):
    """
    Run a turbospectrum synthesis.
    Based heavily on https://github.com/jobovy/apogee/blob/master/apogee/modelspec/turbospec.py
    From Nov 2020
    INPUT ARGUMENTS:
       wmin, wmax, dwl: which wavelength range and step to run
       lists with abundances: e.g. (6, 0.0), (8, 1.0), (20, 0.4)
          [Atomic number1,diff1]
          [Atomic number2,diff2]
          ...
          [Atomic numberM,diffM]
    SYNTHEIS KEYWORDS:
       isotopes= ('solar') use 'solar' or 'arcturus' isotope ratios;
                 can also be a dictionary with isotope ratios (e.g., isotopes= {'6.012':'0.9375','6.013':'0.0625'})
       costheta= (1.) cosine of the viewing angle
    ATMOSPHERE KEYWORDS:
       atmosphere: either a string pointing to the MARCS model file, or a MARCS model object
       Teff, logg, MH, vt: can specify all of these 4 parameters to call interp_atmosphere
       aFe, CFe, NFe, rFe, sFe: can add these to interp_atmosphere if desired
       modelopac= (None)
                  (a) if set to an existing filename: assume babsma_lu has already been run and use this continuous opacity in bsyn_lu
                  (b) if set to a non-existing filename: store the continuous opacity in this file
       marcsfile= True
           Note: you currently need <NAME>'s version of Turbospectrum to parse the interpolated atmospheres
           https://github.com/iaescala/Turbospectrum2019
    LINELIST KEYWORDS:
       air= (True) if True, perform the synthesis in air wavelengths (affects the default Hlinelist, nothing else; output is in air if air, vacuum otherwise); set to False at your own risk, as Turbospectrum expects the linelist in air wavelengths!)
       Hlinelist= (None) Hydrogen linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist; if None, then we first search for the Hlinedata.vac in the APOGEE linelist directory (if air=False) or we use the internal Turbospectrum Hlinelist (if air=True)
       linelist= (None) molecular and atomic linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist, or lists of such files; if a single filename is given, the code will first search for files with extensions '.atoms', '.molec' or that start with 'turboatoms.' and 'turbomolec.'
    OUTPUT:
       (wavelengths,cont-norm. spectrum, spectrum (nwave))
       if keyword outfname is set to a path:
           save the output of bsyn_lu (spectrum) to outfname
    """
    # NOTE(review): isotopes={} is a mutable default argument; harmless here
    # because it is only read/rebound, never mutated in place, but fragile.
    # NOTE(review): CFe, NFe, rFe, sFe are accepted but never used below --
    # only aFe is forwarded to interp_atmosphere. TODO confirm intent.
    if debug: verbose=True
    # Turbospectrum's spectrum.inc hardcodes a maximum number of wavelength
    # points (_lpoint_max); fail fast instead of crashing inside the Fortran.
    Nwl = np.ceil((wmax-wmin)/dwl)
    if Nwl >= _lpoint_max:
        raise ValueError(f"Trying to synthesize {Nwl} >= {_lpoint_max} wavelength points")
    ## working directory (all scripts, model files, and outputs live here)
    if twd is None:
        twd = tempfile.mkdtemp(dir=os.getcwd()+"/tmp")
    ## Linelist: default by wavelength range, load from path, or use as-is
    if linelist is None:
        linelist = get_default_linelist(wmin, wmax)
    elif isinstance(linelist, str):
        linelist = TSLineList(os.path.abspath(linelist))
    else:
        assert isinstance(linelist, TSLineList)
    linelistfilenames = [linelist.get_fname()]
    rmLinelists = False
    # Link the Turbospectrum DATA directory (required by the executables;
    # the TURBODATA environment variable must point at it)
    if not os.path.exists(os.path.join(twd, 'DATA')):
        os.symlink(os.getenv('TURBODATA'),os.path.join(twd,'DATA'))
    if Hlinelist is None:
        Hlinelist = 'DATA/Hlinedata'
    linelistfilenames.append(Hlinelist)
    # Isotope ratios: named preset, or an explicit {isotope: ratio} dict
    if isinstance(isotopes,str) and isotopes.lower() == 'solar':
        isotopes= {}
    elif isinstance(isotopes,str) and isotopes.lower() == 'arcturus':
        isotopes= {'6.012':'0.9375',
                   '6.013':'0.0625'}
    elif not isinstance(isotopes,dict):
        raise ValueError("'isotopes=' input not understood, should be 'solar', 'arcturus', or a dictionary")
    ## Stellar atmosphere
    ## I have a bunch of random wrapping to make extensions easier for the future
    ## But it's overkill for now
    if atmosphere is not None:
        # The MARCS models need you to set vt separately
        assert vt is not None, vt
        if isinstance(atmosphere, str):
            atmosphere = MARCSModel.load(atmosphere)
        assert isinstance(atmosphere, MARCSModel)
        atmosphere.vt = vt
        # Stellar parameters are taken from the model, overriding any
        # Teff/logg/MH/aFe keywords that were passed in.
        Teff, logg, MH, aFe = atmosphere.Teff, atmosphere.logg, \
                              atmosphere.MH, atmosphere.AM
    else:
        # APJ Not tested but should work..
        assert Teff is not None, Teff
        assert logg is not None, logg
        assert MH is not None, MH
        assert vt is not None, vt
        atmosphere = interp_atmosphere(Teff, logg, MH, vt,
                                       aFe, twd)
        atmosphere = MARCSModel.load(atmosphere)
        #atmosphere.writeto(os.path.join(twd, 'atm.mod'))
    modelfilename = atmosphere.get_fname()
    ## Abundances: (Z, [X/Fe]) pairs -> {Z: absolute abundance}
    abundances = validate_abundances(list(args), atmosphere.MH)
    # Step 1: babsma_lu computes the continuous opacity, unless modelopac
    # points at an existing file with a precomputed one.
    if modelopac is None or \
       (isinstance(modelopac,str) and not os.path.exists(modelopac)):
        # Now write the script file for babsma_lu
        scriptfilename= os.path.join(twd,'babsma.par')
        modelopacname= os.path.join(twd,'mopac')
        _write_script(scriptfilename,
                      wmin,wmax,dwl,
                      None,
                      modelfilename,
                      marcsfile,
                      modelopacname,
                      atmosphere.MH,
                      atmosphere.AM,
                      abundances,
                      atmosphere.vt,
                      atmosphere.spherical,
                      None,None,None,bsyn=False)
        # Run babsma (the .par script is piped to its stdin line by line)
        sys.stdout.write('\r'+"Running Turbospectrum babsma_lu ...\r")
        sys.stdout.flush()
        if verbose:
            stdout= None
            stderr= None
        else:
            # NOTE(review): this file handle is never closed (and the name
            # `stdout` is later clobbered by p.communicate()'s return value).
            stdout= open('/dev/null', 'w')
            stderr= subprocess.STDOUT
        try:
            p= subprocess.Popen([os.path.join(_TURBO_DIR_, 'babsma_lu')],
                                cwd=twd,
                                stdin=subprocess.PIPE,
                                stdout=stdout,
                                stderr=stderr)
            with open(os.path.join(twd,'babsma.par'),'r') as parfile:
                for line in parfile:
                    p.stdin.write(line.encode('utf-8'))
            stdout, stderr= p.communicate()
        except subprocess.CalledProcessError:
            # Best-effort cleanup before surfacing the failure
            for linelistfilename in linelistfilenames:
                os.remove(linelistfilename,twd)
            if os.path.exists(os.path.join(twd,'DATA')):
                os.remove(os.path.join(twd,'DATA'))
            raise RuntimeError("Running babsma_lu failed ...")
        finally:
            #if os.path.exists(os.path.join(twd,'babsma.par')) \
            #        and outfname is None: #not 'saveTurboInput' in kwargs:
            #    os.remove(os.path.join(twd,'babsma.par'))
            sys.stdout.write('\r'+_ERASESTR+'\r')
            sys.stdout.flush()
        # If the caller asked for the opacity to be saved, copy it out
        if isinstance(modelopac,str):
            shutil.copy(modelopacname,modelopac)
    else:
        # Reuse a precomputed continuous opacity file
        shutil.copy(modelopac,twd)
        modelopacname= os.path.join(twd,os.path.basename(modelopac))
    # Step 2: bsyn_lu performs the actual spectrum synthesis
    # Now write the script file for bsyn_lu
    scriptfilename= os.path.join(twd,'bsyn.par')
    outfilename= os.path.join(twd,'bsyn.out')
    _write_script(scriptfilename,
                  wmin,wmax,dwl,
                  costheta,
                  modelfilename,
                  marcsfile,
                  modelopacname,
                  atmosphere.MH,
                  atmosphere.AM,
                  abundances, #indiv_abu,
                  None,
                  atmosphere.spherical,
                  outfilename,
                  isotopes,
                  linelistfilenames,
                  bsyn=True)
    # Run bsyn (same stdin-piping pattern as babsma above)
    sys.stdout.write('\r'+"Running Turbospectrum bsyn_lu ...\r")
    sys.stdout.flush()
    if verbose:
        stdout= None
        stderr= None
    else:
        stdout= open('/dev/null', 'w')
        stderr= subprocess.STDOUT
    try:
        p= subprocess.Popen([os.path.join(_TURBO_DIR_, 'bsyn_lu')],
                            cwd=twd,
                            stdin=subprocess.PIPE,
                            stdout=stdout,
                            stderr=stderr)
        with open(os.path.join(twd,'bsyn.par'),'r') as parfile:
            for line in parfile:
                p.stdin.write(line.encode('utf-8'))
        stdout, stderr= p.communicate()
    except subprocess.CalledProcessError:
        raise RuntimeError("Running bsyn_lu failed ...")
    finally:
        # Optionally archive the whole working directory (scripts + outputs)
        if outfname is not None:
            turbosavefilename= outfname
            if os.path.dirname(turbosavefilename) == '':
                turbosavefilename= os.path.join(os.getcwd(),turbosavefilename)
            try:
                subprocess.check_call(['tar','cvzf',turbosavefilename,
                                       os.path.basename(os.path.normpath(twd))])
            except subprocess.CalledProcessError:
                raise RuntimeError("Tar-zipping the Turbospectrum input and output failed; you will have to manually delete the temporary directory ...")
        # # Need to remove babsma.par, bc not removed above
        # if os.path.exists(os.path.join(twd,'babsma.par')):
        #     os.remove(os.path.join(twd,'babsma.par'))
        #if os.path.exists(os.path.join(twd,'bsyn.par')):
        #    os.remove(os.path.join(twd,'bsyn.par'))
        #if os.path.exists(modelopacname):
        #    os.remove(modelopacname)
        #if os.path.exists(modelopacname+'.mod'):
        #    os.remove(modelopacname+'.mod')
        #if os.path.exists(os.path.join(twd,'DATA')):
        #    os.remove(os.path.join(twd,'DATA'))
        #if os.path.exists(os.path.join(twd,'dummy-output.dat')):
        #    os.remove(os.path.join(twd,'dummy-output.dat'))
        #if os.path.exists(modelfilename):
        #    os.remove(modelfilename)
        #if rmLinelists:
        #    for linelistfilename in linelistfilenames[1:]:
        #        os.remove(linelistfilename)
        sys.stdout.write('\r'+_ERASESTR+'\r')
        sys.stdout.flush()
    # Now read the output (bsyn writes wavelength / cont-norm / absolute flux)
    turboOut= np.loadtxt(outfilename)
    # Clean up
    #if not debug:
    #    os.remove(outfilename)
    #    os.rmdir(twd)
    # Return wav, cont-norm, full spectrum
    return (turboOut[:,0],turboOut[:,1],turboOut[:,2])
def validate_abundances(abundances, MH):
    """Convert ``[(Z1, XFe1), ...]`` into ``{Z: absolute abundance}``.

    Each entry is validated (3 <= Z <= 92, -9 <= [X/Fe] <= 9, rounded to 3
    decimals) and converted via A(X) = [X/Fe] + [M/H] + A_solar(X).
    C (Z=6) and N (Z=7) are always included (at [X/Fe]=0 if not supplied),
    to stay consistent with the atmosphere's C value.

    If ``abundances`` is empty, returns Fe alone at the model metallicity.

    BUGFIX: the original appended the default C/N entries to the *input*
    ``abundances`` list after the Zs/XFes working lists were already built,
    so they never reached the output dict -- and it mutated the caller's
    list as a side effect. Defaults are now added to the working lists.
    """
    assert isinstance(abundances, list), abundances
    if len(abundances) == 0: return {26: MH + utils.get_solar(26)}
    Zs = []
    XFes = []
    for Z,XFe in abundances:
        Z = int(Z)
        assert (Z >= 3) & (Z <= 92), Z
        Zs.append(Z)
        XFe = np.round(float(XFe),3)
        assert (XFe >= -9) & (XFe <= 9), XFe
        XFes.append(XFe)
    # From Jo: make sure to adjust C for the atmosphere's C value by
    # definitely including it; N likewise.
    for Z_default in (6, 7):
        if Z_default not in Zs:
            Zs.append(Z_default)
            XFes.append(0.)
    new_abundances = {}
    for Z, XFe in zip(Zs, XFes):
        new_abundances[Z] = XFe + MH + utils.get_solar(Z)
    return new_abundances
def _write_script(scriptfilename,
                  wmin,wmax,dw,
                  costheta,
                  modelfilename,
                  marcsfile,
                  modelopacname,
                  metals,
                  alphafe,
                  indiv_abu, # dictionary with atomic number, abundance
                  vmicro,
                  spherical,
                  resultfilename,
                  isotopes,
                  linelistfilenames,
                  bsyn=False):
    """Write the parameter script for babsma_lu (bsyn=False) or bsyn_lu (bsyn=True).

    All lines are accumulated in memory first and then written in one pass;
    the resulting file content is identical to the line-by-line version.
    """
    out = []
    emit = out.append
    # Wavelength grid (common to both programs)
    emit("'LAMBDA_MIN:' '%.3f'\n" % wmin)
    emit("'LAMBDA_MAX:' '%.3f'\n" % wmax)
    emit("'LAMBDA_STEP:' '%.3f'\n" % dw)
    if bsyn:
        # bsyn-only: flux mode, viewing angle, no abundance fitting
        emit("'INTENSITY/FLUX:' 'Flux'\n")
        emit("'COS(THETA) :' '%.3f'\n" % costheta)
        emit("'ABFIND :' '.false.'\n")
    if not bsyn:
        # babsma reads the model atmosphere directly
        emit("'MODELINPUT:' '%s'\n" % modelfilename)
    if marcsfile:
        emit("'MARCS-FILE:' '.true.'\n")
    else:
        emit("'MARCS-FILE:' '.false.'\n")
    emit("'MODELOPAC:' '%s'\n" % modelopacname)
    if bsyn:
        emit("'RESULTFILE :' '%s'\n"
             % resultfilename)
    # Global composition
    emit("'METALLICITY:' '%.3f'\n" % metals)
    emit("'ALPHA/Fe :' '%.3f'\n" % alphafe)
    emit("'HELIUM :' '0.00'\n")
    emit("'R-PROCESS :' '0.00'\n")
    emit("'S-PROCESS :' '0.00'\n")
    # Individual abundances
    n_abu = len(indiv_abu)
    if n_abu > 0:
        emit("'INDIVIDUAL ABUNDANCES:' '%i'\n" % n_abu)
        for species in indiv_abu:
            emit("%i %.3f\n" % (species,indiv_abu[species]))
    if bsyn:
        # Isotopes, linelists, and geometry are bsyn-only
        n_iso = len(isotopes)
        if n_iso > 0:
            emit("'ISOTOPES : ' '%i'\n" % n_iso)
            for iso in isotopes:
                emit('%s %s\n' % (iso,isotopes[iso]))
        emit("'NFILES :' '%i'\n" % len(linelistfilenames))
        for llfile in linelistfilenames:
            emit("%s\n" % llfile)
        if spherical:
            emit("'SPHERICAL:' 'T'\n")
        else:
            emit("'SPHERICAL:' 'F'\n")
        emit("30\n")
        emit("300.00\n")
        emit("15\n")
        emit("1.30\n")
    else:
        # babsma needs the (fixed) microturbulence instead
        emit("'XIFIX:' 'T'\n")
        emit("%.3f\n" % vmicro)
    with open(scriptfilename,'w') as fh:
        fh.writelines(out)
    return None
| [
"sys.stdout.write",
"os.path.abspath",
"os.remove",
"numpy.ceil",
"os.path.basename",
"os.getcwd",
"os.path.dirname",
"os.path.exists",
"sys.stdout.flush",
"numpy.loadtxt",
"os.path.normpath",
"os.path.join",
"os.getenv",
"shutil.copy"
] | [((3671, 3699), 'numpy.ceil', 'np.ceil', (['((wmax - wmin) / dwl)'], {}), '((wmax - wmin) / dwl)\n', (3678, 3699), True, 'import numpy as np\n'), ((8316, 8345), 'os.path.join', 'os.path.join', (['twd', '"""bsyn.par"""'], {}), "(twd, 'bsyn.par')\n", (8328, 8345), False, 'import os, sys, shutil\n'), ((8362, 8391), 'os.path.join', 'os.path.join', (['twd', '"""bsyn.out"""'], {}), "(twd, 'bsyn.out')\n", (8374, 8391), False, 'import os, sys, shutil\n'), ((8897, 8959), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + 'Running Turbospectrum bsyn_lu ...\\r')"], {}), "('\\r' + 'Running Turbospectrum bsyn_lu ...\\r')\n", (8913, 8959), False, 'import os, sys, shutil\n'), ((8962, 8980), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8978, 8980), False, 'import os, sys, shutil\n'), ((11284, 11307), 'numpy.loadtxt', 'np.loadtxt', (['outfilename'], {}), '(outfilename)\n', (11294, 11307), True, 'import numpy as np\n'), ((6140, 6171), 'os.path.join', 'os.path.join', (['twd', '"""babsma.par"""'], {}), "(twd, 'babsma.par')\n", (6152, 6171), False, 'import os, sys, shutil\n'), ((6194, 6220), 'os.path.join', 'os.path.join', (['twd', '"""mopac"""'], {}), "(twd, 'mopac')\n", (6206, 6220), False, 'import os, sys, shutil\n'), ((6697, 6761), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + 'Running Turbospectrum babsma_lu ...\\r')"], {}), "('\\r' + 'Running Turbospectrum babsma_lu ...\\r')\n", (6713, 6761), False, 'import os, sys, shutil\n'), ((6768, 6786), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6784, 6786), False, 'import os, sys, shutil\n'), ((8155, 8182), 'shutil.copy', 'shutil.copy', (['modelopac', 'twd'], {}), '(modelopac, twd)\n', (8166, 8182), False, 'import os, sys, shutil\n'), ((11178, 11219), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + _ERASESTR + '\\r')"], {}), "('\\r' + _ERASESTR + '\\r')\n", (11194, 11219), False, 'import os, sys, shutil\n'), ((11224, 11242), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11240, 
11242), False, 'import os, sys, shutil\n'), ((4301, 4326), 'os.path.join', 'os.path.join', (['twd', '"""DATA"""'], {}), "(twd, 'DATA')\n", (4313, 4326), False, 'import os, sys, shutil\n'), ((4348, 4370), 'os.getenv', 'os.getenv', (['"""TURBODATA"""'], {}), "('TURBODATA')\n", (4357, 4370), False, 'import os, sys, shutil\n'), ((4371, 4396), 'os.path.join', 'os.path.join', (['twd', '"""DATA"""'], {}), "(twd, 'DATA')\n", (4383, 4396), False, 'import os, sys, shutil\n'), ((7981, 8022), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + _ERASESTR + '\\r')"], {}), "('\\r' + _ERASESTR + '\\r')\n", (7997, 8022), False, 'import os, sys, shutil\n'), ((8031, 8049), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (8047, 8049), False, 'import os, sys, shutil\n'), ((8100, 8137), 'shutil.copy', 'shutil.copy', (['modelopacname', 'modelopac'], {}), '(modelopacname, modelopac)\n', (8111, 8137), False, 'import os, sys, shutil\n'), ((8222, 8249), 'os.path.basename', 'os.path.basename', (['modelopac'], {}), '(modelopac)\n', (8238, 8249), False, 'import os, sys, shutil\n'), ((4075, 4100), 'os.path.abspath', 'os.path.abspath', (['linelist'], {}), '(linelist)\n', (4090, 4100), False, 'import os, sys, shutil\n'), ((6038, 6063), 'os.path.exists', 'os.path.exists', (['modelopac'], {}), '(modelopac)\n', (6052, 6063), False, 'import os, sys, shutil\n'), ((9160, 9196), 'os.path.join', 'os.path.join', (['_TURBO_DIR_', '"""bsyn_lu"""'], {}), "(_TURBO_DIR_, 'bsyn_lu')\n", (9172, 9196), False, 'import os, sys, shutil\n'), ((9391, 9420), 'os.path.join', 'os.path.join', (['twd', '"""bsyn.par"""'], {}), "(twd, 'bsyn.par')\n", (9403, 9420), False, 'import os, sys, shutil\n'), ((9762, 9796), 'os.path.dirname', 'os.path.dirname', (['turbosavefilename'], {}), '(turbosavefilename)\n', (9777, 9796), False, 'import os, sys, shutil\n'), ((3895, 3906), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3904, 3906), False, 'import os, sys, shutil\n'), ((6998, 7036), 'os.path.join', 'os.path.join', 
(['_TURBO_DIR_', '"""babsma_lu"""'], {}), "(_TURBO_DIR_, 'babsma_lu')\n", (7010, 7036), False, 'import os, sys, shutil\n'), ((7251, 7282), 'os.path.join', 'os.path.join', (['twd', '"""babsma.par"""'], {}), "(twd, 'babsma.par')\n", (7263, 7282), False, 'import os, sys, shutil\n'), ((7553, 7585), 'os.remove', 'os.remove', (['linelistfilename', 'twd'], {}), '(linelistfilename, twd)\n', (7562, 7585), False, 'import os, sys, shutil\n'), ((7615, 7640), 'os.path.join', 'os.path.join', (['twd', '"""DATA"""'], {}), "(twd, 'DATA')\n", (7627, 7640), False, 'import os, sys, shutil\n'), ((9852, 9863), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9861, 9863), False, 'import os, sys, shutil\n'), ((7668, 7693), 'os.path.join', 'os.path.join', (['twd', '"""DATA"""'], {}), "(twd, 'DATA')\n", (7680, 7693), False, 'import os, sys, shutil\n'), ((10027, 10048), 'os.path.normpath', 'os.path.normpath', (['twd'], {}), '(twd)\n', (10043, 10048), False, 'import os, sys, shutil\n')] |
import numpy as np
import pandas as pd
import os
from download import download
import requests
import json
import time
start = time.time()  # wall-clock reference for the module-level timing printout below
def distance():
    '''
    Build and save the pairwise road-distance matrix (in km) between the
    stations listed in ``data_geo2.csv``.

    Downloads the coordinates file on first run, queries the public OSRM
    routing service for every pair of points, symmetrises the result and
    writes it to ``./data_dist.csv``.  Returns nothing.

    NOTE(review): requires network access; one HTTP request is made per
    ordered pair of stations, so runtime grows quadratically with n.
    '''
    # Load the geographic data (downloaded once, then reused)
    url = 'https://raw.githubusercontent.com/Eldohrim/Project_2021_HAX712X/main/asltam/data/data_geo2.csv'
    path = os.path.join(os.getcwd(), 'data_geo2.csv')
    download(url, path, replace=False)
    geo = pd.read_csv('./data_geo2.csv')
    # Initialise coordinates as [longitude, latitude] pairs (OSRM order)
    latt = list(geo['Latt'])
    long = list(geo['Long'])
    coor = []
    n = len(long)
    for i in range(n):
        coor.append([long[i], latt[i]])
    # Build the distance matrix
    D = np.zeros((n, n))
    for i in range(n):
        # Only the upper triangle is filled here; the full matrix is
        # obtained by symmetrisation below.  (The original comment called
        # this "diagonal" — it is triangular.)
        x, y = coor[i]
        for j in range(i, n):
            # Both travel directions are queried because OSRM follows the
            # actual road network: keep the shorter, more plausible one.
            x1, y1 = coor[j]
            r1 = requests.get(f"http://router.project-osrm.org/route/v1/car/{x},{y};{x1},{y1}?overview=false""")
            routes_1 = json.loads(r1.content)
            route_1 = routes_1.get("routes")[0]
            r2 = requests.get(f"http://router.project-osrm.org/route/v1/car/{x1},{y1};{x},{y}?overview=false""")
            routes_2 = json.loads(r2.content)
            route_2 = routes_2.get("routes")[0]
            # OSRM distances are metres; convert to whole kilometres
            D[i, j] = min(round(route_1['distance']/1000),
                          round(route_2['distance']/1000))
    # Symmetrise to obtain the complete distance table
    Df = D + D.T
    # Label rows and columns with the station names and persist to disk
    dist = pd.DataFrame(Df)
    dist.rename(columns=geo[' Nom gare '], inplace=True)
    dist.set_index(geo[' Nom gare '], inplace=True)
    dist.to_csv('./data_dist.csv')
end = time.time()  # pairs with the module-level `start` timestamp above
print("Temps passé pour exécuter distance: {0:.5f} s.".format(end - start))
| [
"pandas.DataFrame",
"json.loads",
"pandas.read_csv",
"os.getcwd",
"numpy.zeros",
"time.time",
"requests.get",
"download.download"
] | [((129, 140), 'time.time', 'time.time', ([], {}), '()\n', (138, 140), False, 'import time\n'), ((1988, 1999), 'time.time', 'time.time', ([], {}), '()\n', (1997, 1999), False, 'import time\n'), ((445, 479), 'download.download', 'download', (['url', 'path'], {'replace': '(False)'}), '(url, path, replace=False)\n', (453, 479), False, 'from download import download\n'), ((490, 520), 'pandas.read_csv', 'pd.read_csv', (['"""./data_geo2.csv"""'], {}), "('./data_geo2.csv')\n", (501, 520), True, 'import pandas as pd\n'), ((764, 780), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (772, 780), True, 'import numpy as np\n'), ((1819, 1835), 'pandas.DataFrame', 'pd.DataFrame', (['Df'], {}), '(Df)\n', (1831, 1835), True, 'import pandas as pd\n'), ((411, 422), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (420, 422), False, 'import os\n'), ((1175, 1278), 'requests.get', 'requests.get', (['f"""http://router.project-osrm.org/route/v1/car/{x},{y};{x1},{y1}?overview=false"""'], {}), "(\n f'http://router.project-osrm.org/route/v1/car/{x},{y};{x1},{y1}?overview=false'\n )\n", (1187, 1278), False, 'import requests\n'), ((1294, 1316), 'json.loads', 'json.loads', (['r1.content'], {}), '(r1.content)\n', (1304, 1316), False, 'import json\n'), ((1382, 1485), 'requests.get', 'requests.get', (['f"""http://router.project-osrm.org/route/v1/car/{x1},{y1};{x},{y}?overview=false"""'], {}), "(\n f'http://router.project-osrm.org/route/v1/car/{x1},{y1};{x},{y}?overview=false'\n )\n", (1394, 1485), False, 'import requests\n'), ((1501, 1523), 'json.loads', 'json.loads', (['r2.content'], {}), '(r2.content)\n', (1511, 1523), False, 'import json\n')] |
import numpy as np
import perfplot
try:
import cartesio as cs
except ImportError:
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
)
import cartesio as cs
# noinspection PyUnresolvedReferences
def run_perf_nms():
    """Benchmark the compiled NMS kernel against its pure-Python fallback.

    Uses ``perfplot`` to time ``cs.bbox.nms`` and ``cs.bbox.nms.py_func``
    on random bounding-box sets of increasing size and shows the plot.
    """
    np.random.seed(0)  # deterministic boxes so every kernel sees the same input
    n_range = [2 ** k for k in range(10)]  # 1 .. 512 boxes

    def setup(n: int) -> np.ndarray:
        # Generate `n` random bounding boxes for one benchmark round.
        bbs = cs.bbox.utils.random(n)
        return bbs

    perfplot.show(
        setup=setup,
        kernels=[
            lambda a: cs.bbox.nms(a, thresh=0.85),
            lambda a: cs.bbox.nms.py_func(a, thresh=0.85),  # un-jitted reference
        ],
        labels=[
            "nms",
            "nms_pyfunc",
        ],
        n_range=n_range,
        xlabel="#bbs",
        logx=True,
        logy=True,
        title="NMS performance",
        equality_check=np.allclose,  # verify both kernels agree before timing
        time_unit="ms",
    )
if __name__ == "__main__":
    # Allow running this benchmark file directly as a script.
    run_perf_nms()
| [
"cartesio.bbox.utils.random",
"numpy.random.seed",
"cartesio.bbox.nms.py_func",
"cartesio.bbox.nms",
"os.path.dirname"
] | [((323, 340), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (337, 340), True, 'import numpy as np\n'), ((436, 459), 'cartesio.bbox.utils.random', 'cs.bbox.utils.random', (['n'], {}), '(n)\n', (456, 459), True, 'import cartesio as cs\n'), ((560, 587), 'cartesio.bbox.nms', 'cs.bbox.nms', (['a'], {'thresh': '(0.85)'}), '(a, thresh=0.85)\n', (571, 587), True, 'import cartesio as cs\n'), ((611, 646), 'cartesio.bbox.nms.py_func', 'cs.bbox.nms.py_func', (['a'], {'thresh': '(0.85)'}), '(a, thresh=0.85)\n', (630, 646), True, 'import cartesio as cs\n'), ((197, 222), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (212, 222), False, 'import os\n')] |
import pandas as pd
import numpy as np
from sklearn import preprocessing
##
class MultiColomnLabelEncoder:
    """Label-encode every ``category``-dtype column of a DataFrame.

    ``fit`` records the original dtypes and fits one
    ``sklearn.preprocessing.LabelEncoder`` per categorical column;
    ``transform`` replaces those columns by their integer codes (appended
    after the non-categorical columns) and then attempts, best-effort, to
    restore the originally recorded dtypes.
    """

    def __init__(self):
        self.dataTypes = {}      # original dtype of every column, keyed by name
        self.__catColumns = []   # names of the category-dtype columns found by fit()
        self.__MultiLE = {}      # one fitted LabelEncoder per categorical column

    def __Get_Dtypes(self, data=None):
        """Record the dtype of every column of *data* in ``self.dataTypes``.

        The recorded dtypes are used by ``transform`` to convert columns
        back to their original types.
        """
        # FIX: the old signature used ``data=pd.DataFrame()`` — a mutable
        # default evaluated once at definition time; ``None`` is the
        # idiomatic sentinel.
        if data is None:
            data = pd.DataFrame()
        for colomn in data.columns:
            self.dataTypes[colomn] = data[colomn].dtypes
        return self

    def fit(self, data):
        """Fit one LabelEncoder per categorical column of *data*; return self."""
        self.__Get_Dtypes(data)
        self.__catColumns = [cat for cat in self.dataTypes.keys()
                             if (self.dataTypes[cat].name == 'category')]
        for col in self.__catColumns:
            le = preprocessing.LabelEncoder()
            le.fit(data.loc[:, col])
            self.__MultiLE[col] = le
        return self

    def transform(self, data):
        """Return *data* with every categorical column label-encoded.

        Encoded columns keep their relative order and are appended after
        the non-categorical ones (same layout the original recursive
        implementation produced).
        """
        catData = data[self.__catColumns]
        data = data.drop(self.__catColumns, axis=1)
        # FIX: a plain per-column loop replaces the original
        # divide-and-conquer recursion, which recursed infinitely when
        # there were zero categorical columns.
        if self.__catColumns:
            encoded = {
                col: self.__MultiLE[col].transform(catData.loc[:, col])
                for col in catData.columns
            }
            catData = pd.DataFrame(encoded, columns=list(catData.columns))
            catData.set_index(data.index, inplace=True)
            data = pd.concat([data, catData], axis=1)
        # Best-effort dtype restoration; failures are deliberately ignored
        # (e.g. columns no longer present).
        for i, j in self.dataTypes.items():
            try:
                data[i] = data[i].astype(j)
            except Exception:
                pass
        return data
##
class MultiColomnOneHotEncoder:
    """One-hot-encode every ``category``-dtype column of a DataFrame.

    ``fit`` detects the categorical columns and fits one
    ``sklearn.preprocessing.OneHotEncoder`` per column; ``transform``
    replaces each such column by its indicator columns named
    ``<column>_<index>`` and appends them after the remaining columns.
    """

    def __init__(self):
        self.__catColumns = []  # names of the category-dtype columns
        self.__MultiOHE = {}    # one fitted OneHotEncoder per categorical column

    def __getCategoryColomns(self, data=None):
        """Store the names of *data*'s category-dtype columns."""
        # FIX: replaced the mutable-looking default ``data=pd.DataFrame()``
        # with the ``None`` sentinel.
        if data is None:
            data = pd.DataFrame()
        catColumns = []
        for i, j in enumerate(data):
            if (data.dtypes[i].name == 'category'):
                catColumns.append(j)
        self.__catColumns = catColumns
        return

    def fit(self, data):
        """Fit one OneHotEncoder per categorical column of *data*; return self."""
        self.__getCategoryColomns(data)
        for col in self.__catColumns:
            OneHotEncoder = preprocessing.OneHotEncoder(sparse=False)
            # LabelEncoder-style 1-D input must be reshaped to a column vector
            OneHotEncoder.fit(np.array(data.loc[:, col]).reshape(-1, 1))
            self.__MultiOHE[col] = OneHotEncoder
        return self

    def transform(self, data):
        """Return *data* with every categorical column one-hot encoded."""
        catData = data[self.__catColumns]
        data = data.drop(self.__catColumns, axis=1)
        # FIX: with zero categorical columns the original recursion never
        # terminated; just hand the frame back unchanged.
        if not self.__catColumns:
            return data
        # Encode column by column (replaces the original divide-and-conquer
        # recursion; column order is preserved).
        frames = []
        for col in catData.columns:
            enc = self.__MultiOHE[col]
            transformed = pd.DataFrame(
                enc.transform(np.array(catData.loc[:, col]).reshape(-1, 1)))
            transformed.columns = [str(col) + '_' + str(c)
                                   for c in transformed.columns]
            frames.append(transformed)
        transformedCatData = pd.concat(frames, axis=1)
        transformedCatData.set_index(data.index, inplace=True)
        return pd.concat([data, transformedCatData], axis=1)
##
class MultiColomnScaler:
    """Min-max scale every column of a DataFrame to the [0, 1] range."""

    def __init__(self):
        # Placeholder until fit() creates the real MinMaxScaler.
        self.scaler = None

    def fit(self, data):
        """Fit a ``MinMaxScaler`` on *data*; return self."""
        self.scaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        self.scaler.fit(data)
        return self

    def transform(self, data):
        """Return a scaled copy of *data* with the original column labels."""
        columns = data.columns.tolist()
        # BUG FIX: ``DataFrame.as_matrix()`` was removed in pandas 1.0 and
        # raised AttributeError here; ``to_numpy()`` is the supported
        # replacement.
        data = pd.DataFrame(self.scaler.transform(data.to_numpy()))
        data.columns = columns
        return data
"pandas.DataFrame",
"sklearn.preprocessing.MinMaxScaler",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"pandas.concat"
] | [((341, 355), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (353, 355), True, 'import pandas as pd\n'), ((2046, 2080), 'pandas.concat', 'pd.concat', (['[data, catData]'], {'axis': '(1)'}), '([data, catData], axis=1)\n', (2055, 2080), True, 'import pandas as pd\n'), ((2445, 2459), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2457, 2459), True, 'import pandas as pd\n'), ((4364, 4409), 'pandas.concat', 'pd.concat', (['[data, transformedCatData]'], {'axis': '(1)'}), '([data, transformedCatData], axis=1)\n', (4373, 4409), True, 'import pandas as pd\n'), ((4595, 4643), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (4621, 4643), False, 'from sklearn import preprocessing\n'), ((805, 833), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (831, 833), False, 'from sklearn import preprocessing\n'), ((2892, 2933), 'sklearn.preprocessing.OneHotEncoder', 'preprocessing.OneHotEncoder', ([], {'sparse': '(False)'}), '(sparse=False)\n', (2919, 2933), False, 'from sklearn import preprocessing\n'), ((1390, 1422), 'pandas.DataFrame', 'pd.DataFrame', (['{col: transformed}'], {}), '({col: transformed})\n', (1402, 1422), True, 'import pandas as pd\n'), ((3600, 3625), 'pandas.DataFrame', 'pd.DataFrame', (['transformed'], {}), '(transformed)\n', (3612, 3625), True, 'import pandas as pd\n'), ((2964, 2990), 'numpy.array', 'np.array', (['data.loc[:, col]'], {}), '(data.loc[:, col])\n', (2972, 2990), True, 'import numpy as np\n'), ((3528, 3553), 'numpy.array', 'np.array', (['dta.loc[:, col]'], {}), '(dta.loc[:, col])\n', (3536, 3553), True, 'import numpy as np\n')] |
import numpy as np
import tensorflow as tf
import tensorflow.keras.layers as kl
class ProbabilityDistribution(tf.keras.Model):
    """Keras model that samples one categorical action per batch row."""

    def call(self, logits, **kwargs):
        # sample a random categorical action from given logits;
        # squeeze removes the trailing num_samples=1 dimension
        return tf.squeeze(tf.random.categorical(logits, 1), axis=-1)
class CNNModel(tf.keras.Model):
    """Actor-critic CNN: shared conv trunk with separate policy/value heads.

    ``call`` returns ``(logits, value)``; ``action_value`` samples an
    action from the logits and returns it together with the state-value
    estimate (both squeezed to remove the trailing axis).
    """

    def __init__(self, num_actions):
        super().__init__('mlp_policy')
        self.conv1 = kl.Conv2D(128, 3, activation='selu')
        self.mp1 = kl.MaxPool2D((5, 5))
        self.conv2 = kl.Conv2D(128, 3, activation='selu')
        self.mp2 = kl.MaxPool2D((3, 3))
        self.flatten = kl.Flatten()
        self.hidden1 = kl.Dense(128, activation='selu')
        self.hidden2 = kl.Dense(128, activation='selu')
        self.value = kl.Dense(1, name='value')
        # logits are unnormalized log probabilities.
        # BUG FIX: this layer previously used activation='softmax', which
        # contradicts its name and the comment above, and fed
        # *probabilities* into tf.random.categorical (which expects
        # unnormalized logits), distorting the sampling distribution.
        # The head must stay linear.
        self.logits = kl.Dense(num_actions, name='policy_logits')
        self.dist = ProbabilityDistribution()

    def call(self, inputs):
        # inputs is a numpy array, convert to Tensor
        x = tf.convert_to_tensor(inputs)
        x = self.conv1(x)
        x = self.mp1(x)
        x = self.conv2(x)
        x = self.mp2(x)
        x = self.flatten(x)
        # separate hidden layers branch off the same flattened features
        hidden_logs = self.hidden1(x)
        hidden_vals = self.hidden2(x)
        return self.logits(hidden_logs), self.value(hidden_vals)

    def action_value(self, obs):
        # predict() executes call() under the hood
        logits, value = self.predict(obs)
        action = self.dist.predict(logits)
        return np.squeeze(action, axis=-1), np.squeeze(value, axis=-1)
| [
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dense",
"tensorflow.convert_to_tensor",
"tensorflow.random.categorical",
"tensorflow.keras.layers.MaxPool2D",
"numpy.squeeze",
"tensorflow.keras.layers.Flatten"
] | [((428, 464), 'tensorflow.keras.layers.Conv2D', 'kl.Conv2D', (['(128)', '(3)'], {'activation': '"""selu"""'}), "(128, 3, activation='selu')\n", (437, 464), True, 'import tensorflow.keras.layers as kl\n'), ((484, 504), 'tensorflow.keras.layers.MaxPool2D', 'kl.MaxPool2D', (['(5, 5)'], {}), '((5, 5))\n', (496, 504), True, 'import tensorflow.keras.layers as kl\n'), ((526, 562), 'tensorflow.keras.layers.Conv2D', 'kl.Conv2D', (['(128)', '(3)'], {'activation': '"""selu"""'}), "(128, 3, activation='selu')\n", (535, 562), True, 'import tensorflow.keras.layers as kl\n'), ((582, 602), 'tensorflow.keras.layers.MaxPool2D', 'kl.MaxPool2D', (['(3, 3)'], {}), '((3, 3))\n', (594, 602), True, 'import tensorflow.keras.layers as kl\n'), ((626, 638), 'tensorflow.keras.layers.Flatten', 'kl.Flatten', ([], {}), '()\n', (636, 638), True, 'import tensorflow.keras.layers as kl\n'), ((662, 694), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(128)'], {'activation': '"""selu"""'}), "(128, activation='selu')\n", (670, 694), True, 'import tensorflow.keras.layers as kl\n'), ((718, 750), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(128)'], {'activation': '"""selu"""'}), "(128, activation='selu')\n", (726, 750), True, 'import tensorflow.keras.layers as kl\n'), ((772, 797), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['(1)'], {'name': '"""value"""'}), "(1, name='value')\n", (780, 797), True, 'import tensorflow.keras.layers as kl\n'), ((872, 937), 'tensorflow.keras.layers.Dense', 'kl.Dense', (['num_actions'], {'activation': '"""softmax"""', 'name': '"""policy_logits"""'}), "(num_actions, activation='softmax', name='policy_logits')\n", (880, 937), True, 'import tensorflow.keras.layers as kl\n'), ((1078, 1106), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['inputs'], {}), '(inputs)\n', (1098, 1106), True, 'import tensorflow as tf\n'), ((255, 287), 'tensorflow.random.categorical', 'tf.random.categorical', (['logits', '(1)'], {}), '(logits, 1)\n', (276, 287), True, 'import 
tensorflow as tf\n'), ((1611, 1638), 'numpy.squeeze', 'np.squeeze', (['action'], {'axis': '(-1)'}), '(action, axis=-1)\n', (1621, 1638), True, 'import numpy as np\n'), ((1640, 1666), 'numpy.squeeze', 'np.squeeze', (['value'], {'axis': '(-1)'}), '(value, axis=-1)\n', (1650, 1666), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def augment(img, obj, projection, template, color=False, scale=4):
    """Render a 3-D OBJ model onto *img* over a detected marker.

    Parameters
    ----------
    img : ndarray
        Frame to draw on; a contiguous uint8 copy is modified and returned.
    obj : three_d_object
        Parsed OBJ model whose faces are rasterised as filled polygons.
    projection : ndarray
        Matrix passed to ``cv2.perspectiveTransform`` to map 3-D model
        points to image coordinates.
        # assumes a shape cv2.perspectiveTransform accepts for 3-D points
        # (3x4 camera projection) — confirm against the caller
    template : ndarray
        Grayscale marker image; its (h, w) anchors and centres the model.
    color : bool
        When True, use the per-face colours stored in ``obj.faces``;
        otherwise every face is drawn in dark grey (50, 50, 50).
    scale : int
        Uniform scale factor applied to the model vertices.
    """
    h, w = template.shape
    vertices = obj.vertices
    img = np.ascontiguousarray(img, dtype=np.uint8)
    # Black out the marker quadrilateral first so faces render on a clean patch
    a = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0], [0, h, 0]], np.float64)
    imgpts = np.int32(cv2.perspectiveTransform(
        a.reshape(-1, 1, 3), projection))
    cv2.fillConvexPoly(img, imgpts, (0, 0, 0))
    for face in obj.faces:
        face_vertices = face[0]
        points = np.array([vertices[vertex - 1] for vertex in face_vertices])  # OBJ indices are 1-based
        points = scale*points
        # Swap axes and offset by (w/2, h/2) to centre the model on the marker
        points = np.array([[p[2] + w/2, p[0] + h/2, p[1]] for p in points])
        dst = cv2.perspectiveTransform(points.reshape(-1, 1, 3), projection)
        imgpts = np.int32(dst)
        if color is False:
            cv2.fillConvexPoly(img, imgpts, (50, 50, 50))
        else:
            # face[-1] is the colour tuple attached by three_d_object
            cv2.fillConvexPoly(img, imgpts, face[-1])
    return img
class three_d_object:
    """Minimal Wavefront OBJ loader (vertices, faces, texture coordinates).

    Each entry of ``self.faces`` is ``[vertex_indices, texcoord_indices,
    color]`` where *color* is either averaged from the texture image or a
    fixed grey when the model has no usable texture coordinates.
    """

    def __init__(self, filename_obj, filename_texture, color_fixed=False):
        self.texture = cv2.imread(filename_texture)
        self.vertices = []
        self.faces = []
        self.texcoords = []
        # FIX: the OBJ file handle was previously leaked (bare open() in the
        # for statement); a context manager guarantees it is closed.
        with open(filename_obj, "r") as obj_file:
            for line in obj_file:
                if line.startswith('#'):  # comment line
                    continue
                values = line.split()
                if not values:
                    continue
                if values[0] == 'v':  # geometric vertex
                    v = [float(a) for a in values[1:4]]
                    self.vertices.append(v)
                elif values[0] == 'vt':  # texture coordinate
                    self.texcoords.append([float(a) for a in values[1:3]])
                elif values[0] == 'f':  # face: v/vt[/vn] index groups
                    face_vertices = []
                    face_texcoords = []
                    for v in values[1:]:
                        w = v.split('/')
                        face_vertices.append(int(w[0]))
                        if len(w) >= 2 and len(w[1]) > 0:
                            face_texcoords.append(int(w[1]))
                        else:
                            # No texcoord on this face: fall back to the
                            # fixed grey for the remaining faces
                            # (preserves the original behaviour).
                            color_fixed = True
                            face_texcoords.append(0)
                    self.faces.append([face_vertices, face_texcoords])
        for f in self.faces:
            if not color_fixed:
                f.append(three_d_object.decide_face_color(
                    f[-1], self.texture, self.texcoords))
            else:
                f.append((50, 50, 50))

    @staticmethod
    def decide_face_color(hex_color, texture, textures):
        """Average the texture pixels referenced by a face's texcoords.

        ``hex_color`` holds 1-based indices into *textures* (the (u, v)
        coordinate list); the matching pixels of *texture* are averaged
        and the sampled colour is returned as a tuple of ints.
        """
        # FIX: declared @staticmethod — the original omitted ``self`` and
        # only worked because it was called through the class; unused
        # ``col``/``coord`` zero-initialisations were removed.
        h, w, _ = texture.shape
        all_us = []
        all_vs = []
        for i in hex_color:
            t = textures[i - 1]
            # v is flipped because image rows grow downward; the small
            # epsilon keeps coordinates at exactly 1.0 inside the image.
            u, v = int(w*(t[0]) - 0.0001), int(h*(1-t[1]) - 0.0001)
            all_us.append(u)
            all_vs.append(v)
        u = int(sum(all_us)/len(all_us))
        v = int(sum(all_vs)/len(all_vs))
        col = np.uint8(texture[v, u])
        col = [int(a) for a in col]
        col = tuple(col)
        return (col)
| [
"numpy.uint8",
"numpy.zeros",
"cv2.imread",
"numpy.array",
"numpy.int32",
"cv2.fillConvexPoly",
"numpy.ascontiguousarray"
] | [((163, 204), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {'dtype': 'np.uint8'}), '(img, dtype=np.uint8)\n', (183, 204), True, 'import numpy as np\n'), ((214, 280), 'numpy.array', 'np.array', (['[[0, 0, 0], [w, 0, 0], [w, h, 0], [0, h, 0]]', 'np.float64'], {}), '([[0, 0, 0], [w, 0, 0], [w, h, 0], [0, h, 0]], np.float64)\n', (222, 280), True, 'import numpy as np\n'), ((377, 419), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img', 'imgpts', '(0, 0, 0)'], {}), '(img, imgpts, (0, 0, 0))\n', (395, 419), False, 'import cv2\n'), ((497, 557), 'numpy.array', 'np.array', (['[vertices[vertex - 1] for vertex in face_vertices]'], {}), '([vertices[vertex - 1] for vertex in face_vertices])\n', (505, 557), True, 'import numpy as np\n'), ((605, 667), 'numpy.array', 'np.array', (['[[p[2] + w / 2, p[0] + h / 2, p[1]] for p in points]'], {}), '([[p[2] + w / 2, p[0] + h / 2, p[1]] for p in points])\n', (613, 667), True, 'import numpy as np\n'), ((758, 771), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (766, 771), True, 'import numpy as np\n'), ((1063, 1091), 'cv2.imread', 'cv2.imread', (['filename_texture'], {}), '(filename_texture)\n', (1073, 1091), False, 'import cv2\n'), ((2469, 2480), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2477, 2480), True, 'import numpy as np\n'), ((2497, 2508), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2505, 2508), True, 'import numpy as np\n'), ((2877, 2900), 'numpy.uint8', 'np.uint8', (['texture[v, u]'], {}), '(texture[v, u])\n', (2885, 2900), True, 'import numpy as np\n'), ((811, 856), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img', 'imgpts', '(50, 50, 50)'], {}), '(img, imgpts, (50, 50, 50))\n', (829, 856), False, 'import cv2\n'), ((883, 924), 'cv2.fillConvexPoly', 'cv2.fillConvexPoly', (['img', 'imgpts', 'face[-1]'], {}), '(img, imgpts, face[-1])\n', (901, 924), False, 'import cv2\n'), ((2630, 2652), 'numpy.array', 'np.array', (['[t[0], t[1]]'], {}), '([t[0], t[1]])\n', (2638, 2652), True, 'import 
numpy as np\n')] |
import sys
import warnings
from PyQt5.QtWidgets import (QApplication, QMainWindow, QMenu, QVBoxLayout,
QSizePolicy, QMessageBox, QWidget, QPushButton)
#from PyQt5.QtGui import QIcon
from PyQt5 import uic
import numpy as np
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import (FigureCanvasQTAgg as FigureCanvas,
NavigationToolbar2QT as NavigationToolbar)
import matplotlib.cbook
from mpl_toolkits.axes_grid1 import make_axes_locatable
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
from mpl_toolkits.mplot3d import axes3d, proj3d
from matplotlib.patches import FancyArrowPatch
from itertools import product, combinations
from scipy.linalg import expm, norm
class Arrow3D(FancyArrowPatch):
    """FancyArrowPatch whose fixed 3-D endpoints are projected at draw time.

    Standard matplotlib recipe for arrows in a 3-D axes: the endpoints
    are stored and re-projected with the current view matrix on every
    draw, so the arrow tracks camera rotations.
    """

    def __init__(self, xs, ys, zs, *args, **kwargs):
        # (0, 0) positions are placeholders; real ones are set in draw().
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs

    def draw(self, renderer):
        xs3d, ys3d, zs3d = self._verts3d
        # Project the stored 3-D endpoints into 2-D screen coordinates.
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
def cComponents(c):
    """Return the real and imaginary parts of *c* as a length-2 array."""
    real_part = np.real(c)
    imag_part = np.imag(c)
    return np.array([real_part, imag_part])
def M(axis, theta):
    """Rotation matrix for a rotation of *theta* radians about *axis*.

    Built as the matrix exponential of the scaled cross-product
    generator (Rodrigues' formula via ``scipy.linalg.expm``).
    """
    generator = np.cross(np.eye(3), axis/norm(axis)*theta)
    return expm(generator)
def EulerRot(alpha, beta, gamma):
    """z-y-z Euler rotation: Rz(alpha) @ Ry(beta) @ Rz(gamma)."""
    z_axis = np.array([0,0,1])
    y_axis = np.array([0,1,0])
    rotation = M(z_axis, alpha)
    rotation = np.matmul(rotation, M(y_axis, beta))
    rotation = np.matmul(rotation, M(z_axis, gamma))
    return rotation
def RotatePoints(x, y, z, A):
    """Apply matrix *A* to every (x, y, z) point.

    Returns a 3xN array whose rows are the rotated x, y and z
    coordinates.
    """
    rotated = [np.dot(A, point) for point in zip(x, y, z)]
    return np.array(rotated).T
def WignerD(alpha, beta, gamma):
    """Wigner D-matrix for angular momentum J=1 (z-y-z Euler angles)."""
    cos_b = np.cos(beta)
    sin_b = np.sin(beta)
    cos_hb = np.cos(beta/2)
    sin_hb = np.sin(beta/2)
    root2 = np.sqrt(2)
    # Reduced (real) rotation matrix d^1_{m'm}(beta)
    little_d = np.array([
        [cos_hb**2, -sin_b/root2, sin_hb**2],
        [sin_b/root2, cos_b, -sin_b/root2],
        [sin_hb**2, sin_b/root2, cos_hb**2],
    ])
    # Phase factors exp(-i(m'*alpha + m*gamma)) for m', m in {-1, 0, +1}
    phases = np.exp(-1j * np.array([
        [-alpha - gamma, -alpha, -alpha + gamma],
        [-gamma, 0, gamma],
        [alpha - gamma, alpha, alpha + gamma],
    ]))
    return phases * little_d
class App(QMainWindow):
    """Main window of the polarization-rotation visualizer.

    Loads the Qt Designer layout from ``Visualizer.ui`` and wires three
    Euler-angle sliders/textboxes to a 3-D beam view (``PlotCanvas3D``)
    and a density-matrix heat map (``PlotCanvas2D``).
    """

    def __init__(self):
        """Load the .ui layout and build the interface."""
        super().__init__()
        uic.loadUi('Visualizer.ui', self)
        self.left = 10
        self.top = 10
        self.title = 'Polarization Rotation Visualizer'
        self.width = 640
        self.height = 400
        # self.vec = -np.array([0,0,1.5])
        self.initUI()
    def initUI(self):
        """Connect widget signals, embed both canvases and show the window."""
        self.setWindowTitle(self.title)
        # self.setGeometry(self.left, self.top, self.width, self.height)
        # Textboxes set the matching slider; sliders drive the rotation.
        self.alpha_LineEdit.returnPressed.connect(lambda: self.set_slider(0))
        self.beta_LineEdit.returnPressed.connect(lambda: self.set_slider(1))
        self.gamma_LineEdit.returnPressed.connect(lambda: self.set_slider(2))
        self.alphaSlider.valueChanged.connect(lambda: self.sliderChanged(0))
        self.betaSlider.valueChanged.connect(lambda: self.sliderChanged(1))
        self.gammaSlider.valueChanged.connect(lambda: self.sliderChanged(2))
        self.resetButton.clicked.connect(self.reset)
        self.rightRadioButton.clicked.connect(self.change_poln)
        self.linearRadioButton.clicked.connect(self.change_poln)
        self.leftRadioButton.clicked.connect(self.change_poln)
        # Button ids encode the polarization (see checkInputPoln):
        # -2 -> right circular, 0 -> linear, 1 -> left circular
        self.polnButtonGroup.setId(self.rightRadioButton, -2)
        self.polnButtonGroup.setId(self.linearRadioButton, 0)
        self.polnButtonGroup.setId(self.leftRadioButton, 1)
        alpha = np.radians(self.alphaSlider.value())
        beta = np.radians(self.betaSlider.value())
        gamma = np.radians(self.gammaSlider.value())
        #Lists with reference to the angles, the sliders, and the textboxes
        self.angles = np.array([alpha, beta, gamma])
        self.lineEdits = [self.alpha_LineEdit, self.beta_LineEdit,
                          self.gamma_LineEdit]
        self.sliders = [self.alphaSlider, self.betaSlider, self.gammaSlider]
        self.alpha_LineEdit.setText(str(int(np.degrees(self.angles[0]))))
        self.beta_LineEdit.setText(str(int(np.degrees(self.angles[1]))))
        self.gamma_LineEdit.setText(str(int(np.degrees(self.angles[2]))))
        # 3-D beam canvas plus its navigation toolbar
        self.plot_layout3d = QVBoxLayout()
        self.pc = PlotCanvas3D(self, width=5, height=4)
        self.toolbar3d = NavigationToolbar(self.pc, self)
        self.plot_layout3d.addWidget(self.pc)
        self.plot_layout3d.addWidget(self.toolbar3d)
        self.gridLayout.addLayout(self.plot_layout3d, 0, 0, 0, 1)
        # 2-D density-matrix canvas plus its toolbar
        self.plot_layout2d = QVBoxLayout()
        self.dens_plot = PlotCanvas2D(self, width=3, height=3)
        self.toolbar2d = NavigationToolbar(self.dens_plot, self)
        self.plot_layout2d.addWidget(self.dens_plot)
        self.plot_layout2d.addWidget(self.toolbar2d)
        self.gridLayout_5.addLayout(self.plot_layout2d, 0, 0, 0, 1)
        self.initialize_pol()
        self.show()
    def initialize_pol(self):
        """Build the unrotated beam vector, J=1 state and polarization curve
        for the currently selected input polarization."""
        self.inp_polzn = self.checkInputPoln()
        r = 0.15  # radius/extent of the drawn polarization curve
        N = 25    # number of points on the curve
        if self.inp_polzn == 0: #Linear input polarization
            rot_init = np.array([0, np.pi/2, 0])
            self.vec = np.array([1.5,0,0]) #starts the beam along the +x-axis
            self.state = np.array([0, 1, 0])
            z = np.linspace(-r, r, N)
            y = np.zeros_like(z)
            x = 0.75*np.ones_like(z)
        else: #circular input polarization
            rot_init = np.array([0, 0, 0])
            self.vec = np.array([0, 0, -1.5])
            if self.inp_polzn > 0: #right-hand circular
                self.state = np.array([0, 0, 1])
            else: #left-hand circular
                self.state = np.array([1, 0, 0])
            theta = np.linspace(0, 2*np.pi, N)
            x, y = r*np.cos(theta), r*np.sin(theta)
            z = -0.75*np.ones_like(x)
        self.pol_curve = np.stack((x, y, z), axis=1)
        #rotates the beam to z-axis from initial orientation
        self.init_vec = np.copy(self.vec)
        self.init_state = np.copy(self.state)
        self.init_pol_curve = np.copy(self.pol_curve)
        self.rotate(*rot_init)
        #resets the initital states so later rotations start from here
        self.init_vec = np.copy(self.vec)
        self.init_state = np.copy(self.state)
        self.init_pol_curve = np.copy(self.pol_curve)
    def rotate(self, alpha=None, beta=None, gamma=None):
        '''Performs the rotation operations on the state and the incidence
        vector, then refreshes both canvases.  When no angles are given,
        the current slider angles are used.'''
        if (alpha == None and beta == None and gamma == None):
            alpha, beta, gamma = self.angles#self.alpha, self.beta, self.gamma
        # SO(3) rotation for the geometry, Wigner D for the J=1 state
        self.R = EulerRot(alpha, beta, gamma)
        self.D = WignerD(alpha, beta, gamma)
        self.state = np.dot(self.D, self.init_state)
        self.vec = np.dot(self.R, self.init_vec)
        self.pol_curve = self.rotation_op(self.R, self.init_pol_curve)
        self.pc.update_vec(self.vec)
        self.pc.update_poln(self.pol_curve, self.inp_polzn)
        self.update_state()
        # density matrix magnitudes |rho| = |state><state|
        rho = np.abs(np.outer(self.state, np.conj(self.state)))
        self.dens_plot.update_figure(rho)
    def rotation_op(self, mat, vectors):
        '''Generalizes the rotation operation to also run over a list of
        vectors. Vectors should be in a Nx3 numpy array.'''
        vectors = np.array(list(vectors))
        return np.array([np.dot(mat, v) for v in vectors])
    def reset(self):
        '''
        Resets the slider angles back to zero.
        '''
        # self.angles = np.array([0, 0, 0]) #this line messes up the "reset" afterwards
        self.initialize_pol()
        self.rotate()
        # setValue(0) also fires sliderChanged, which zeroes self.angles
        for ind in range(3):
            self.sliders[ind].setValue(0)
            self.lineEdits[ind].setText(str(0))
    def change_poln(self):
        '''
        Changes the state of the input light polarization when a different
        radio button is selected.
        '''
        self.initialize_pol()
        self.rotate()
    def checkInputPoln(self):
        '''
        Checks the input polarization state corresponding to the radio button
        that is checked.
        Returns: one of -1, 0, or +1.
        '''
        polzn = self.polnButtonGroup.checkedId()
        # right-circular button was registered with id -2 -> map to -1
        if polzn < 0:
            polzn = int(polzn/2)
        return polzn
    def set_slider(self, ind):
        '''
        Set the position of the slider by entering a value in the
        corresponding text box.  Values are clipped to [-180, 180] degrees.
        '''
        val = np.clip(int(self.lineEdits[ind].text()), -180, 180)
        self.angles[ind] = np.radians(val)
        self.sliders[ind].setValue(val)
        self.lineEdits[ind].setText(str(val))
        self.rotate()
        self.pc.update_plot()
    def update_state(self):
        '''
        Prints the values of the rotated state to the textboxes
        (components ordered m = -1, 0, +1).
        '''
        self.negativeLineEdit.setText('{:.2f}'.format(self.state[0]))
        self.zeroLineEdit.setText('{:.2f}'.format(self.state[1]))
        self.positiveLineEdit.setText('{:.2f}'.format(self.state[2]))
    def sliderChanged(self, ind):
        '''
        Responds to a mouse adjustment of a slider value.
        '''
        val = self.sliders[ind].value() #retrieves the value of the slider
        self.angles[ind] = np.radians(val) #sets the corresponding angle
        self.lineEdits[ind].setText(str(val)) #updates the corresponding textbox
        self.rotate()
        self.pc.update_plot()
class PlotCanvas3D(FigureCanvas):
    """Embedded 3-D matplotlib canvas showing axes, beam arrow and
    polarization curve."""

    def __init__(self, parent=None, width=5, height=7, dpi=100, vec=None):
        self.origin = np.array([0.,0.,0.])
        if (vec == None):
            self.vec = self.origin
        else:
            self.vec = vec
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas.__init__(self, self.fig)
        self.axes = self.fig.gca(projection='3d')
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                QSizePolicy.Maximum,
                QSizePolicy.Maximum)
        FigureCanvas.updateGeometry(self)
        # Coordinate-axis arrows; the z axis is drawn green and longer
        self.x_ax = self.arrow3D(self.origin, np.array([0.75, 0., 0.]))
        self.y_ax = self.arrow3D(self.origin, np.array([0., 0.75, 0.]))
        self.z_ax = self.arrow3D(self.origin, np.array([0., 0., 1.5]), color="g")
        self._init_plot()
    def _init_plot(self):
        """Draw the static scene: axes, bounding cube, atom marker, beam."""
        # data = 2*np.random.rand(3, 25) - 1
        self.vec = np.array([0,0,-1.5])
        self.beam = self.arrow3D(self.vec, self.origin, lw=5, color='r')
        # draw bounding cube (lw=0: used only to fix the axes limits)
        r = [-1, 1]
        for s, e in combinations(np.array(list(product(r, r, r))), 2):
            if np.sum(np.abs(s-e)) == r[1]-r[0]:
                self.axes.plot3D(*zip(s, e), color="C0", lw=0)
        # points = self.axes.scatter(*(data))
        self.axes.add_artist(self.x_ax)
        self.axes.add_artist(self.y_ax)
        self.axes.add_artist(self.z_ax)
        #adds origin point
        self.axes.scatter([0], [0], [0], color="k", s=8)
        # draw sphere representing atoms
        self.axes.scatter([0], [0], [0], color="C3", s=200, alpha=0.5)
        self.axes.add_artist(self.beam)
        self.axes.set_xticklabels([])
        self.axes.set_yticklabels([])
        self.axes.set_zticklabels([])
        self.draw()
    def update_plot(self):
        """Re-render the canvas."""
        self.draw()
    def update_vec(self, vec):
        """Replace the beam arrow with one pointing along *vec*."""
        self.vec = vec
        self.beam.remove()
        self.beam = self.arrow3D(self.vec, self.origin, lw=5, color='r')
        self.axes.add_artist(self.beam)
    def update_poln(self, curve, inpt):
        """Redraw the polarization curve.

        *curve* is an Nx3 array of points; *inpt* is the input
        polarization flag (-1/0/+1) and selects the colour mapping
        (handedness-dependent gradient for circular, flat for linear).
        """
        try:
            # remove the previously drawn segments, if any
            for seg in self.pol_curve:
                seg.remove()
        except AttributeError:
            pass
        # Split the curve into consecutive 2-point segments so each can be
        # coloured individually.
        curve = curve.reshape(-1, 1, 3)
        segments = np.concatenate([curve[:-1], curve[1:]], axis=1)
        cmap = plt.get_cmap('seismic')
        if np.abs(inpt) == 1:
            #account for the last to first segment (closed circle)
            lastseg = np.array([curve[-1], curve[0]]).reshape(1, 2, 3)
            segments = np.append(segments, lastseg, axis=0)
            cmap_params = np.linspace(0, 1, len(curve), endpoint=False)
            if inpt > 0:
                # reverse the gradient for right-hand circular
                cmap_params = np.flip(cmap_params)
        else:
            # linear polarization: one uniform colour
            cmap_params = np.ones(len(curve))*0.25
        self.pol_curve = []
        colors = [cmap(i) for i in cmap_params]
        for j, seg in enumerate(segments):
            lseg, = self.axes.plot(seg[:,0], seg[:,1], seg[:,2],
                      lw=3, color=colors[j])
            lseg.set_solid_capstyle('round')
            self.pol_curve.append(lseg,)
        self.update_plot()
    def arrow3D(self, point1, point2, color='k', lw=3):
        """Return an ``Arrow3D`` patch from *point1* to *point2*."""
        return Arrow3D(*zip(point1, point2), mutation_scale=20,
                       lw=lw, arrowstyle="-|>", color=color)
class PlotCanvas2D(FigureCanvas):
    """Qt canvas showing a 3x3 'Density Matrix Magnitudes' heat map with a colourbar."""
    def __init__(self, parent=None, width=5, height=5, dpi=100):
        """Build the figure, axes, colourbar and an initial all-zero image.

        Parameters
        ----------
        parent : parent Qt widget, or None.
        width, height : figure size in inches.
        dpi : figure resolution.
        """
        self.fig = Figure(figsize=(width, height), dpi=dpi)
        FigureCanvas.__init__(self, self.fig)
        self.axes = self.fig.add_subplot(111)
        self.axes.set_aspect('equal')
        self.setParent(parent)
        FigureCanvas.setSizePolicy(self,
                QSizePolicy.Expanding,
                QSizePolicy.Expanding)
        FigureCanvas.updateGeometry(self)
        # Reserve a slim axis on the right-hand side for the colourbar.
        self.divider = make_axes_locatable(self.axes)
        self.cax = self.divider.append_axes('right', size='5%', pad=0.05)
        data = np.zeros((3,3))
        self.im = self.axes.imshow(data, cmap='viridis', vmin=0, vmax=1, origin='lower')
        self.fig.colorbar(self.im, cax=self.cax, orientation='vertical')
        # Major ticks every 1 (cell centres), minor every 0.5 (cell borders).
        self.axes.xaxis.set_major_locator(plt.MultipleLocator(1))
        self.axes.xaxis.set_minor_locator(plt.MultipleLocator(0.5))
        self.axes.yaxis.set_major_locator(plt.MultipleLocator(1))
        self.axes.yaxis.set_minor_locator(plt.MultipleLocator(0.5))
        # self.axes.set_xticks([-0.5, 0.5, 1.5, 2.5])
        # self.axes.set_yticks([-0.5, 0.5, 1.5, 2.5])
        self.axes.set_xticklabels(['', '-1', '0', '+1'])
        self.axes.set_yticklabels(['', '-1', '0', '+1'])
        self.axes.set_title('Density Matrix Magnitudes', fontsize=8)
        # NOTE(review): Axes.grid()'s `b` keyword was renamed to `visible` in
        # matplotlib 3.5 and removed in 3.6 - update if matplotlib is bumped.
        self.axes.grid(b=True, which='minor', linestyle='-')
        # Bug fix: plt.tight_layout() acts on pyplot's *current* figure, and
        # this canvas's Figure() was never registered with pyplot, so the
        # original call never affected this canvas.  Apply the layout to our
        # own figure instead.
        self.fig.tight_layout()
        self.draw()
        # self.init_plot()
    def init_plot(self):
        """Force an initial redraw of the canvas."""
        self.draw()
    def update_figure(self, mat):
        """Display *mat* in the heat-map image and redraw."""
        self.im.set_data(mat)
        self.draw()
if __name__ == '__main__':
    # Launch the Qt application: build the main window and enter the event
    # loop (blocks until the window is closed).
    qt_app = QApplication(sys.argv)
    main_window = App()
    main_window.show()
    qt_app.exec_()
"numpy.abs",
"PyQt5.uic.loadUi",
"PyQt5.QtWidgets.QVBoxLayout",
"numpy.imag",
"numpy.sin",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"PyQt5.QtWidgets.QApplication",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.MultipleLocator",
"numpy.zeros_like",
"numpy.copy",
"numpy.... | [((524, 599), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'matplotlib.cbook.mplDeprecation'}), "('ignore', category=matplotlib.cbook.mplDeprecation)\n", (547, 599), False, 'import warnings\n'), ((1472, 1491), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (1480, 1491), True, 'import numpy as np\n'), ((1502, 1521), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (1510, 1521), True, 'import numpy as np\n'), ((1823, 1835), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (1829, 1835), True, 'import numpy as np\n'), ((1845, 1857), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (1851, 1857), True, 'import numpy as np\n'), ((1868, 1884), 'numpy.cos', 'np.cos', (['(beta / 2)'], {}), '(beta / 2)\n', (1874, 1884), True, 'import numpy as np\n'), ((1893, 1909), 'numpy.sin', 'np.sin', (['(beta / 2)'], {}), '(beta / 2)\n', (1899, 1909), True, 'import numpy as np\n'), ((882, 945), 'matplotlib.patches.FancyArrowPatch.__init__', 'FancyArrowPatch.__init__', (['self', '(0, 0)', '(0, 0)', '*args'], {}), '(self, (0, 0), (0, 0), *args, **kwargs)\n', (906, 945), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((1079, 1130), 'mpl_toolkits.mplot3d.proj3d.proj_transform', 'proj3d.proj_transform', (['xs3d', 'ys3d', 'zs3d', 'renderer.M'], {}), '(xs3d, ys3d, zs3d, renderer.M)\n', (1100, 1130), False, 'from mpl_toolkits.mplot3d import axes3d, proj3d\n'), ((1200, 1236), 'matplotlib.patches.FancyArrowPatch.draw', 'FancyArrowPatch.draw', (['self', 'renderer'], {}), '(self, renderer)\n', (1220, 1236), False, 'from matplotlib.patches import FancyArrowPatch\n'), ((2420, 2453), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""Visualizer.ui"""', 'self'], {}), "('Visualizer.ui', self)\n", (2430, 2453), False, 'from PyQt5 import uic\n'), ((4070, 4100), 'numpy.array', 'np.array', (['[alpha, beta, gamma]'], {}), '([alpha, beta, gamma])\n', (4078, 4100), True, 'import numpy as np\n'), ((4600, 4613), 
'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4611, 4613), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton\n'), ((4707, 4739), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.pc', 'self'], {}), '(self.pc, self)\n', (4724, 4739), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((4986, 4999), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (4997, 4999), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton\n'), ((5090, 5129), 'matplotlib.backends.backend_qt5agg.NavigationToolbar2QT', 'NavigationToolbar', (['self.dens_plot', 'self'], {}), '(self.dens_plot, self)\n', (5107, 5129), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((6402, 6429), 'numpy.stack', 'np.stack', (['(x, y, z)'], {'axis': '(1)'}), '((x, y, z), axis=1)\n', (6410, 6429), True, 'import numpy as np\n'), ((6527, 6544), 'numpy.copy', 'np.copy', (['self.vec'], {}), '(self.vec)\n', (6534, 6544), True, 'import numpy as np\n'), ((6572, 6591), 'numpy.copy', 'np.copy', (['self.state'], {}), '(self.state)\n', (6579, 6591), True, 'import numpy as np\n'), ((6623, 6646), 'numpy.copy', 'np.copy', (['self.pol_curve'], {}), '(self.pol_curve)\n', (6630, 6646), True, 'import numpy as np\n'), ((6761, 6778), 'numpy.copy', 'np.copy', (['self.vec'], {}), '(self.vec)\n', (6768, 6778), True, 'import numpy as np\n'), ((6806, 6825), 'numpy.copy', 'np.copy', (['self.state'], {}), '(self.state)\n', (6813, 6825), True, 'import numpy as np\n'), ((6857, 6880), 'numpy.copy', 'np.copy', (['self.pol_curve'], {}), '(self.pol_curve)\n', (6864, 6880), True, 'import numpy as np\n'), ((7344, 7375), 'numpy.dot', 'np.dot', (['self.D', 
'self.init_state'], {}), '(self.D, self.init_state)\n', (7350, 7375), True, 'import numpy as np\n'), ((7396, 7425), 'numpy.dot', 'np.dot', (['self.R', 'self.init_vec'], {}), '(self.R, self.init_vec)\n', (7402, 7425), True, 'import numpy as np\n'), ((9237, 9252), 'numpy.radians', 'np.radians', (['val'], {}), '(val)\n', (9247, 9252), True, 'import numpy as np\n'), ((9988, 10003), 'numpy.radians', 'np.radians', (['val'], {}), '(val)\n', (9998, 10003), True, 'import numpy as np\n'), ((10343, 10368), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (10351, 10368), True, 'import numpy as np\n'), ((10490, 10530), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (10496, 10530), False, 'from matplotlib.figure import Figure\n'), ((10540, 10577), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', (['self', 'self.fig'], {}), '(self, self.fig)\n', (10561, 10577), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10672, 10746), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', (['self', 'QSizePolicy.Maximum', 'QSizePolicy.Maximum'], {}), '(self, QSizePolicy.Maximum, QSizePolicy.Maximum)\n', (10698, 10746), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((10790, 10823), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', (['self'], {}), '(self)\n', (10817, 10823), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((11176, 11198), 'numpy.array', 'np.array', (['[0, 0, -1.5]'], {}), '([0, 0, -1.5])\n', (11184, 11198), True, 'import numpy as np\n'), ((12606, 12653), 
'numpy.concatenate', 'np.concatenate', (['[curve[:-1], curve[1:]]'], {'axis': '(1)'}), '([curve[:-1], curve[1:]], axis=1)\n', (12620, 12653), True, 'import numpy as np\n'), ((12670, 12693), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""seismic"""'], {}), "('seismic')\n", (12682, 12693), True, 'import matplotlib.pyplot as plt\n'), ((13843, 13883), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(width, height)', 'dpi': 'dpi'}), '(figsize=(width, height), dpi=dpi)\n', (13849, 13883), False, 'from matplotlib.figure import Figure\n'), ((13893, 13930), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.__init__', 'FigureCanvas.__init__', (['self', 'self.fig'], {}), '(self, self.fig)\n', (13914, 13930), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((14060, 14138), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.setSizePolicy', 'FigureCanvas.setSizePolicy', (['self', 'QSizePolicy.Expanding', 'QSizePolicy.Expanding'], {}), '(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n', (14086, 14138), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((14182, 14215), 'matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg.updateGeometry', 'FigureCanvas.updateGeometry', (['self'], {}), '(self)\n', (14209, 14215), True, 'from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n'), ((14250, 14280), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['self.axes'], {}), '(self.axes)\n', (14269, 14280), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((14372, 14388), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (14380, 14388), True, 'import numpy as np\n'), ((15199, 15217), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15215, 15217), True, 
'import matplotlib.pyplot as plt\n'), ((15503, 15525), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (15515, 15525), False, 'from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget, QPushButton\n'), ((1296, 1306), 'numpy.real', 'np.real', (['c'], {}), '(c)\n', (1303, 1306), True, 'import numpy as np\n'), ((1308, 1318), 'numpy.imag', 'np.imag', (['c'], {}), '(c)\n', (1315, 1318), True, 'import numpy as np\n'), ((1388, 1397), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1394, 1397), True, 'import numpy as np\n'), ((1681, 1693), 'numpy.dot', 'np.dot', (['A', 'p'], {}), '(A, p)\n', (1687, 1693), True, 'import numpy as np\n'), ((2115, 2231), 'numpy.array', 'np.array', (['[[-alpha - gamma, -alpha, -alpha + gamma], [-gamma, 0, gamma], [alpha -\n gamma, alpha, alpha + gamma]]'], {}), '([[-alpha - gamma, -alpha, -alpha + gamma], [-gamma, 0, gamma], [\n alpha - gamma, alpha, alpha + gamma]])\n', (2123, 2231), True, 'import numpy as np\n'), ((5593, 5620), 'numpy.array', 'np.array', (['[0, np.pi / 2, 0]'], {}), '([0, np.pi / 2, 0])\n', (5601, 5620), True, 'import numpy as np\n'), ((5643, 5664), 'numpy.array', 'np.array', (['[1.5, 0, 0]'], {}), '([1.5, 0, 0])\n', (5651, 5664), True, 'import numpy as np\n'), ((5724, 5743), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (5732, 5743), True, 'import numpy as np\n'), ((5775, 5796), 'numpy.linspace', 'np.linspace', (['(-r)', 'r', 'N'], {}), '(-r, r, N)\n', (5786, 5796), True, 'import numpy as np\n'), ((5814, 5830), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (5827, 5830), True, 'import numpy as np\n'), ((5947, 5966), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (5955, 5966), True, 'import numpy as np\n'), ((5991, 6013), 'numpy.array', 'np.array', (['[0, 0, -1.5]'], {}), '([0, 0, -1.5])\n', (5999, 6013), True, 'import numpy as np\n'), ((6245, 6273), 'numpy.linspace', 'np.linspace', (['(0)', 
'(2 * np.pi)', 'N'], {}), '(0, 2 * np.pi, N)\n', (6256, 6273), True, 'import numpy as np\n'), ((10871, 10897), 'numpy.array', 'np.array', (['[0.75, 0.0, 0.0]'], {}), '([0.75, 0.0, 0.0])\n', (10879, 10897), True, 'import numpy as np\n'), ((10944, 10970), 'numpy.array', 'np.array', (['[0.0, 0.75, 0.0]'], {}), '([0.0, 0.75, 0.0])\n', (10952, 10970), True, 'import numpy as np\n'), ((11017, 11042), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.5]'], {}), '([0.0, 0.0, 1.5])\n', (11025, 11042), True, 'import numpy as np\n'), ((12706, 12718), 'numpy.abs', 'np.abs', (['inpt'], {}), '(inpt)\n', (12712, 12718), True, 'import numpy as np\n'), ((12873, 12909), 'numpy.append', 'np.append', (['segments', 'lastseg'], {'axis': '(0)'}), '(segments, lastseg, axis=0)\n', (12882, 12909), True, 'import numpy as np\n'), ((14605, 14627), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(1)'], {}), '(1)\n', (14624, 14627), True, 'import matplotlib.pyplot as plt\n'), ((14672, 14696), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (14691, 14696), True, 'import matplotlib.pyplot as plt\n'), ((14741, 14763), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(1)'], {}), '(1)\n', (14760, 14763), True, 'import matplotlib.pyplot as plt\n'), ((14808, 14832), 'matplotlib.pyplot.MultipleLocator', 'plt.MultipleLocator', (['(0.5)'], {}), '(0.5)\n', (14827, 14832), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5868), 'numpy.ones_like', 'np.ones_like', (['z'], {}), '(z)\n', (5865, 5868), True, 'import numpy as np\n'), ((6101, 6120), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6109, 6120), True, 'import numpy as np\n'), ((6190, 6209), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (6198, 6209), True, 'import numpy as np\n'), ((6348, 6363), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (6360, 6363), True, 'import numpy as np\n'), ((7689, 7708), 'numpy.conj', 'np.conj', (['self.state'], 
{}), '(self.state)\n', (7696, 7708), True, 'import numpy as np\n'), ((8011, 8025), 'numpy.dot', 'np.dot', (['mat', 'v'], {}), '(mat, v)\n', (8017, 8025), True, 'import numpy as np\n'), ((13040, 13060), 'numpy.flip', 'np.flip', (['cmap_params'], {}), '(cmap_params)\n', (13047, 13060), True, 'import numpy as np\n'), ((1404, 1414), 'scipy.linalg.norm', 'norm', (['axis'], {}), '(axis)\n', (1408, 1414), False, 'from scipy.linalg import expm, norm\n'), ((1945, 1955), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (1952, 1955), True, 'import numpy as np\n'), ((1995, 2005), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2002, 2005), True, 'import numpy as np\n'), ((2013, 2023), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2020, 2023), True, 'import numpy as np\n'), ((2063, 2073), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2070, 2073), True, 'import numpy as np\n'), ((4371, 4397), 'numpy.degrees', 'np.degrees', (['self.angles[0]'], {}), '(self.angles[0])\n', (4381, 4397), True, 'import numpy as np\n'), ((4445, 4471), 'numpy.degrees', 'np.degrees', (['self.angles[1]'], {}), '(self.angles[1])\n', (4455, 4471), True, 'import numpy as np\n'), ((4520, 4546), 'numpy.degrees', 'np.degrees', (['self.angles[2]'], {}), '(self.angles[2])\n', (4530, 4546), True, 'import numpy as np\n'), ((6294, 6307), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (6300, 6307), True, 'import numpy as np\n'), ((6311, 6324), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (6317, 6324), True, 'import numpy as np\n'), ((11380, 11396), 'itertools.product', 'product', (['r', 'r', 'r'], {}), '(r, r, r)\n', (11387, 11396), False, 'from itertools import product, combinations\n'), ((11427, 11440), 'numpy.abs', 'np.abs', (['(s - e)'], {}), '(s - e)\n', (11433, 11440), True, 'import numpy as np\n'), ((12800, 12831), 'numpy.array', 'np.array', (['[curve[-1], curve[0]]'], {}), '([curve[-1], curve[0]])\n', (12808, 12831), True, 'import numpy as np\n')] |
# OpenCV Tutorial from Murtaza's Workshop - Robotics and AI
import numpy as np
import cv2
# Output frame size in pixels.
width = 640
height = 640
# Open the default camera using the DirectShow backend (Windows-specific).
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
cap.set(3, width)  # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, height)  # property 4 = CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)  # property 10 = CAP_PROP_BRIGHTNESS
def get_contours(img):
    """Return the corner points of the largest 4-sided contour in *img*.

    Also draws the winning corners onto the module-level ``img_contour``
    image.  Returns an empty array when no quadrilateral with an area above
    5000 px^2 is found.
    """
    best_corners = np.array([])
    best_area = 0
    found, _hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for contour in found:
        contour_area = cv2.contourArea(contour)
        if contour_area <= 5000:
            continue  # skip small blobs / noise
        # cv2.drawContours(img_contour, contour, -1, (255, 0, 0), 3)
        perimeter = cv2.arcLength(contour, True)
        corners = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
        # Keep only quadrilaterals, and only when larger than the current best.
        if len(corners) == 4 and contour_area > best_area:
            best_corners = corners
            best_area = contour_area
    cv2.drawContours(img_contour, best_corners, -1, (255, 0, 0), 5)
    return best_corners
def reorder(points):
    """Order four corner points as [top-left, top-right, bottom-left, bottom-right].

    The corner with the smallest x+y is top-left and the largest x+y is
    bottom-right; the sign of (y - x) separates top-right from bottom-left.
    Returns an int32 array of shape (4, 1, 2).
    """
    corners = points.reshape((4, 2))
    coord_sum = corners.sum(axis=1)
    coord_diff = np.diff(corners, axis=1)
    ordered = np.zeros((4, 1, 2), np.int32)
    ordered[0] = corners[np.argmin(coord_sum)]   # top-left: min(x + y)
    ordered[1] = corners[np.argmin(coord_diff)]  # top-right: min(y - x)
    ordered[2] = corners[np.argmax(coord_diff)]  # bottom-left: max(y - x)
    ordered[3] = corners[np.argmax(coord_sum)]   # bottom-right: max(x + y)
    return ordered
def get_warp(img, biggest):
    """Perspective-warp *img* so the quadrilateral *biggest* fills a
    width x height frame.

    Parameters
    ----------
    img : source BGR frame.
    biggest : (4, 1, 2) corner array from get_contours(); may be empty.

    Returns the warped image, or *img* unchanged when no quadrilateral was
    detected (``biggest`` is empty).

    Fixes: removed an unreachable ``pass`` after the early return, and
    removed a per-frame crop+resize whose result was computed and then
    discarded (the function always returned the uncropped warp).
    """
    if biggest.size == 0:
        # Nothing detected this frame - show the raw feed instead.
        return img
    corners = reorder(biggest)
    src = np.float32(corners)
    dst = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
    matrix = cv2.getPerspectiveTransform(src, dst)
    warped = cv2.warpPerspective(img, matrix, (width, height))
    # To trim the ragged 10 px border instead, crop and rescale:
    #   cropped = warped[10:-10, 10:-10]
    #   return cv2.resize(cropped, (width, height))
    return warped
def pre_processing(img):
    """Turn a BGR frame into a binary edge map suitable for contour detection.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> dilate (thicken
    edges, 2 passes) -> erode (thin them back, 1 pass).
    """
    kernel = np.ones((5, 5))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 1)
    edges = cv2.Canny(blurred, 200, 200)
    thickened = cv2.dilate(edges, kernel, iterations=2)
    return cv2.erode(thickened, kernel, iterations=1)
# Main capture loop: grab a frame, find the biggest quadrilateral in it and
# display that region perspective-corrected.  Press 'q' to quit.
while True:
    success, img = cap.read()
    if not success:
        # Camera disconnected or frame grab failed - stop cleanly rather
        # than crashing inside cv2.resize(None, ...).
        break
    img = cv2.resize(img, (width, height))
    img_contour = img.copy()  # canvas that get_contours() draws onto
    img_thres = pre_processing(img)
    biggest = get_contours(img_thres)
    img_warped = get_warp(img, biggest)
    cv2.imshow("Result", img_warped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()  # free the camera handle (previously leaked)
cv2.destroyAllWindows()
| [
"cv2.GaussianBlur",
"numpy.argmax",
"cv2.getPerspectiveTransform",
"cv2.arcLength",
"cv2.approxPolyDP",
"numpy.ones",
"numpy.argmin",
"cv2.erode",
"cv2.imshow",
"cv2.warpPerspective",
"cv2.contourArea",
"cv2.dilate",
"cv2.cvtColor",
"cv2.drawContours",
"cv2.destroyAllWindows",
"cv2.res... | [((123, 157), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)', 'cv2.CAP_DSHOW'], {}), '(0, cv2.CAP_DSHOW)\n', (139, 157), False, 'import cv2\n'), ((2418, 2441), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2439, 2441), False, 'import cv2\n'), ((251, 263), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (259, 263), True, 'import numpy as np\n'), ((307, 370), 'cv2.findContours', 'cv2.findContours', (['img', 'cv2.RETR_EXTERNAL', 'cv2.CHAIN_APPROX_NONE'], {}), '(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n', (323, 370), False, 'import cv2\n'), ((753, 811), 'cv2.drawContours', 'cv2.drawContours', (['img_contour', 'biggest', '(-1)', '(255, 0, 0)', '(5)'], {}), '(img_contour, biggest, -1, (255, 0, 0), 5)\n', (769, 811), False, 'import cv2\n'), ((907, 936), 'numpy.zeros', 'np.zeros', (['(4, 1, 2)', 'np.int32'], {}), '((4, 1, 2), np.int32)\n', (915, 936), True, 'import numpy as np\n'), ((1064, 1087), 'numpy.diff', 'np.diff', (['points'], {'axis': '(1)'}), '(points, axis=1)\n', (1071, 1087), True, 'import numpy as np\n'), ((1328, 1347), 'numpy.float32', 'np.float32', (['biggest'], {}), '(biggest)\n', (1338, 1347), True, 'import numpy as np\n'), ((1359, 1421), 'numpy.float32', 'np.float32', (['[[0, 0], [width, 0], [0, height], [width, height]]'], {}), '([[0, 0], [width, 0], [0, height], [width, height]])\n', (1369, 1421), True, 'import numpy as np\n'), ((1435, 1474), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['pts1', 'pts2'], {}), '(pts1, pts2)\n', (1462, 1474), False, 'import cv2\n'), ((1492, 1541), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'matrix', '(width, height)'], {}), '(img, matrix, (width, height))\n', (1511, 1541), False, 'import cv2\n'), ((1644, 1684), 'cv2.resize', 'cv2.resize', (['img_cropped', '(width, height)'], {}), '(img_cropped, (width, height))\n', (1654, 1684), False, 'import cv2\n'), ((1774, 1811), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, 
cv2.COLOR_BGR2GRAY)\n', (1786, 1811), False, 'import cv2\n'), ((1827, 1864), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['img_gray', '(5, 5)', '(1)'], {}), '(img_gray, (5, 5), 1)\n', (1843, 1864), False, 'import cv2\n'), ((1881, 1910), 'cv2.Canny', 'cv2.Canny', (['img_blur', '(200)', '(200)'], {}), '(img_blur, 200, 200)\n', (1890, 1910), False, 'import cv2\n'), ((1924, 1939), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (1931, 1939), True, 'import numpy as np\n'), ((1959, 2002), 'cv2.dilate', 'cv2.dilate', (['img_canny', 'kernel'], {'iterations': '(2)'}), '(img_canny, kernel, iterations=2)\n', (1969, 2002), False, 'import cv2\n'), ((2023, 2068), 'cv2.erode', 'cv2.erode', (['img_dilation', 'kernel'], {'iterations': '(1)'}), '(img_dilation, kernel, iterations=1)\n', (2032, 2068), False, 'import cv2\n'), ((2148, 2180), 'cv2.resize', 'cv2.resize', (['img', '(width, height)'], {}), '(img, (width, height))\n', (2158, 2180), False, 'import cv2\n'), ((2329, 2361), 'cv2.imshow', 'cv2.imshow', (['"""Result"""', 'img_warped'], {}), "('Result', img_warped)\n", (2339, 2361), False, 'import cv2\n'), ((411, 431), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (426, 431), False, 'import cv2\n'), ((993, 1007), 'numpy.argmin', 'np.argmin', (['add'], {}), '(add)\n', (1002, 1007), True, 'import numpy as np\n'), ((1036, 1050), 'numpy.argmax', 'np.argmax', (['add'], {}), '(add)\n', (1045, 1050), True, 'import numpy as np\n'), ((1115, 1130), 'numpy.argmin', 'np.argmin', (['diff'], {}), '(diff)\n', (1124, 1130), True, 'import numpy as np\n'), ((1159, 1174), 'numpy.argmax', 'np.argmax', (['diff'], {}), '(diff)\n', (1168, 1174), True, 'import numpy as np\n'), ((544, 568), 'cv2.arcLength', 'cv2.arcLength', (['cnt', '(True)'], {}), '(cnt, True)\n', (557, 568), False, 'import cv2\n'), ((590, 630), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', '(0.02 * peri)', '(True)'], {}), '(cnt, 0.02 * peri, True)\n', (606, 630), False, 'import cv2\n'), ((2369, 2383), 
'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2380, 2383), False, 'import cv2\n')] |
# Conway's Game of Life written by <NAME>
import numpy as np
import os
# Cell renderings indexed by state (0 = dead, 1 = alive): FAST_IO is plain
# ASCII; IO uses ANSI colour escape codes (black block dead, white alive).
FAST_IO = [" ", "#"]
# Fix: the original line ended with a stray "\" line-continuation, which
# spliced the following comment into this statement.
IO = [u"\u001b[30m\u001b[40m \u001b[0m", u"\u001b[37;1m\u001b[47;1m \u001b[0m"]
# Optional Faster Version
IO = FAST_IO
# Main function
def iterate(buffer, w, h):
    """Advance the Game of Life one generation and render it to the terminal.

    Parameters
    ----------
    buffer : (h+2, w+2) ndarray of 0/1 cell states (1 = alive), with a
        border of dead cells around the playing field.
    w, h : interior field dimensions.

    Returns the next-generation buffer of the same shape.
    """
    # Fix: use np.zeros, not np.empty - the loop below only fills the
    # interior, so an uninitialised buffer would feed garbage border values
    # into the neighbour counts of the following generation.
    tmp_buffer = np.zeros(shape=(h+2, w+2))
    strb = u"\u001b[41m\u001b[37;1mConway's Game Of Life\u001b[0m"
    # NOTE(review): ranges stop at h-1 / w-1, so the last interior row and
    # column never evolve - kept as-is to preserve the displayed grid size.
    for x in range(1, h-1):
        strb += "\n"
        for y in range(1, w-1):
            alive = buffer[x, y]
            # 3x3 neighbourhood sum minus the cell itself.
            neighbors = np.sum(buffer[x-1:x+2, y-1:y+2])-int(alive)
            # Conway's rules: birth on exactly 3 neighbours; survival on 2 or 3.
            tmp_buffer[x, y] = int(neighbors==3) + int((neighbors==2)*alive)
            strb += IO[int(tmp_buffer[x, y])]
    os.system('cls' if os.name == 'nt' else 'clear') # This line makes escape codes work in the windows command line?? What??
    print(u"{}\u001b[0m".format(strb))
    return tmp_buffer
# Main code
def main():
    """Set up a random field and run the simulation for a user-chosen number of cycles."""
    # 12x7 ratios give a bounding box, since characters are not perfectly square
    w, h = 120, 70
    # Roughly 20% of cells start alive (randint(5) hits 0 one time in five).
    # Fix: removed a dead `buffer = np.zeros(...)` assignment that was
    # immediately overwritten by the random initialisation below.
    buffer = np.random.randint(5, size=(h+2, w+2))
    buffer = np.where(buffer==0, 1, 0)
    # Setting outlines (dead border so the field does not wrap).
    # NOTE(review): these slices stop at w/h rather than w+1/h+1, so the
    # outermost row/column keep their random values - preserved as-is.
    buffer[0, 0:w] = 0
    buffer[h, 0:w] = 0
    buffer[0:h, 0] = 0
    buffer[0:h, w] = 0
    cycles = int(input("Run for how many cycles? "))
    for _ in range(cycles):
        buffer = iterate(buffer, w, h)
if __name__ == "__main__":
    # Run the simulation only when executed as a script, not on import.
    main()
"numpy.sum",
"numpy.empty",
"numpy.zeros",
"os.system",
"numpy.random.randint",
"numpy.where"
] | [((289, 319), 'numpy.empty', 'np.empty', ([], {'shape': '(h + 2, w + 2)'}), '(shape=(h + 2, w + 2))\n', (297, 319), True, 'import numpy as np\n'), ((798, 846), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (807, 846), False, 'import os\n'), ((1134, 1164), 'numpy.zeros', 'np.zeros', ([], {'shape': '(h + 2, w + 2)'}), '(shape=(h + 2, w + 2))\n', (1142, 1164), True, 'import numpy as np\n'), ((1175, 1216), 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(h + 2, w + 2)'}), '(5, size=(h + 2, w + 2))\n', (1192, 1216), True, 'import numpy as np\n'), ((1227, 1254), 'numpy.where', 'np.where', (['(buffer == 0)', '(1)', '(0)'], {}), '(buffer == 0, 1, 0)\n', (1235, 1254), True, 'import numpy as np\n'), ((609, 649), 'numpy.sum', 'np.sum', (['buffer[x - 1:x + 2, y - 1:y + 2]'], {}), '(buffer[x - 1:x + 2, y - 1:y + 2])\n', (615, 649), True, 'import numpy as np\n')] |
import matplotlib
# Use the non-interactive Agg backend so the tests run headless (no display).
# NOTE(review): the `warn` keyword was removed from matplotlib.use() in
# matplotlib 3.1 - this call assumes an older pinned matplotlib; confirm.
matplotlib.use('agg', warn=False, force=True)
import pytest
import optoanalysis
import numpy as np
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
# Maximum RMS pixel difference allowed by the image-comparison decorators.
plot_similarity_tolerance = 30
# Relative tolerance used with pytest.approx for float comparisons.
float_relative_tolerance = 1e-3
def test_load_data():
    """
    Smoke-test optoanalysis.load_data and the DataObject basics.

    Verifies that loading returns a DataObject pointing at the right file,
    that the time axis spacing matches the recorded sample rate, that the
    PSD extends to the Nyquist frequency, that get_time_data round-trips
    the stored time/voltage arrays, and that loading as a ThermoObject
    also succeeds.
    """
    dataset = optoanalysis.load_data("testData.raw")
    assert type(dataset) == optoanalysis.optoanalysis.DataObject
    assert dataset.filename == "testData.raw"
    times = dataset.time.get_array()
    # Sample spacing must equal 1/SampleFreq.
    assert times[1]-times[0] == pytest.approx(1/dataset.SampleFreq, rel=float_relative_tolerance)
    # The PSD's highest frequency is approximately the Nyquist frequency.
    assert max(dataset.freqs) == pytest.approx(dataset.SampleFreq/2, rel=0.00001)
    dataset.load_time_data()
    t, V = dataset.get_time_data()
    np.testing.assert_array_equal(t, times)
    np.testing.assert_array_equal(V, dataset.voltage)
    # Exercise the ThermoObject initialisation path as well.
    optoanalysis.load_data("testData.raw", ObjectType="thermo")
    return None
# Module-level dataset shared by the tests below, loaded once up front.
GlobalData = optoanalysis.load_data("testData.raw", NPerSegmentPSD=int(1e5)) # Load data to be used in upcoming tests - so that it doesn't need to be loaded for each individual function to be tested
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance) # compare the returned figure against the baseline png in tests/baseline
def test_plot_PSD():
    """
    Check that DataObject.plot_PSD renders the 0-400 kHz PSD identically
    (within tolerance) to the stored baseline image.
    """
    figure, _axis = GlobalData.plot_PSD([0, 400], show_fig=False)
    return figure
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance) # compare the returned figure against the baseline png in tests/baseline
def test_get_fit():
    """
    Regression-test DataObject.get_fit (and its dependencies fitPSD,
    fit_curvefit and PSD_Fitting): the fitted A, F and Gamma values and
    their uncertainties must match the stored reference numbers, and the
    produced plot must match the baseline image.
    """
    A, F, Gamma, fig, ax = GlobalData.get_fit(75000, 10000, show_fig=False)
    # Fitted values and their standard errors, checked pairwise.
    assert A.n == pytest.approx(584418711252, rel=float_relative_tolerance)
    assert A.std_dev == pytest.approx(5827258935, rel=float_relative_tolerance)
    assert F.n == pytest.approx(466604, rel=float_relative_tolerance)
    assert F.std_dev == pytest.approx(50.3576, rel=float_relative_tolerance)
    assert Gamma.n == pytest.approx(3951.716, rel=float_relative_tolerance)
    assert Gamma.std_dev == pytest.approx(97.5671, rel=float_relative_tolerance)
    return fig
def test_extract_parameters():
    """
    Tests that DataObject.extract_parameters works and returns the correct values.
    """
    # Read the recorded pressure (in mbar) from the accompanying data file.
    # NOTE(review): `pressure` is overwritten on each line, so only the last
    # line of the file is used - presumably the file contains one line.
    with open("testDataPressure.dat", 'r') as file:
        for line in file:
            pressure = float(line.split("mbar")[0])
    R, M, ConvFactor = GlobalData.extract_parameters(pressure, 0.15)
    # NOTE(review): every value assertion below is commented out, so this
    # test currently only checks that extract_parameters runs without raising.
    #assert R.n == pytest.approx(3.27536e-8, rel=float_relative_tolerance)
    #assert M.n == pytest.approx(3.23808e-19, rel=float_relative_tolerance)
    #assert ConvFactor.n == pytest.approx(190629, rel=float_relative_tolerance)
    #assert R.std_dev == pytest.approx(4.97914e-9, rel=float_relative_tolerance)
    #assert M.std_dev == pytest.approx(9.84496e-20, rel=float_relative_tolerance)
    #assert ConvFactor.std_dev == pytest.approx(58179.9, rel=float_relative_tolerance)
    return None
def test_get_time_data():
    """
    Check that DataObject.get_time_data returns matching-length time and
    voltage arrays with the expected sample count for a 1 ms window.
    """
    times, voltages = GlobalData.get_time_data(timeStart=0, timeEnd=1e-3)
    # Both arrays cover the same window and hold 10000 samples.
    assert len(times) == len(voltages) == 10000
    return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_plot_time_data():
    """
    Check that DataObject.plot_time_data renders the -1 ms to +1 ms time
    trace identically (within tolerance) to the stored baseline image.
    """
    figure, _axis = GlobalData.plot_time_data(timeStart=-1e-3, timeEnd=1e-3, units='ms', show_fig=False)
    return figure
def test_calc_area_under_PSD():
    """
    Regression-test DataObject.calc_area_under_PSD: the area under the PSD
    between 50 kHz and 100 kHz must match the stored reference value.
    """
    expected_area = 1.6900993420543872e-06
    assert GlobalData.calc_area_under_PSD(50e3, 100e3) == pytest.approx(expected_area, rel=float_relative_tolerance)
    return None
def test_get_fit_auto():
    """
    Regression-test DataObject.get_fit_auto: the automatically fitted A,
    trap frequency and damping (with their uncertainties) must match the
    stored reference values within tolerance.
    """
    # (reference value, reference std_dev) for A, OmegaTrap, BigGamma.
    references = [
        (466612.80058291875, 54.936633293369404),
        (583205139563.28, 7359927227.585048),
        (3946.998785496495, 107.96706466271127),
    ]
    A, OmegaTrap, BigGamma, _, _ = GlobalData.get_fit_auto(70e3, MakeFig=False, show_fig=False)
    for fitted, (ref_value, ref_err) in zip((A, OmegaTrap, BigGamma), references):
        assert fitted.n == pytest.approx(ref_value, rel=float_relative_tolerance)
        assert fitted.std_dev == pytest.approx(ref_err, rel=float_relative_tolerance)
    return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_extract_motion():
    """
    Test DataObject.extract_ZXY_motion (and therefore get_ZXY_data and
    get_ZXY_freqs): the extracted z, x, y signals must all share the time
    array's length, which is 1/3 of the raw record because of the
    FractionOfSampleFreq=3 down-sampling.
    """
    z, x, y, t, fig, ax = GlobalData.extract_ZXY_motion([75e3, 167e3, 185e3], 5e3, [15e3, 15e3, 15e3], 3, NPerSegmentPSD=int(1e5), MakeFig=True, show_fig=False)
    downsampled_length = int(np.floor(len(GlobalData.time)/3))
    assert len(z) == len(x) == len(y) == len(t) == downsampled_length
    return fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_plot_phase_space():
    """
    Check that DataObject.plot_phase_space (and thus calc_phase_space)
    renders the 75 kHz phase-space plot identically (within tolerance)
    to the stored baseline image.
    """
    figure, _ax_scatter, _ax_histx, _ax_histy, _colorbar = GlobalData.plot_phase_space(75e3, GlobalData.ConvFactor, PeakWidth=10e3, ShowPSD=False, show_fig=False, FractionOfSampleFreq=3)
    return figure
def test_multi_load_data():
    """
    Smoke-test optoanalysis.multi_load_data: both requested runs must load
    as DataObjects with the expected filenames, a time axis consistent with
    their sample rate, and PSDs reaching the Nyquist frequency.
    """
    datasets = optoanalysis.multi_load_data(1, [1, 36], [0])
    assert datasets[0].filename == "CH1_RUN00000001_REPEAT0000.raw"
    assert datasets[1].filename == "CH1_RUN00000036_REPEAT0000.raw"
    for dataset in datasets:
        assert type(dataset) == optoanalysis.optoanalysis.DataObject
        times = dataset.time.get_array()
        # Sample spacing must equal 1/SampleFreq.
        assert times[1]-times[0] == pytest.approx(1/dataset.SampleFreq, rel=float_relative_tolerance)
        # The PSD's highest frequency is approximately the Nyquist frequency.
        assert max(dataset.freqs) == pytest.approx(dataset.SampleFreq/2, rel=0.00001)
    return None
# Reference/cooled dataset pair shared by the multi-dataset tests below.
GlobalMultiData = optoanalysis.multi_load_data(1, [1, 36], [0], NPerSegmentPSD=int(1e5)) # Load data to be used in upcoming tests - so that it doesn't need to be loaded for each individual function to be tested
def test_calc_temp():
    """
    Calibrates both shared datasets with get_fit_auto and then checks
    that calc_temp recovers the expected temperature (value and error)
    of the z degree of freedom relative to its reference.
    """
    for dataset in GlobalMultiData:
        dataset.get_fit_auto(65e3, MakeFig=False, show_fig=False)
    temperature = optoanalysis.calc_temp(GlobalMultiData[0], GlobalMultiData[1])
    assert temperature.n == pytest.approx(2.6031509367704735, rel=float_relative_tolerance)
    assert temperature.std_dev == pytest.approx(0.21312482508893446, rel=float_relative_tolerance)
    return None
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_plot_PSD():
    """
    Checks that multi_plot_PSD renders the PSDs of the two datasets
    (from 0 to 300 kHz) correctly; the returned figure is compared
    against the stored baseline within the configured tolerance.
    """
    psd_fig, _ax = optoanalysis.multi_plot_PSD(
        GlobalMultiData, xlim=[0, 300], units="kHz",
        LabelArray=["Reference", "Cooled"], ColorArray=["red", "blue"],
        alphaArray=[0.8, 0.8], show_fig=False)
    return psd_fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_plot_time():
    """
    Checks that multi_plot_time renders the time traces of the two
    datasets (from -1000 us to 1000 us) correctly; the returned figure
    is compared against the stored baseline within the configured
    tolerance.
    """
    time_fig, _ax = optoanalysis.multi_plot_time(
        GlobalMultiData, SubSampleN=1, units='us', xlim=[-1000, 1000],
        LabelArray=["Reference", "Cooled"], show_fig=False)
    return time_fig
@pytest.mark.mpl_image_compare(tolerance=plot_similarity_tolerance)
def test_multi_subplots_time():
    """
    Checks that multi_subplots_time renders one time-trace subplot per
    dataset (from -1000 us to 1000 us) correctly; the returned figure
    is compared against the stored baseline within the configured
    tolerance.
    """
    subplot_fig, _ax = optoanalysis.multi_subplots_time(
        GlobalMultiData, SubSampleN=1, units='us', xlim=[-1000, 1000],
        LabelArray=["Reference", "Cooled"], show_fig=False)
    return subplot_fig
| [
"optoanalysis.multi_subplots_time",
"optoanalysis.load_data",
"numpy.testing.assert_array_equal",
"optoanalysis.multi_plot_time",
"optoanalysis.calc_temp",
"optoanalysis.multi_load_data",
"matplotlib.use",
"pytest.mark.mpl_image_compare",
"pytest.approx",
"optoanalysis.multi_plot_PSD"
] | [((18, 63), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {'warn': '(False)', 'force': '(True)'}), "('agg', warn=False, force=True)\n", (32, 63), False, 'import matplotlib\n'), ((1774, 1840), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (1803, 1840), False, 'import pytest\n'), ((2228, 2294), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (2257, 2294), False, 'import pytest\n'), ((4427, 4493), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (4456, 4493), False, 'import pytest\n'), ((6231, 6297), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (6260, 6297), False, 'import pytest\n'), ((6820, 6886), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (6849, 6886), False, 'import pytest\n'), ((8655, 8721), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (8684, 8721), False, 'import pytest\n'), ((9164, 9230), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (9193, 9230), False, 'import pytest\n'), ((9650, 9716), 'pytest.mark.mpl_image_compare', 'pytest.mark.mpl_image_compare', ([], {'tolerance': 'plot_similarity_tolerance'}), '(tolerance=plot_similarity_tolerance)\n', (9679, 9716), False, 'import pytest\n'), ((886, 924), 'optoanalysis.load_data', 'optoanalysis.load_data', (['"""testData.raw"""'], 
{}), "('testData.raw')\n", (908, 924), False, 'import optoanalysis\n'), ((1350, 1388), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['t', 'time'], {}), '(t, time)\n', (1379, 1388), True, 'import numpy as np\n'), ((1393, 1439), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['V', 'data.voltage'], {}), '(V, data.voltage)\n', (1422, 1439), True, 'import numpy as np\n'), ((1444, 1503), 'optoanalysis.load_data', 'optoanalysis.load_data', (['"""testData.raw"""'], {'ObjectType': '"""thermo"""'}), "('testData.raw', ObjectType='thermo')\n", (1466, 1503), False, 'import optoanalysis\n'), ((7376, 7421), 'optoanalysis.multi_load_data', 'optoanalysis.multi_load_data', (['(1)', '[1, 36]', '[0]'], {}), '(1, [1, 36], [0])\n', (7404, 7421), False, 'import optoanalysis\n'), ((8403, 8465), 'optoanalysis.calc_temp', 'optoanalysis.calc_temp', (['GlobalMultiData[0]', 'GlobalMultiData[1]'], {}), '(GlobalMultiData[0], GlobalMultiData[1])\n', (8425, 8465), False, 'import optoanalysis\n'), ((8971, 9154), 'optoanalysis.multi_plot_PSD', 'optoanalysis.multi_plot_PSD', (['GlobalMultiData'], {'xlim': '[0, 300]', 'units': '"""kHz"""', 'LabelArray': "['Reference', 'Cooled']", 'ColorArray': "['red', 'blue']", 'alphaArray': '[0.8, 0.8]', 'show_fig': '(False)'}), "(GlobalMultiData, xlim=[0, 300], units='kHz',\n LabelArray=['Reference', 'Cooled'], ColorArray=['red', 'blue'],\n alphaArray=[0.8, 0.8], show_fig=False)\n", (8998, 9154), False, 'import optoanalysis\n'), ((9489, 9636), 'optoanalysis.multi_plot_time', 'optoanalysis.multi_plot_time', (['GlobalMultiData'], {'SubSampleN': '(1)', 'units': '"""us"""', 'xlim': '[-1000, 1000]', 'LabelArray': "['Reference', 'Cooled']", 'show_fig': '(False)'}), "(GlobalMultiData, SubSampleN=1, units='us',\n xlim=[-1000, 1000], LabelArray=['Reference', 'Cooled'], show_fig=False)\n", (9517, 9636), False, 'import optoanalysis\n'), ((9984, 10135), 'optoanalysis.multi_subplots_time', 'optoanalysis.multi_subplots_time', 
(['GlobalMultiData'], {'SubSampleN': '(1)', 'units': '"""us"""', 'xlim': '[-1000, 1000]', 'LabelArray': "['Reference', 'Cooled']", 'show_fig': '(False)'}), "(GlobalMultiData, SubSampleN=1, units='us',\n xlim=[-1000, 1000], LabelArray=['Reference', 'Cooled'], show_fig=False)\n", (10016, 10135), False, 'import optoanalysis\n'), ((1093, 1157), 'pytest.approx', 'pytest.approx', (['(1 / data.SampleFreq)'], {'rel': 'float_relative_tolerance'}), '(1 / data.SampleFreq, rel=float_relative_tolerance)\n', (1106, 1157), False, 'import pytest\n'), ((1187, 1232), 'pytest.approx', 'pytest.approx', (['(data.SampleFreq / 2)'], {'rel': '(1e-05)'}), '(data.SampleFreq / 2, rel=1e-05)\n', (1200, 1232), False, 'import pytest\n'), ((2851, 2908), 'pytest.approx', 'pytest.approx', (['(584418711252)'], {'rel': 'float_relative_tolerance'}), '(584418711252, rel=float_relative_tolerance)\n', (2864, 2908), False, 'import pytest\n'), ((2927, 2978), 'pytest.approx', 'pytest.approx', (['(466604)'], {'rel': 'float_relative_tolerance'}), '(466604, rel=float_relative_tolerance)\n', (2940, 2978), False, 'import pytest\n'), ((3001, 3054), 'pytest.approx', 'pytest.approx', (['(3951.716)'], {'rel': 'float_relative_tolerance'}), '(3951.716, rel=float_relative_tolerance)\n', (3014, 3054), False, 'import pytest\n'), ((3084, 3139), 'pytest.approx', 'pytest.approx', (['(5827258935)'], {'rel': 'float_relative_tolerance'}), '(5827258935, rel=float_relative_tolerance)\n', (3097, 3139), False, 'import pytest\n'), ((3168, 3220), 'pytest.approx', 'pytest.approx', (['(50.3576)'], {'rel': 'float_relative_tolerance'}), '(50.3576, rel=float_relative_tolerance)\n', (3181, 3220), False, 'import pytest\n'), ((3254, 3306), 'pytest.approx', 'pytest.approx', (['(97.5671)'], {'rel': 'float_relative_tolerance'}), '(97.5671, rel=float_relative_tolerance)\n', (3267, 3306), False, 'import pytest\n'), ((5141, 5194), 'pytest.approx', 'pytest.approx', (['TrueArea'], {'rel': 'float_relative_tolerance'}), '(TrueArea, 
rel=float_relative_tolerance)\n', (5154, 5194), False, 'import pytest\n'), ((5729, 5779), 'pytest.approx', 'pytest.approx', (['ATrue'], {'rel': 'float_relative_tolerance'}), '(ATrue, rel=float_relative_tolerance)\n', (5742, 5779), False, 'import pytest\n'), ((5806, 5864), 'pytest.approx', 'pytest.approx', (['OmegaTrapTrue'], {'rel': 'float_relative_tolerance'}), '(OmegaTrapTrue, rel=float_relative_tolerance)\n', (5819, 5864), False, 'import pytest\n'), ((5890, 5947), 'pytest.approx', 'pytest.approx', (['BigGammaTrue'], {'rel': 'float_relative_tolerance'}), '(BigGammaTrue, rel=float_relative_tolerance)\n', (5903, 5947), False, 'import pytest\n'), ((5972, 6025), 'pytest.approx', 'pytest.approx', (['AErrTrue'], {'rel': 'float_relative_tolerance'}), '(AErrTrue, rel=float_relative_tolerance)\n', (5985, 6025), False, 'import pytest\n'), ((6058, 6119), 'pytest.approx', 'pytest.approx', (['OmegaTrapErrTrue'], {'rel': 'float_relative_tolerance'}), '(OmegaTrapErrTrue, rel=float_relative_tolerance)\n', (6071, 6119), False, 'import pytest\n'), ((6151, 6211), 'pytest.approx', 'pytest.approx', (['BigGammaErrTrue'], {'rel': 'float_relative_tolerance'}), '(BigGammaErrTrue, rel=float_relative_tolerance)\n', (6164, 6211), False, 'import pytest\n'), ((8484, 8547), 'pytest.approx', 'pytest.approx', (['(2.6031509367704735)'], {'rel': 'float_relative_tolerance'}), '(2.6031509367704735, rel=float_relative_tolerance)\n', (8497, 8547), False, 'import pytest\n'), ((8572, 8636), 'pytest.approx', 'pytest.approx', (['(0.21312482508893446)'], {'rel': 'float_relative_tolerance'}), '(0.21312482508893446, rel=float_relative_tolerance)\n', (8585, 8636), False, 'import pytest\n'), ((7718, 7785), 'pytest.approx', 'pytest.approx', (['(1 / dataset.SampleFreq)'], {'rel': 'float_relative_tolerance'}), '(1 / dataset.SampleFreq, rel=float_relative_tolerance)\n', (7731, 7785), False, 'import pytest\n'), ((7822, 7870), 'pytest.approx', 'pytest.approx', (['(dataset.SampleFreq / 2)'], {'rel': '(1e-05)'}), 
'(dataset.SampleFreq / 2, rel=1e-05)\n', (7835, 7870), False, 'import pytest\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
import from 50b745c1d18d5c4b01d9d00e406b5fdaab3515ea @ KamLearn
Compute various statistics between estimated and correct classes in binary
cases
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
#==============================================================================
# Module metadata variables
#==============================================================================
#==============================================================================
# Imports
#==============================================================================
import logging
import numpy as np
#==============================================================================
# Public symbols
#==============================================================================
__all__ = ['BinClassStats']
#==============================================================================
# Constants
#==============================================================================
#==============================================================================
# Module variables
#==============================================================================
#==============================================================================
# Classes
#==============================================================================
class BinClassStats(object):
    """ Compute various statistics of 2class sample data

    Parameters
    ----------
    tp : float
        The number of True-Positives = n[1, 1]
    fn : float
        The number of False-Negatives = n[1, 0]
    fp : float
        The number of False-Positives = n[0, 1]
    tn : float
        The number of True-Negatives = n[0, 0]

    Attributes
    ----------
    n : array-like, shape=(2, 2), dtype=float
        Contingency table of the correct and estimated samples. Rows and
        columns correspond to the correct and the estimated samples.
    c : array-like, shape(2, 0), dtype=float
        Marginal counts of the correct(=true) samples
    e : array-like, shape(2, 0), dtype=float
        Marginal counts of the estimated samples
    t : float
        The number of total samples

    Raises
    ------
    ValueError
        If the total count is not positive, or if any cell of the
        contingency table is negative, infinite, or NaN.
    """

    def __init__(self, tp, fn, fp, tn):
        self.n = np.empty((2, 2))
        self.n[1, 1] = float(tp)
        self.n[1, 0] = float(fn)
        self.n[0, 1] = float(fp)
        self.n[0, 0] = float(tn)

        # marginals and total are derived once from the table
        self.c = np.sum(self.n, axis=1)
        self.e = np.sum(self.n, axis=0)
        self.t = np.sum(self.n)

        if self.t <= 0.0 or np.any(self.n < 0.0) \
                or np.any(np.isinf(self.n)) or np.any(np.isnan(self.n)):
            raise ValueError("Illegal values are specified")

    def negate(self):
        """ negate the meanings of positives and negatives
        """
        self.n[1, 1], self.n[0, 0] = self.n[0, 0], self.n[1, 1]
        self.n[1, 0], self.n[0, 1] = self.n[0, 1], self.n[1, 0]

        # re-derive the marginals for the swapped table
        self.c = np.sum(self.n, axis=1)
        self.e = np.sum(self.n, axis=0)
        self.t = np.sum(self.n)

    def ct(self):
        """ Counts of contingency table elements

        Returns
        -------
        tp : float
            n[1, 1], the number of true positive samples
        fn : float
            n[1, 0], the number of false negative samples
        fp : float
            n[0, 1], the number of false positive samples
        tn : float
            n[0, 0], the number of true negative samples
        """
        return self.n[1, 1], self.n[1, 0], self.n[0, 1], self.n[0, 0]

    def str_ct(self, header=True):
        """ Strings for ct()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        tp, fn, fp, tn = self.ct()
        pr = []
        if header:
            pr.append("### Contingency Table ###")
        pr.append("[ TP(1,1), FN(1,0) ] = [ %6.15g, %6.15g ]" % (tp, fn))
        pr.append("[ FP(0,1), TN(0,0) ] = [ %6.15g, %6.15g ]" % (fp, tn))

        return pr

    def mct(self):
        """ Marginal counts of contingency table elements

        Returns
        -------
        cp : float
            sum of correct positives
        cn : float
            sum of correct negatives
        ep : float
            sum of estimated positives
        en : float
            sum of estimated negatives
        tc : float
            total count
        """
        return self.c[1], self.c[0], self.e[1], self.e[0], self.t

    def str_mct(self, header=True):
        """ Strings for mct()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        cp, cn, ep, en, t = self.mct()
        pr = []
        if header:
            pr.append("### Marginal/Total Counts ###")
        pr.append("True [ P, N ] = [ %6.15g, %6.15g ]" % (cp, cn))
        pr.append("Est [ P, N ] = [ %6.15g, %6.15g ]" % (ep, en))
        pr.append("Total = %.15g" % (t))

        return pr

    def acc(self):
        """ Accuracy

        Returns
        -------
        acc : float
            accuracy
        sd : float
            s.d. of accuracy
        """
        acc = (self.n[1, 1] + self.n[0, 0]) / self.t
        # binomial standard deviation of the accuracy estimate
        sd = np.sqrt(acc * (1.0 - acc) / self.t)

        return acc, sd

    def str_acc(self, header=True):
        """ Strings for acc()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        acc, sd = self.acc()
        pr = []
        if header:
            pr.append("### Accuracy ###")
        pr.append("Acc / S.D. = [ %.15g, %.15g ]" % (acc, sd))

        return pr

    def jaccard(self):
        """ Jaccard / Dice coefficients

        Returns
        -------
        jaccard : float
            Jaccard coefficient
        njaccard : float
            Negated Jaccard coefficient
        dice : float
            Dice coefficient
        ndice : float
            Negated Dice coefficient
        """
        jaccard = self.n[1, 1] / (self.t - self.n[0, 0])
        njaccard = self.n[0, 0] / (self.t - self.n[1, 1])
        dice = 2.0 * self.n[1, 1] / (self.c[1] + self.e[1])
        ndice = 2.0 * self.n[0, 0] / (self.c[0] + self.e[0])

        return jaccard, njaccard, dice, ndice

    def str_jaccard(self, header=True):
        """ Strings for jaccard()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        jaccard, njaccard, dice, ndice = self.jaccard()
        pr = []
        if header:
            pr.append("### Jaccard / Dice Coefficients ###")
        pr.append("Jaccard [ P, N ] = [ %.15g, %.15g ]" % (jaccard, njaccard))
        pr.append("Dice [ P, N ] = [ %.15g, %.15g ]" % (dice, ndice))

        return pr

    def kldiv(self):
        """ KL divergence

        Returns
        -------
        kldivc : float
            D( Correct || Estimated ) with natural log.
            KL divergence from correct to estimated.
        kldive : float
            D( Estimated || Correct ) with natural log.
            KL divergence from estimated to correct.
        kldivc2 : float
            D( Correct || Estimated ) with log2.
            KL divergence from correct to estimated.
        kldive2 : float
            D( Estimated || Correct ) with log2.
            KL divergence from estimated to correct.
        """
        # pointwise term of KL divergence; 0 for empty cells, inf when the
        # reference distribution has zero mass where the other does not
        i = lambda n, m: 0.0 if n == 0.0 else \
            np.inf if m == 0.0 else n * np.log(n / m)
        kldivc = (i(self.c[0], self.e[0]) + i(self.c[1], self.e[1])) \
            / self.t
        kldive = (i(self.e[0], self.c[0]) + i(self.e[1], self.c[1])) \
            / self.t

        i2 = lambda n, m: 0.0 if n == 0.0 else \
            np.inf if m == 0.0 else n * np.log2(n / m)
        kldivc2 = (i2(self.c[0], self.e[0]) + i2(self.c[1], self.e[1])) \
            / self.t
        kldive2 = (i2(self.e[0], self.c[0]) + i2(self.e[1], self.c[1])) \
            / self.t

        return kldivc, kldive, kldivc2, kldive2

    def str_kldiv(self, header=True):
        """ Strings for kldiv()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        kldivc, kldive, kldivc2, kldive2 = self.kldiv()
        pr = []
        if header:
            pr.append("### KL Divergence ###")
        pr.append("[ D(C||E), D(E||C) ] with ln = [ %.15g, %.15g ]"
                  % (kldivc, kldive))
        pr.append("[ D(C||E), D(E||C) ] with log2 = [ %.15g, %.15g ]"
                  % (kldivc2, kldive2))

        return pr

    def mi(self):
        """ Mutual Information with natural log

        Returns
        -------
        mi : float
            I(C; E) = H(C) + H(E).- H(C, E) mutual information
        nmic : float
            I(C; E) / H(C). MI normalized by H(C)
        nmie : float
            I(C; E) / H(E). MI normalized by H(E)
        amean : float
            Arithmetic mean of two normalized mutual informations.
        gmean : float
            Geometric mean of two normalized mutual informations.
        """
        # joint entropy of the pmf function n / sum(n)
        en = lambda n: np.sum([0.0 if i == 0.0
                                else (-i / self.t) * np.log(i / self.t)
                                for i in np.ravel(n)])

        hc = en(self.c)
        he = en(self.e)
        hj = en(self.n)
        # clamp at zero to avoid tiny negative values from rounding
        mi = np.max((0.0, hc + he - hj))
        nmic = 1.0 if hc == 0.0 else mi / hc
        nmie = 1.0 if he == 0.0 else mi / he

        return mi, nmic, nmie, (nmic + nmie) / 2.0, np.sqrt(nmic * nmie)

    def str_mi(self, header=True):
        """ Strings for mi()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        mi, nmic, nmie, amean, gmean = self.mi()
        pr = []
        if header:
            pr.append("### Mutual Information (natual log) ###")
        pr.append("I(C;E) = %.15g" % (mi))
        pr.append("[ I(C;E)/H(C), I(C;E)/H(E) ] = [ %.15g, %.15g ]" % \
                  (nmic, nmie))
        pr.append("Arithmetic Mean = %.15g" % (amean))
        pr.append("Geometric Mean = %.15g" % (gmean))

        return pr

    def mi2(self):
        """ Mutual Information with log2

        Returns
        -------
        mi : float
            I(C; E) = H(C) + H(E).- H(C, E) mutual information
        nmic : float
            I(C; E) / H(C). MI normalized by H(C)
        nmie : float
            I(C; E) / H(E). MI normalized by H(E)
        amean : float
            Arithmetic mean of two normalized mutual informations.
        gmean : float
            Geometric mean of two normalized mutual informations.
        """
        # joint entropy of the pmf function n / sum(n)
        en = lambda n: np.sum([0.0 if i == 0.0
                                else (-i / self.t) * np.log2(i / self.t)
                                for i in np.ravel(n)])

        hc = en(self.c)
        he = en(self.e)
        hj = en(self.n)
        # clamp at zero to avoid tiny negative values from rounding
        mi = np.max((0.0, hc + he - hj))
        nmic = 1.0 if hc == 0.0 else mi / hc
        nmie = 1.0 if he == 0.0 else mi / he

        return mi, nmic, nmie, (nmic + nmie) / 2.0, np.sqrt(nmic * nmie)

    def str_mi2(self, header=True):
        """ Strings for mi2()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        mi, nmic, nmie, amean, gmean = self.mi2()
        pr = []
        if header:
            pr.append("### Mutual Information (log2) ###")
        pr.append("I(C;E) = %.15g" % (mi))
        pr.append("[ I(C;E)/H(C), I(C;E)/H(E) ] = [ %.15g, %.15g ]" % \
                  (nmic, nmie))
        pr.append("Arithmetic Mean = %.15g" % (amean))
        pr.append("Geometric Mean = %.15g" % (gmean))

        return pr

    def prf(self, alpha=0.5):
        """ Precision, recall, and F-measure

        Parameters
        ----------
        alpha : float, default=0.5
            weight of precision in calculation of F-measures

        Returns
        -------
        p : float
            Precision for a positive class
        r : float
            Recall for a positive class
        f : float
            F-measure for a positive class
        """
        p = self.n[1, 1] / (self.n[1, 1] + self.n[0, 1])
        r = self.n[1, 1] / (self.n[1, 1] + self.n[1, 0])
        f = 1.0 / (alpha * (1.0 / p) + (1.0 - alpha) * (1.0 / r))

        return p, r, f

    def str_prf(self, alpha=0.5, header=True):
        """ Strings for prf()

        Parameters
        ----------
        alpha : float, default=0.5
            weight of precision in calculation of F-measures
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        # BUGFIX: alpha was previously dropped (self.prf() was called with
        # the default); forward it so the reported F-measure honours the
        # requested precision weight.
        p, r, f = self.prf(alpha)
        pr = []
        if header:
            pr.append("### Precision, Recall, and F-measure ###")
        pr.append("Precision = %.15g" % (p))
        pr.append("Recall = %.15g" % (r))
        pr.append("F-measure = %.15g" % (f))

        return pr

    def all(self):
        """ all above statistics

        Returns
        -------
        stats : float
            list of all statistics
        """
        stats = []
        stats += self.ct()
        stats += self.mct()
        stats += self.acc()
        stats += self.jaccard()
        stats += self.kldiv()
        stats += self.mi()
        stats += self.mi2()
        stats += self.prf()

        return tuple(stats)

    def str_all(self, header=True):
        """ Strings for all()

        Parameters
        ----------
        header : boolean, default=True
            include header info

        Returns
        -------
        pr : list, type=str
            list of message strings
        """
        ret_str = ""
        ret_str += "\n".join(self.str_ct(header)) + "\n\n"
        ret_str += "\n".join(self.str_mct(header)) + "\n\n"
        ret_str += "\n".join(self.str_acc(header)) + "\n\n"
        ret_str += "\n".join(self.str_jaccard(header)) + "\n\n"
        ret_str += "\n".join(self.str_kldiv(header)) + "\n\n"
        ret_str += "\n".join(self.str_mi(header)) + "\n\n"
        ret_str += "\n".join(self.str_mi2(header)) + "\n\n"
        ret_str += "\n".join(self.str_prf(header))

        return ret_str
#==============================================================================
# Functions
#==============================================================================
#==============================================================================
# Module initialization
#==============================================================================
# init logging system ---------------------------------------------------------
logger = logging.getLogger('fadm')
if not logger.handlers:
    # BUGFIX: NullHandler must be instantiated; passing the class itself
    # makes logging raise when a record is routed through this handler.
    logger.addHandler(logging.NullHandler())
#==============================================================================
# Test routine
#==============================================================================
def _test():
""" test function for this module
"""
# perform doctest
import sys
import doctest
doctest.testmod()
sys.exit(0)
# Check if this is call as command script -------------------------------------
if __name__ == '__main__':
_test()
| [
"numpy.sum",
"numpy.log",
"doctest.testmod",
"numpy.ravel",
"numpy.empty",
"numpy.log2",
"numpy.isinf",
"logging.getLogger",
"numpy.isnan",
"numpy.any",
"numpy.max",
"sys.exit",
"numpy.sqrt"
] | [((15532, 15557), 'logging.getLogger', 'logging.getLogger', (['"""fadm"""'], {}), "('fadm')\n", (15549, 15557), False, 'import logging\n'), ((15923, 15940), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (15938, 15940), False, 'import doctest\n'), ((15946, 15957), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (15954, 15957), False, 'import sys\n'), ((2334, 2350), 'numpy.empty', 'np.empty', (['(2, 2)'], {}), '((2, 2))\n', (2342, 2350), True, 'import numpy as np\n'), ((2501, 2523), 'numpy.sum', 'np.sum', (['self.n'], {'axis': '(1)'}), '(self.n, axis=1)\n', (2507, 2523), True, 'import numpy as np\n'), ((2541, 2563), 'numpy.sum', 'np.sum', (['self.n'], {'axis': '(0)'}), '(self.n, axis=0)\n', (2547, 2563), True, 'import numpy as np\n'), ((2581, 2595), 'numpy.sum', 'np.sum', (['self.n'], {}), '(self.n)\n', (2587, 2595), True, 'import numpy as np\n'), ((3019, 3041), 'numpy.sum', 'np.sum', (['self.n'], {'axis': '(1)'}), '(self.n, axis=1)\n', (3025, 3041), True, 'import numpy as np\n'), ((3059, 3081), 'numpy.sum', 'np.sum', (['self.n'], {'axis': '(0)'}), '(self.n, axis=0)\n', (3065, 3081), True, 'import numpy as np\n'), ((3099, 3113), 'numpy.sum', 'np.sum', (['self.n'], {}), '(self.n)\n', (3105, 3113), True, 'import numpy as np\n'), ((5498, 5533), 'numpy.sqrt', 'np.sqrt', (['(acc * (1.0 - acc) / self.t)'], {}), '(acc * (1.0 - acc) / self.t)\n', (5505, 5533), True, 'import numpy as np\n'), ((10013, 10040), 'numpy.max', 'np.max', (['(0.0, hc + he - hj)'], {}), '((0.0, hc + he - hj))\n', (10019, 10040), True, 'import numpy as np\n'), ((11738, 11765), 'numpy.max', 'np.max', (['(0.0, hc + he - hj)'], {}), '((0.0, hc + he - hj))\n', (11744, 11765), True, 'import numpy as np\n'), ((2625, 2645), 'numpy.any', 'np.any', (['(self.n < 0.0)'], {}), '(self.n < 0.0)\n', (2631, 2645), True, 'import numpy as np\n'), ((10184, 10204), 'numpy.sqrt', 'np.sqrt', (['(nmic * nmie)'], {}), '(nmic * nmie)\n', (10191, 10204), True, 'import numpy as np\n'), ((11909, 11929), 
'numpy.sqrt', 'np.sqrt', (['(nmic * nmie)'], {}), '(nmic * nmie)\n', (11916, 11929), True, 'import numpy as np\n'), ((2670, 2686), 'numpy.isinf', 'np.isinf', (['self.n'], {}), '(self.n)\n', (2678, 2686), True, 'import numpy as np\n'), ((2698, 2714), 'numpy.isnan', 'np.isnan', (['self.n'], {}), '(self.n)\n', (2706, 2714), True, 'import numpy as np\n'), ((7974, 7987), 'numpy.log', 'np.log', (['(n / m)'], {}), '(n / m)\n', (7980, 7987), True, 'import numpy as np\n'), ((8263, 8277), 'numpy.log2', 'np.log2', (['(n / m)'], {}), '(n / m)\n', (8270, 8277), True, 'import numpy as np\n'), ((9912, 9923), 'numpy.ravel', 'np.ravel', (['n'], {}), '(n)\n', (9920, 9923), True, 'import numpy as np\n'), ((11637, 11648), 'numpy.ravel', 'np.ravel', (['n'], {}), '(n)\n', (11645, 11648), True, 'import numpy as np\n'), ((9853, 9871), 'numpy.log', 'np.log', (['(i / self.t)'], {}), '(i / self.t)\n', (9859, 9871), True, 'import numpy as np\n'), ((11577, 11596), 'numpy.log2', 'np.log2', (['(i / self.t)'], {}), '(i / self.t)\n', (11584, 11596), True, 'import numpy as np\n')] |
''' Metric class for tracking correlations by saving predictions '''
import numpy as np
from overrides import overrides
from allennlp.training.metrics.metric import Metric
from sklearn.metrics import matthews_corrcoef, confusion_matrix
from scipy.stats import pearsonr, spearmanr
import torch
@Metric.register("fastMatthews")
class FastMatthews(Metric):
"""Fast version of Matthews correlation.
Computes confusion matrix on each batch, and computes MCC from this when
get_metric() is called. Should match the numbers from the Correlation()
class, but will be much faster and use less memory on large datasets.
"""
def __init__(self, n_classes=2):
assert n_classes >= 2
self.n_classes = n_classes
self.reset()
def __call__(self, predictions, labels):
# Convert from Tensor if necessary
if isinstance(predictions, torch.Tensor):
predictions = predictions.cpu().numpy()
if isinstance(labels, torch.Tensor):
labels = labels.cpu().numpy()
assert predictions.dtype in [np.int32, np.int64, int]
assert labels.dtype in [np.int32, np.int64, int]
C = confusion_matrix(labels.ravel(), predictions.ravel(),
labels=np.arange(self.n_classes, dtype=np.int32))
assert C.shape == (self.n_classes, self.n_classes)
self._C += C
def mcc_from_confmat(self, C):
# Code below from
# https://github.com/scikit-learn/scikit-learn/blob/ed5e127b/sklearn/metrics/classification.py#L460
t_sum = C.sum(axis=1, dtype=np.float64)
p_sum = C.sum(axis=0, dtype=np.float64)
n_correct = np.trace(C, dtype=np.float64)
n_samples = p_sum.sum()
cov_ytyp = n_correct * n_samples - np.dot(t_sum, p_sum)
cov_ypyp = n_samples ** 2 - np.dot(p_sum, p_sum)
cov_ytyt = n_samples ** 2 - np.dot(t_sum, t_sum)
mcc = cov_ytyp / np.sqrt(cov_ytyt * cov_ypyp)
if np.isnan(mcc):
return 0.
else:
return mcc
def get_metric(self, reset=False):
# Compute Matthews correlation from confusion matrix.
# see https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
correlation = self.mcc_from_confmat(self._C)
if reset:
self.reset()
return correlation
@overrides
def reset(self):
self._C = np.zeros((self.n_classes, self.n_classes),
dtype=np.int64)
@Metric.register("correlation")
class Correlation(Metric):
"""Aggregate predictions, then calculate specified correlation"""
def __init__(self, corr_type):
self._predictions = []
self._labels = []
if corr_type == 'pearson':
corr_fn = pearsonr
elif corr_type == 'spearman':
corr_fn = spearmanr
elif corr_type == 'matthews':
corr_fn = matthews_corrcoef
else:
raise ValueError("Correlation type not supported")
self._corr_fn = corr_fn
self.corr_type = corr_type
def _correlation(self, labels, predictions):
corr = self._corr_fn(labels, predictions)
if self.corr_type in ['pearson', 'spearman']:
corr = corr[0]
return corr
def __call__(self, predictions, labels):
""" Accumulate statistics for a set of predictions and labels.
Values depend on correlation type; Could be binary or multivalued. This is handled by sklearn.
Args:
predictions: Tensor or np.array
labels: Tensor or np.array of same shape as predictions
"""
# Convert from Tensor if necessary
if isinstance(predictions, torch.Tensor):
predictions = predictions.cpu().numpy()
if isinstance(labels, torch.Tensor):
labels = labels.cpu().numpy()
# Verify shape match
assert predictions.shape == labels.shape, ("Predictions and labels must"
" have matching shape. Got:"
" preds=%s, labels=%s" % (
str(predictions.shape),
str(labels.shape)))
if self.corr_type == 'matthews':
assert predictions.dtype in [np.int32, np.int64, int]
assert labels.dtype in [np.int32, np.int64, int]
predictions = list(predictions.flatten())
labels = list(labels.flatten())
self._predictions += predictions
self._labels += labels
def get_metric(self, reset=False):
correlation = self._correlation(self._labels, self._predictions)
if reset:
self.reset()
return correlation
@overrides
def reset(self):
self._predictions = []
self._labels = []
| [
"numpy.trace",
"allennlp.training.metrics.metric.Metric.register",
"numpy.zeros",
"numpy.isnan",
"numpy.arange",
"numpy.dot",
"numpy.sqrt"
] | [((296, 327), 'allennlp.training.metrics.metric.Metric.register', 'Metric.register', (['"""fastMatthews"""'], {}), "('fastMatthews')\n", (311, 327), False, 'from allennlp.training.metrics.metric import Metric\n'), ((2497, 2527), 'allennlp.training.metrics.metric.Metric.register', 'Metric.register', (['"""correlation"""'], {}), "('correlation')\n", (2512, 2527), False, 'from allennlp.training.metrics.metric import Metric\n'), ((1671, 1700), 'numpy.trace', 'np.trace', (['C'], {'dtype': 'np.float64'}), '(C, dtype=np.float64)\n', (1679, 1700), True, 'import numpy as np\n'), ((1977, 1990), 'numpy.isnan', 'np.isnan', (['mcc'], {}), '(mcc)\n', (1985, 1990), True, 'import numpy as np\n'), ((2408, 2466), 'numpy.zeros', 'np.zeros', (['(self.n_classes, self.n_classes)'], {'dtype': 'np.int64'}), '((self.n_classes, self.n_classes), dtype=np.int64)\n', (2416, 2466), True, 'import numpy as np\n'), ((1776, 1796), 'numpy.dot', 'np.dot', (['t_sum', 'p_sum'], {}), '(t_sum, p_sum)\n', (1782, 1796), True, 'import numpy as np\n'), ((1833, 1853), 'numpy.dot', 'np.dot', (['p_sum', 'p_sum'], {}), '(p_sum, p_sum)\n', (1839, 1853), True, 'import numpy as np\n'), ((1890, 1910), 'numpy.dot', 'np.dot', (['t_sum', 't_sum'], {}), '(t_sum, t_sum)\n', (1896, 1910), True, 'import numpy as np\n'), ((1936, 1964), 'numpy.sqrt', 'np.sqrt', (['(cov_ytyt * cov_ypyp)'], {}), '(cov_ytyt * cov_ypyp)\n', (1943, 1964), True, 'import numpy as np\n'), ((1262, 1303), 'numpy.arange', 'np.arange', (['self.n_classes'], {'dtype': 'np.int32'}), '(self.n_classes, dtype=np.int32)\n', (1271, 1303), True, 'import numpy as np\n')] |
from CTL.tests.packedTest import PackedTest
from CTL.tensor.contract.tensorGraph import TensorGraph
from CTL.tensor.tensor import Tensor
from CTL.tensor.contract.link import makeLink
from CTL.tensor.contract.optimalContract import makeTensorGraph, contractWithSequence
import CTL.funcs.funcs as funcs
import numpy as np
class TestTensorGraph(PackedTest):
    """Unit tests for TensorGraph contraction-sequence optimization."""

    def __init__(self, methodName='runTest'):
        super().__init__(methodName=methodName, name='TensorGraph')

    def test_TensorGraph(self):
        shapeA = (300, 4, 5)
        shapeB = (300, 6)
        shapeC = (4, 6, 5)
        a = Tensor(shape=shapeA, labels=['a300', 'b4', 'c5'], data=np.ones(shapeA))
        b = Tensor(shape=shapeB, labels=['a300', 'd6'], data=np.ones(shapeB))
        c = Tensor(shape=shapeC, labels=['b4', 'd6', 'c5'], data=np.ones(shapeC))

        makeLink(a.getLeg('a300'), b.getLeg('a300'))
        makeLink(a.getLeg('b4'), c.getLeg('b4'))
        makeLink(a.getLeg('c5'), c.getLeg('c5'))
        makeLink(b.getLeg('d6'), c.getLeg('d6'))

        tensors = [a, b, c]
        graph = makeTensorGraph(tensors)

        # With the real bond dimension of 300, contracting 0 and 1 first is
        # cheapest; a generic "typical" dimension prefers 0 and 2 instead.
        seq = graph.optimalContractSequence(typicalDim=None)
        self.assertListEqual(seq, [(0, 1), (2, 0)])
        self.assertEqual(graph.optimalCostResult(), 36120)

        seq = graph.optimalContractSequence(typicalDim=None, bf=True)
        self.assertEqual(graph.optimalCostResult(), 36120)

        seq = graph.optimalContractSequence(typicalDim=10)
        self.assertListEqual(seq, [(0, 2), (1, 0)])
        self.assertEqual(graph.optimalCostResult(), 10100)

        seq = graph.optimalContractSequence(typicalDim=10, bf=True)
        self.assertEqual(graph.optimalCostResult(), 10100)

        res = contractWithSequence(tensors, seq=seq)
        expected = funcs.tupleProduct(shapeA) * funcs.tupleProduct(shapeB) * funcs.tupleProduct(shapeC)
        self.assertEqual(res.a ** 2, expected)
        # NOTE: the network can currently be contracted only once; a copyable
        # FiniteTensorNetwork that tracks all bonds and legs would allow the
        # contraction to be repeated.
| [
"CTL.funcs.funcs.tupleProduct",
"CTL.tensor.contract.optimalContract.contractWithSequence",
"numpy.ones",
"CTL.tensor.contract.optimalContract.makeTensorGraph"
] | [((1110, 1137), 'CTL.tensor.contract.optimalContract.makeTensorGraph', 'makeTensorGraph', (['tensorList'], {}), '(tensorList)\n', (1125, 1137), False, 'from CTL.tensor.contract.optimalContract import makeTensorGraph, contractWithSequence\n'), ((2035, 2076), 'CTL.tensor.contract.optimalContract.contractWithSequence', 'contractWithSequence', (['tensorList'], {'seq': 'seq'}), '(tensorList, seq=seq)\n', (2055, 2076), False, 'from CTL.tensor.contract.optimalContract import makeTensorGraph, contractWithSequence\n'), ((665, 680), 'numpy.ones', 'np.ones', (['shapeA'], {}), '(shapeA)\n', (672, 680), True, 'import numpy as np\n'), ((749, 764), 'numpy.ones', 'np.ones', (['shapeB'], {}), '(shapeB)\n', (756, 764), True, 'import numpy as np\n'), ((837, 852), 'numpy.ones', 'np.ones', (['shapeC'], {}), '(shapeC)\n', (844, 852), True, 'import numpy as np\n'), ((2175, 2201), 'CTL.funcs.funcs.tupleProduct', 'funcs.tupleProduct', (['shapeC'], {}), '(shapeC)\n', (2193, 2201), True, 'import CTL.funcs.funcs as funcs\n'), ((2117, 2143), 'CTL.funcs.funcs.tupleProduct', 'funcs.tupleProduct', (['shapeA'], {}), '(shapeA)\n', (2135, 2143), True, 'import CTL.funcs.funcs as funcs\n'), ((2146, 2172), 'CTL.funcs.funcs.tupleProduct', 'funcs.tupleProduct', (['shapeB'], {}), '(shapeB)\n', (2164, 2172), True, 'import CTL.funcs.funcs as funcs\n')] |
#!/usr/bin/env python
"""Tools for CUDA compilation and set-up for Python 3."""
import importlib
import logging
import os
import platform
import re
import shutil
import sys
from distutils.sysconfig import get_python_inc
from subprocess import PIPE, run
from textwrap import dedent
# from pkg_resources import resource_filename
try:
from numpy import get_include as get_numpy_inc
except ImportError:
pass
else:
nphdr = get_numpy_inc() # numpy header path
log = logging.getLogger(__name__)
prefix = sys.prefix
pyhdr = get_python_inc() # Python header paths
minc_c = 3, 5 # minimum required CUDA compute capability
mincc = minc_c[0] * 10 + minc_c[1]
def path_niftypet_local():
"""Get the path to the local (home) folder for NiftyPET resources."""
# if using conda put the resources in the folder with the environment name
if "CONDA_DEFAULT_ENV" in os.environ:
try:
env = re.findall(r"envs[/\\](.*)[/\\]bin[/\\]python", sys.executable)[0]
except IndexError:
env = os.environ["CONDA_DEFAULT_ENV"]
log.info("install> conda environment found: {}".format(env))
else:
env = ""
# create the path for the resources files according to the OS platform
if platform.system() in ("Linux", "Darwin"):
path_resources = os.path.expanduser("~")
elif platform.system() == "Windows":
path_resources = os.getenv("LOCALAPPDATA")
else:
raise ValueError("Unknown operating system: {}".format(platform.system()))
path_resources = os.path.join(path_resources, ".niftypet", env)
return path_resources
def find_cuda():
"""Locate the CUDA environment on the system."""
# search the PATH for NVCC
for fldr in os.environ["PATH"].split(os.pathsep):
cuda_path = os.path.join(fldr, "nvcc")
if os.path.exists(cuda_path):
cuda_path = os.path.dirname(os.path.dirname(cuda_path))
break
cuda_path = None
if cuda_path is None:
log.warning("nvcc compiler could not be found from the PATH!")
return
# serach for the CUDA library path
lcuda_path = os.path.join(cuda_path, "lib64")
if "LD_LIBRARY_PATH" in os.environ:
if lcuda_path in os.environ["LD_LIBRARY_PATH"].split(os.pathsep):
log.info("found CUDA lib64 in LD_LIBRARY_PATH: {}".format(lcuda_path))
elif os.path.isdir(lcuda_path):
log.info("found CUDA lib64 in: {}".format(lcuda_path))
else:
log.warning("folder for CUDA library (64-bit) could not be found!")
return cuda_path, lcuda_path
def dev_setup():
"""figure out what GPU devices are available and choose the supported ones."""
log.info(
dedent(
"""
--------------------------------------------------------------
Setting up CUDA ...
--------------------------------------------------------------"""
)
)
# check first if NiftyPET was already installed and use the choice of GPU
path_resources = path_niftypet_local()
# if so, import the resources and get the constants
if os.path.isfile(os.path.join(path_resources, "resources.py")):
resources = get_resources()
else:
log.error("resources file not found/installed.")
return
# get all constants and check if device is already chosen
Cnt = resources.get_setup()
# if "CCARCH" in Cnt and "DEVID" in Cnt:
# log.info("using this CUDA architecture(s): {}".format(Cnt["CCARCH"]))
# return Cnt["CCARCH"]
from miutil import cuinfo
# map from CUDA device order (CC) to NVML order (PCI bus)
nvml_id = [
i
for _, i in sorted(
((cuinfo.compute_capability(i), i) for i in range(cuinfo.num_devices())),
reverse=True,
)
]
if "DEVID" in Cnt:
devid = int(Cnt["DEVID"])
ccstr = cuinfo.nvcc_flags(nvml_id[devid])
ccs = ["{:d}{:d}".format(*cuinfo.compute_capability(nvml_id[devid]))]
else:
devid = 0
devs = cuinfo.num_devices()
if devs < 1:
return ""
ccstr = ";".join(
sorted(
{
cuinfo.nvcc_flags(i)
for i in range(devs)
if cuinfo.compute_capability(i) >= minc_c
}
)
)
if not ccstr:
return ""
ccs = sorted(
{
"{:d}{:d}".format(*cuinfo.compute_capability(i))
for i in range(devs)
if cuinfo.compute_capability(i) >= minc_c
}
)
# passing this setting to resources.py
fpth = os.path.join(
path_resources, "resources.py"
) # resource_filename(__name__, 'resources/resources.py')
with open(fpth, "r") as f:
rsrc = f.read()
# get the region of keeping in synch with Python
i0 = rsrc.find("# # # start GPU properties # # #")
i1 = rsrc.find("# # # end GPU properties # # #")
# list of constants which will be kept in sych from Python
cnt_dict = {"DEV_ID": str(devid), "CC_ARCH": repr(ccstr)}
# update the resource.py file
with open(fpth, "w") as f:
f.write(rsrc[:i0])
f.write("# # # start GPU properties # # #\n")
for k, v in cnt_dict.items():
f.write(k + " = " + v + "\n")
f.write(rsrc[i1:])
return ccs
def resources_setup(gpu=True):
"""
This function checks CUDA devices, selects some and installs resources.py
"""
log.info("installing file <resources.py> into home directory if it does not exist.")
path_current = os.path.dirname(os.path.realpath(__file__))
# get the path to the local resources.py (on Linux machines it is in ~/.niftypet)
path_resources = path_niftypet_local()
log.info("current path: {}".format(path_current))
# does the local folder for niftypet exists? if not create one.
if not os.path.exists(path_resources):
os.makedirs(path_resources)
# is resources.py in the folder?
if not os.path.isfile(os.path.join(path_resources, "resources.py")):
if os.path.isfile(os.path.join(path_current, "raw", "resources.py")):
shutil.copyfile(
os.path.join(path_current, "raw", "resources.py"),
os.path.join(path_resources, "resources.py"),
)
else:
raise IOError("could not find <resources.py")
else:
log.info(
"<resources.py> already found in local NiftyPET folder: {}".format(
path_resources
)
)
get_resources()
# find available GPU devices, select one or more and output the compilation flags
# return gpuarch for cmake compilation
return dev_setup() if gpu else ""
def get_resources(sys_append=True, reload=True):
path_resources = path_niftypet_local()
if sys_append:
if path_resources not in sys.path:
sys.path.append(path_resources)
try:
import resources
except ImportError:
log.error(
dedent(
"""\
--------------------------------------------------------------------------
NiftyPET resources file <resources.py> could not be imported.
It should be in ~/.niftypet/resources.py (Linux) or
in //Users//USERNAME//AppData//Local//niftypet//resources.py (Windows)
but likely it does not exists.
--------------------------------------------------------------------------"""
)
)
raise
else:
return importlib.reload(resources) if reload else resources
def cmake_cuda(
path_source, path_build, nvcc_flags="", logfile_prefix="py_", msvc_version=""
):
# CUDA installation
log.info(
dedent(
"""
--------------------------------------------------------------
CUDA compilation
--------------------------------------------------------------"""
)
)
if not os.path.isdir(path_build):
os.makedirs(path_build)
path_current = os.path.abspath(os.curdir)
try:
os.chdir(path_build)
# cmake installation commands
cmds = [
[
"cmake",
path_source,
f"-DPython3_ROOT_DIR={sys.prefix}",
f"-DCUDA_NVCC_FLAGS={nvcc_flags}",
],
["cmake", "--build", "./"],
]
if platform.system() == "Windows":
cmds[0] += ["-G", msvc_version]
cmds[1] += ["--config", "Release"]
# run commands with logging
cmakelogs = [
"{logfile_prefix}cmake_config.log",
"{logfile_prefix}cmake_build.log",
]
errs = False
for cmd, cmakelog in zip(cmds, cmakelogs):
log.info("Command:%s", cmd)
p = run(cmd, stdout=PIPE, stderr=PIPE)
stdout = p.stdout.decode("utf-8")
stderr = p.stderr.decode("utf-8")
with open(cmakelog, "w") as fd:
fd.write(stdout)
if p.returncode:
errs = True
log.info(
dedent(
"""
----------- compilation output ----------
%s
------------------ end ------------------"""
),
stdout,
)
if p.stderr:
log.error(
dedent(
"""
--------------- process errors ----------------
%s
--------------------- end ---------------------"""
),
stderr,
)
if errs:
raise SystemError("compilation failed")
finally:
os.chdir(path_current)
| [
"resources.get_setup",
"miutil.cuinfo.compute_capability",
"os.path.join",
"os.chdir",
"sys.path.append",
"os.path.abspath",
"os.path.dirname",
"distutils.sysconfig.get_python_inc",
"os.path.exists",
"miutil.cuinfo.nvcc_flags",
"re.findall",
"numpy.get_include",
"miutil.cuinfo.num_devices",
... | [((475, 502), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (492, 502), False, 'import logging\n'), ((532, 548), 'distutils.sysconfig.get_python_inc', 'get_python_inc', ([], {}), '()\n', (546, 548), False, 'from distutils.sysconfig import get_python_inc\n'), ((431, 446), 'numpy.get_include', 'get_numpy_inc', ([], {}), '()\n', (444, 446), True, 'from numpy import get_include as get_numpy_inc\n'), ((1539, 1585), 'os.path.join', 'os.path.join', (['path_resources', '""".niftypet"""', 'env'], {}), "(path_resources, '.niftypet', env)\n", (1551, 1585), False, 'import os\n'), ((2136, 2168), 'os.path.join', 'os.path.join', (['cuda_path', '"""lib64"""'], {}), "(cuda_path, 'lib64')\n", (2148, 2168), False, 'import os\n'), ((3371, 3392), 'resources.get_setup', 'resources.get_setup', ([], {}), '()\n', (3390, 3392), False, 'import resources\n'), ((4686, 4730), 'os.path.join', 'os.path.join', (['path_resources', '"""resources.py"""'], {}), "(path_resources, 'resources.py')\n", (4698, 4730), False, 'import os\n'), ((8121, 8147), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (8136, 8147), False, 'import os\n'), ((1242, 1259), 'platform.system', 'platform.system', ([], {}), '()\n', (1257, 1259), False, 'import platform\n'), ((1309, 1332), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (1327, 1332), False, 'import os\n'), ((1790, 1816), 'os.path.join', 'os.path.join', (['fldr', '"""nvcc"""'], {}), "(fldr, 'nvcc')\n", (1802, 1816), False, 'import os\n'), ((1828, 1853), 'os.path.exists', 'os.path.exists', (['cuda_path'], {}), '(cuda_path)\n', (1842, 1853), False, 'import os\n'), ((2375, 2400), 'os.path.isdir', 'os.path.isdir', (['lcuda_path'], {}), '(lcuda_path)\n', (2388, 2400), False, 'import os\n'), ((2709, 2915), 'textwrap.dedent', 'dedent', (['"""\n --------------------------------------------------------------\n Setting up CUDA ...\n 
--------------------------------------------------------------"""'], {}), '(\n """\n --------------------------------------------------------------\n Setting up CUDA ...\n --------------------------------------------------------------"""\n )\n', (2715, 2915), False, 'from textwrap import dedent\n'), ((3133, 3177), 'os.path.join', 'os.path.join', (['path_resources', '"""resources.py"""'], {}), "(path_resources, 'resources.py')\n", (3145, 3177), False, 'import os\n'), ((3898, 3931), 'miutil.cuinfo.nvcc_flags', 'cuinfo.nvcc_flags', (['nvml_id[devid]'], {}), '(nvml_id[devid])\n', (3915, 3931), False, 'from miutil import cuinfo\n'), ((4053, 4073), 'miutil.cuinfo.num_devices', 'cuinfo.num_devices', ([], {}), '()\n', (4071, 4073), False, 'from miutil import cuinfo\n'), ((5663, 5689), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5679, 5689), False, 'import os\n'), ((5954, 5984), 'os.path.exists', 'os.path.exists', (['path_resources'], {}), '(path_resources)\n', (5968, 5984), False, 'import os\n'), ((5994, 6021), 'os.makedirs', 'os.makedirs', (['path_resources'], {}), '(path_resources)\n', (6005, 6021), False, 'import os\n'), ((7809, 8012), 'textwrap.dedent', 'dedent', (['"""\n --------------------------------------------------------------\n CUDA compilation\n --------------------------------------------------------------"""'], {}), '(\n """\n --------------------------------------------------------------\n CUDA compilation\n --------------------------------------------------------------"""\n )\n', (7815, 8012), False, 'from textwrap import dedent\n'), ((8043, 8068), 'os.path.isdir', 'os.path.isdir', (['path_build'], {}), '(path_build)\n', (8056, 8068), False, 'import os\n'), ((8078, 8101), 'os.makedirs', 'os.makedirs', (['path_build'], {}), '(path_build)\n', (8089, 8101), False, 'import os\n'), ((8165, 8185), 'os.chdir', 'os.chdir', (['path_build'], {}), '(path_build)\n', (8173, 8185), False, 'import os\n'), ((9888, 9910), 'os.chdir', 
'os.chdir', (['path_current'], {}), '(path_current)\n', (9896, 9910), False, 'import os\n'), ((1342, 1359), 'platform.system', 'platform.system', ([], {}), '()\n', (1357, 1359), False, 'import platform\n'), ((1399, 1424), 'os.getenv', 'os.getenv', (['"""LOCALAPPDATA"""'], {}), "('LOCALAPPDATA')\n", (1408, 1424), False, 'import os\n'), ((6085, 6129), 'os.path.join', 'os.path.join', (['path_resources', '"""resources.py"""'], {}), "(path_resources, 'resources.py')\n", (6097, 6129), False, 'import os\n'), ((6158, 6207), 'os.path.join', 'os.path.join', (['path_current', '"""raw"""', '"""resources.py"""'], {}), "(path_current, 'raw', 'resources.py')\n", (6170, 6207), False, 'import os\n'), ((6977, 7008), 'sys.path.append', 'sys.path.append', (['path_resources'], {}), '(path_resources)\n', (6992, 7008), False, 'import sys\n'), ((7607, 7634), 'importlib.reload', 'importlib.reload', (['resources'], {}), '(resources)\n', (7623, 7634), False, 'import importlib\n'), ((8490, 8507), 'platform.system', 'platform.system', ([], {}), '()\n', (8505, 8507), False, 'import platform\n'), ((8905, 8939), 'subprocess.run', 'run', (['cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(cmd, stdout=PIPE, stderr=PIPE)\n', (8908, 8939), False, 'from subprocess import PIPE, run\n'), ((920, 988), 're.findall', 're.findall', (['"""envs[/\\\\\\\\](.*)[/\\\\\\\\]bin[/\\\\\\\\]python"""', 'sys.executable'], {}), "('envs[/\\\\\\\\](.*)[/\\\\\\\\]bin[/\\\\\\\\]python', sys.executable)\n", (930, 988), False, 'import re\n'), ((1895, 1921), 'os.path.dirname', 'os.path.dirname', (['cuda_path'], {}), '(cuda_path)\n', (1910, 1921), False, 'import os\n'), ((6255, 6304), 'os.path.join', 'os.path.join', (['path_current', '"""raw"""', '"""resources.py"""'], {}), "(path_current, 'raw', 'resources.py')\n", (6267, 6304), False, 'import os\n'), ((6322, 6366), 'os.path.join', 'os.path.join', (['path_resources', '"""resources.py"""'], {}), "(path_resources, 'resources.py')\n", (6334, 6366), False, 'import os\n'), ((7098, 
7535), 'textwrap.dedent', 'dedent', (['""" --------------------------------------------------------------------------\n NiftyPET resources file <resources.py> could not be imported.\n It should be in ~/.niftypet/resources.py (Linux) or\n in //Users//USERNAME//AppData//Local//niftypet//resources.py (Windows)\n but likely it does not exists.\n --------------------------------------------------------------------------"""'], {}), '(\n """ --------------------------------------------------------------------------\n NiftyPET resources file <resources.py> could not be imported.\n It should be in ~/.niftypet/resources.py (Linux) or\n in //Users//USERNAME//AppData//Local//niftypet//resources.py (Windows)\n but likely it does not exists.\n --------------------------------------------------------------------------"""\n )\n', (7104, 7535), False, 'from textwrap import dedent\n'), ((9207, 9378), 'textwrap.dedent', 'dedent', (['"""\n ----------- compilation output ----------\n %s\n ------------------ end ------------------"""'], {}), '(\n """\n ----------- compilation output ----------\n %s\n ------------------ end ------------------"""\n )\n', (9213, 9378), False, 'from textwrap import dedent\n'), ((1498, 1515), 'platform.system', 'platform.system', ([], {}), '()\n', (1513, 1515), False, 'import platform\n'), ((3966, 4007), 'miutil.cuinfo.compute_capability', 'cuinfo.compute_capability', (['nvml_id[devid]'], {}), '(nvml_id[devid])\n', (3991, 4007), False, 'from miutil import cuinfo\n'), ((4201, 4221), 'miutil.cuinfo.nvcc_flags', 'cuinfo.nvcc_flags', (['i'], {}), '(i)\n', (4218, 4221), False, 'from miutil import cuinfo\n'), ((9519, 9714), 'textwrap.dedent', 'dedent', (['"""\n --------------- process errors ----------------\n %s\n --------------------- end ---------------------"""'], {}), '(\n """\n --------------- process errors ----------------\n %s\n --------------------- end ---------------------"""\n )\n', (9525, 9714), False, 'from textwrap import dedent\n'), ((3711, 3739), 
'miutil.cuinfo.compute_capability', 'cuinfo.compute_capability', (['i'], {}), '(i)\n', (3736, 3739), False, 'from miutil import cuinfo\n'), ((4482, 4510), 'miutil.cuinfo.compute_capability', 'cuinfo.compute_capability', (['i'], {}), '(i)\n', (4507, 4510), False, 'from miutil import cuinfo\n'), ((4568, 4596), 'miutil.cuinfo.compute_capability', 'cuinfo.compute_capability', (['i'], {}), '(i)\n', (4593, 4596), False, 'from miutil import cuinfo\n'), ((3759, 3779), 'miutil.cuinfo.num_devices', 'cuinfo.num_devices', ([], {}), '()\n', (3777, 3779), False, 'from miutil import cuinfo\n'), ((4286, 4314), 'miutil.cuinfo.compute_capability', 'cuinfo.compute_capability', (['i'], {}), '(i)\n', (4311, 4314), False, 'from miutil import cuinfo\n')] |
__author__ = 'eric'
import utils
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
comp_fc7, props, fc7_feats, pool5_feats = utils.load_feature_db()
class_labels = np.zeros(shape=(len(props),), dtype=np.int32)
aspect_labels = np.zeros(shape=(len(props),), dtype=np.int32)
for i in range(len(props)):
class_labels[i] = props[i]['type_id']
aspect_labels[i] = props[i]['aspect_id']
classes = np.unique(class_labels)
# this is only to display the class ID
from sklearn import datasets
digits = datasets.load_digits(n_class=classes.shape[0])
labels = utils.load_db_labels()
# ----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, with_labels=True, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
if with_labels is None:
plt.scatter(X[i, 0], X[i, 1],
color=plt.cm.Set1(class_labels[i] * 1.0 / len(classes)),
s=32)
continue
elif not with_labels:
plt.text(X[i, 0], X[i, 1], str(class_labels[i]),
color=plt.cm.Set1(class_labels[i] * 1.0 / len(classes)),
fontdict={'weight': 'bold', 'size': 30})
elif with_labels:
plt.text(X[i, 0], X[i, 1], labels[class_labels[i]],
color=plt.cm.Set1(class_labels[i] * 1.0 / len(classes)),
fontdict={'weight': 'bold', 'size': 9})
plt.xticks([]), plt.yticks([])
plt.ylim([-0.1, 1.1])
plt.xlim([-0.1, 1.1])
if title is not None:
plt.title(title)
n_neighbors = 3
# model = manifold.TSNE(n_components=2, perplexity=5, random_state=0)
# model = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='standard')
# model = manifold.Isomap(n_neighbors, n_components=2)
# model = manifold.SpectralEmbedding(n_components=2, random_state=0,
# eigen_solver="arpack")
# embed_X = model.fit_transform(comp_fc7)
model = lda.LDA(n_components=2)
embed_X = model.fit_transform(comp_fc7, class_labels)
plot_embedding(embed_X, with_labels=False, title='')
| [
"matplotlib.pyplot.title",
"sklearn.datasets.load_digits",
"matplotlib.pyplot.subplot",
"sklearn.lda.LDA",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.figure",
"utils.load_db_labels",
"numpy.min",
"numpy.max",
"utils.load_feature_db",
"m... | [((274, 297), 'utils.load_feature_db', 'utils.load_feature_db', ([], {}), '()\n', (295, 297), False, 'import utils\n'), ((547, 570), 'numpy.unique', 'np.unique', (['class_labels'], {}), '(class_labels)\n', (556, 570), True, 'import numpy as np\n'), ((650, 696), 'sklearn.datasets.load_digits', 'datasets.load_digits', ([], {'n_class': 'classes.shape[0]'}), '(n_class=classes.shape[0])\n', (670, 696), False, 'from sklearn import datasets\n'), ((706, 728), 'utils.load_db_labels', 'utils.load_db_labels', ([], {}), '()\n', (726, 728), False, 'import utils\n'), ((2288, 2311), 'sklearn.lda.LDA', 'lda.LDA', ([], {'n_components': '(2)'}), '(n_components=2)\n', (2295, 2311), False, 'from sklearn import manifold, datasets, decomposition, ensemble, lda, random_projection\n'), ((990, 1002), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1000, 1002), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1028), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (1023, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1769, 1790), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (1777, 1790), True, 'import matplotlib.pyplot as plt\n'), ((1795, 1816), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-0.1, 1.1]'], {}), '([-0.1, 1.1])\n', (1803, 1816), True, 'import matplotlib.pyplot as plt\n'), ((920, 932), 'numpy.min', 'np.min', (['X', '(0)'], {}), '(X, 0)\n', (926, 932), True, 'import numpy as np\n'), ((934, 946), 'numpy.max', 'np.max', (['X', '(0)'], {}), '(X, 0)\n', (940, 946), True, 'import numpy as np\n'), ((1734, 1748), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1744, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1750, 1764), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1760, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1851, 1867), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1860, 1867), True, 'import matplotlib.pyplot as 
plt\n')] |
import os, struct, math
import numpy as np
import torch
from glob import glob
import cv2
import torch.nn.functional as F
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def get_latest_file(root_dir):
"""Returns path to latest file in a directory."""
list_of_files = glob.glob(os.path.join(root_dir, '*'))
latest_file = max(list_of_files, key=os.path.getctime)
return latest_file
def parse_comma_separated_integers(string):
return list(map(int, string.split(',')))
def convert_image(img):
if not isinstance(img, np.ndarray):
img = np.array(img.cpu().detach().numpy())
img = img.squeeze()
img = img.transpose(1,2,0)
img += 1.
img /= 2.
img *= 2**8 - 1
img = img.round().clip(0, 2**8-1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def write_img(img, path):
cv2.imwrite(path, img.astype(np.uint8))
def in_out_to_param_count(in_out_tuples):
return np.sum([np.prod(in_out) + in_out[-1] for in_out in in_out_tuples])
def parse_intrinsics(filepath, trgt_sidelength=None, invert_y=False):
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
grid_barycenter = torch.Tensor(list(map(float, file.readline().split())))
scale = float(file.readline())
height, width = map(float, file.readline().split())
try:
world2cam_poses = int(file.readline())
except ValueError:
world2cam_poses = None
if world2cam_poses is None:
world2cam_poses = False
world2cam_poses = bool(world2cam_poses)
if trgt_sidelength is not None:
cx = cx/width * trgt_sidelength
cy = cy/height * trgt_sidelength
f = trgt_sidelength / height * f
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic, grid_barycenter, scale, world2cam_poses
def lin2img(tensor):
batch_size, num_samples, channels = tensor.shape
sidelen = np.sqrt(num_samples).astype(int)
return tensor.permute(0,2,1).view(batch_size, channels, sidelen, sidelen)
def num_divisible_by_2(number):
i = 0
while not number%2:
number = number // 2
i += 1
return i
def cond_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def load_pose(filename):
assert os.path.isfile(filename)
lines = open(filename).read().splitlines()
assert len(lines) == 4
lines = [[x[0],x[1],x[2],x[3]] for x in (x.split(" ") for x in lines)]
return torch.from_numpy(np.asarray(lines).astype(np.float32))
def normalize(img):
return (img - img.min()) / (img.max() - img.min())
def write_image(writer, name, img, iter):
writer.add_image(name, normalize(img.permute([0,3,1,2])), iter)
def print_network(net):
model_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("%d"%params)
def custom_load(model, path, discriminator=None, overwrite_embeddings=False, overwrite_renderer=False, optimizer=None):
if os.path.isdir(path):
checkpoint_path = sorted(glob(os.path.join(path, "*.pth")))[-1]
else:
checkpoint_path = path
whole_dict = torch.load(checkpoint_path)
if overwrite_embeddings:
del whole_dict['model']['latent_codes.weight']
if overwrite_renderer:
keys_to_remove = [key for key in whole_dict['model'].keys() if 'rendering_net' in key]
for key in keys_to_remove:
print(key)
whole_dict['model'].pop(key, None)
state = model.state_dict()
state.update(whole_dict['model'])
model.load_state_dict(state)
if discriminator:
discriminator.load_state_dict(whole_dict['discriminator'])
if optimizer:
optimizer.load_state_dict(whole_dict['optimizer'])
def custom_save(model, path, discriminator=None, optimizer=None):
whole_dict = {'model':model.state_dict()}
if discriminator:
whole_dict.update({'discriminator':discriminator.state_dict()})
if optimizer:
whole_dict.update({'optimizer':optimizer.state_dict()})
torch.save(whole_dict, path)
def show_images(images, titles=None):
"""Display a list of images in a single figure with matplotlib.
Parameters
---------
images: List of np.arrays compatible with plt.imshow.
cols (Default = 1): Number of columns in figure (number of rows is
set to np.ceil(n_images/float(cols))).
titles: List of titles corresponding to each image. Must have
the same length as titles.
"""
assert ((titles is None) or (len(images) == len(titles)))
cols = np.ceil(np.sqrt(len(images))).astype(int)
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
a = fig.add_subplot(np.ceil(n_images / float(cols)), cols, n + 1)
im = a.imshow(image)
a.get_xaxis().set_visible(False)
a.get_yaxis().set_visible(False)
if len(images) < 10:
divider = make_axes_locatable(a)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.tight_layout()
# fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
return fig
| [
"mpl_toolkits.axes_grid1.make_axes_locatable",
"os.path.join",
"os.makedirs",
"cv2.cvtColor",
"os.path.isdir",
"torch.load",
"numpy.asarray",
"os.path.exists",
"torch.save",
"os.path.isfile",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.tight_layout",
"numpy.prod",
"nump... | [((799, 835), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (811, 835), False, 'import cv2\n'), ((1944, 2022), 'numpy.array', 'np.array', (['[[fx, 0.0, cx, 0.0], [0.0, fy, cy, 0], [0.0, 0, 1, 0], [0, 0, 0, 1]]'], {}), '([[fx, 0.0, cx, 0.0], [0.0, fy, cy, 0], [0.0, 0, 1, 0], [0, 0, 0, 1]])\n', (1952, 2022), True, 'import numpy as np\n'), ((2625, 2649), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2639, 2649), False, 'import os, struct, math\n'), ((3371, 3390), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (3384, 3390), False, 'import os, struct, math\n'), ((3523, 3550), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (3533, 3550), False, 'import torch\n'), ((4430, 4458), 'torch.save', 'torch.save', (['whole_dict', 'path'], {}), '(whole_dict, path)\n', (4440, 4458), False, 'import torch\n'), ((5140, 5152), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5150, 5152), True, 'import matplotlib.pyplot as plt\n'), ((5611, 5629), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5627, 5629), True, 'import matplotlib.pyplot as plt\n'), ((328, 355), 'os.path.join', 'os.path.join', (['root_dir', '"""*"""'], {}), "(root_dir, '*')\n", (340, 355), False, 'import os, struct, math\n'), ((2539, 2559), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2553, 2559), False, 'import os, struct, math\n'), ((2569, 2586), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (2580, 2586), False, 'import os, struct, math\n'), ((2269, 2289), 'numpy.sqrt', 'np.sqrt', (['num_samples'], {}), '(num_samples)\n', (2276, 2289), True, 'import numpy as np\n'), ((5452, 5474), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['a'], {}), '(a)\n', (5471, 5474), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((985, 1000), 'numpy.prod', 'np.prod', (['in_out'], {}), 
'(in_out)\n', (992, 1000), True, 'import numpy as np\n'), ((2827, 2844), 'numpy.asarray', 'np.asarray', (['lines'], {}), '(lines)\n', (2837, 2844), True, 'import numpy as np\n'), ((3430, 3457), 'os.path.join', 'os.path.join', (['path', '"""*.pth"""'], {}), "(path, '*.pth')\n", (3442, 3457), False, 'import os, struct, math\n')] |
"""Example file for testing
This creates a small testnet with ipaddresses from 192.168.0.0/24,
one switch, and three hosts.
"""
import sys, os
import io
import time
import math
import signal
import numpy as np
import fnmatch
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
try:
del os.environ["ONLY_ONE_FLOW"]
except KeyError:
pass
import subprocess
import virtnet
import statistics
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--bytes_to_capture', type=int, default=100)
parser.add_argument('--delay', type=int, default=100)
parser.add_argument('--rate', type=float, default=8)
parser.add_argument('--time', type=float, default=10)
parser.add_argument('--qdisc', type=str, default="fq")
parser.add_argument('--cport', type=int, default=9000)
parser.add_argument('--buffer_size', type=int, default=10)
parser.add_argument('--how_many_values_per_parameter', type=int, default=5)
parser.add_argument('--run_scenario', type=str, default="")
parser.add_argument('--store_pcaps', action='store_true')
parser.add_argument('--competing_flow', action='store_true')
parser.add_argument('--two_iperfs', action='store_true')
parser.add_argument('--only_iperf', action='store_true')
opt = parser.parse_args()
print(opt)
def run_commands(cmds, Popen=False):
    """Run one or more shell commands sequentially.

    Parameters
    ----------
    cmds : str | tuple | list
        A single command string, a ``(command, kwargs)`` tuple, or a list of
        either.  Each command string is split on single spaces before being
        executed (no shell is involved).
    Popen : bool
        If True, start each command with ``subprocess.Popen`` (non-blocking)
        and return the ``Popen`` handles; otherwise block with
        ``subprocess.run(check=True)`` and return ``CompletedProcess``
        objects.

    Returns
    -------
    list
        One ``CompletedProcess`` (or ``Popen``) per command, in input order.

    Raises
    ------
    subprocess.CalledProcessError
        If a blocking command exits non-zero (logged, then re-raised).
    """
    if not isinstance(cmds, list):
        cmds = [cmds]
    return_stuff = []
    for cmd in cmds:
        # A tuple carries extra keyword arguments for the subprocess call.
        if isinstance(cmd, tuple):
            cmd, kwargs = cmd
        else:
            kwargs = {}
        try:
            print("cmd", cmd)
            if not Popen:
                output = subprocess.run(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True, **kwargs)
                return_stuff.append(output)
            else:
                popen = subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs)
                return_stuff.append(popen)
        except subprocess.CalledProcessError as e:
            print(e.cmd, e.returncode, e.output)
            # Bare raise keeps the original traceback intact.
            raise
    return return_stuff
# print("os.environ", os.environ)
def execute_popen_and_show_result(command, host=None):
parent = host if host is not None else subprocess
print(f"Executing{f' on host {host.name}' if host else ''}", command)
with parent.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as cmd:
out, err = cmd.stdout.read(), cmd.stderr.read()
if out:
print("out", out.decode("utf-8"))
if err:
print("err", err.decode("utf-8"))
# Head start (seconds) given to the competing iperf3 flow before the main flow.
number_of_seconds_the_competing_flow_starts_earlier = 5
def run(vnet, prefix=""):
start_time = int(time.time() * 1000)
"Main functionality"
# print("Calculating pdf...")
# x = np.linspace(-X, X, SAMPLES)
# y = norm.pdf(x, loc=-5)+norm.pdf(x, loc=5, scale=3)
# area = np.trapz(y)*(2*X)/SAMPLES
print("Building network...")
network = vnet.Network("192.168.0.0/24")
switch = vnet.Switch("sw")
hosts = []
for i in range(2):
host = vnet.Host("host{}".format(i))
host.connect(vnet.VirtualLink, switch, "eth0")
# print("switch.interfaces", switch.interfaces)
host["eth0"].add_ip(network)
execute_popen_and_show_result("ethtool -K eth0 gro off", host)
execute_popen_and_show_result("ethtool -K eth0 gso off", host)
execute_popen_and_show_result("ethtool -K eth0 tso off", host)
hosts.append(host)
# print("host", host)
# hosts[0]["eth0"].tc('add', 'netem', delay=DELAY, jitter=SIGMA, dist=y)
# import pdb; pdb.set_trace()
# print("switch.interfaces", switch.interfaces)
for interface in switch.interfaces:
print("interface", interface)
# continue
execute_popen_and_show_result(f"ethtool -K {interface} gro off")
execute_popen_and_show_result(f"ethtool -K {interface} gso off")
execute_popen_and_show_result(f"ethtool -K {interface} tso off")
run_commands([f"tc qdisc add dev {interface} root handle 1: netem{f' delay {int(round(opt.delay/2))}ms'}", f"tc qdisc add dev {interface} parent 1: handle 2: htb default 21", f"tc class add dev {interface} parent 2: classid 2:21 htb rate {opt.rate if interface=='host10' else 100}mbit", f"tc qdisc add dev {interface} parent 2:21 handle 3: {opt.qdisc if interface=='host10' else 'fq'}{f' flow_limit {int(math.ceil(opt.buffer_size))}' if (interface=='host10' and opt.qdisc=='fq') else ''}{f' limit {int(math.ceil(opt.buffer_size))}' if (interface=='host10' and opt.qdisc=='pfifo') else ''}"])
vnet.update_hosts()
for i in range(len(hosts)):
with hosts[i].Popen("tc qdisc show dev eth0".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as qdisc_info:
qdisc_info_output = qdisc_info.stdout.read().decode("utf-8").split("\n")
print(f"qdisc_info_output host {i}", qdisc_info_output)
with hosts[0].Popen("ping -c 100 -i 0 host1".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) as ping:
ping_output = ping.stdout.read().decode("utf-8").split("\n")
ping_output = [float(item.split()[-2][5:]) for item in ping_output if "time=" in item]
mean_rtt = statistics.mean(ping_output)
print("mean rtt", mean_rtt)
assert mean_rtt >= opt.delay, f"mean_rtt: {mean_rtt}, opt.delay: {opt.delay}"
protocol_for_main_flow = "tcp"
if not opt.only_iperf:
if not opt.two_iperfs:
protocol_for_main_flow = "udp"
server_popen = hosts[1].Popen(f"./app/pccserver recv {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
server_popen = hosts[1].Popen("iperf3 -V -4 -s -p 5211".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if opt.competing_flow:
server_popen_iperf = hosts[1].Popen("iperf3 -V -4 -s".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
os.environ["file_name_for_logging"] = f"pcaps/{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.txt"
if opt.store_pcaps:
os.makedirs("pcaps", exist_ok=True)
tcpdump_sender_popens = []
tcpdump_receiver_popens = []
if not opt.only_iperf:
tcpdump_sender_popens.append(hosts[0].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/sender_{prefix}_{protocol_for_main_flow}_port{opt.cport}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.buffer_size}_{opt.time}_{start_time}.pcap dst port {opt.cport} or src port {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
tcpdump_receiver_popens.append(hosts[1].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/receiver_{prefix}_{protocol_for_main_flow}_port{opt.cport}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.buffer_size}_{opt.time}_{start_time}.pcap dst port {opt.cport} or src port {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if opt.competing_flow:
tcpdump_sender_popens.append(hosts[0].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/sender_{prefix}_tcp_port{opt.cport+10}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.pcap tcp and dst port {opt.cport+10} or src port {opt.cport+10}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
tcpdump_receiver_popens.append(hosts[1].Popen(f"/usr/sbin/tcpdump -s {opt.bytes_to_capture} -i eth0 -w pcaps/receiver_{prefix}_tcp_port{opt.cport+10}_{opt.qdisc}_{opt.delay}_{opt.rate}_{opt.time}_{start_time}.pcap tcp and dst port {opt.cport+10} or src port {opt.cport+10}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE))
if opt.competing_flow:
client_popen_iperf = hosts[0].Popen(f"iperf3 -V -4 -t {opt.time+number_of_seconds_the_competing_flow_starts_earlier} --cport {opt.cport+10} -c host1".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(number_of_seconds_the_competing_flow_starts_earlier)
if not opt.only_iperf:
if not opt.two_iperfs:
client_popen = hosts[0].Popen(f"./app/pccclient send host1 {opt.cport}".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# client_popen = hosts[0].Popen(f"./app/pccclient send host1 {opt.cport}", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# client_popen = hosts[0].Popen(f"gdb --args ./app/pccclient send host1 {opt.cport}", shell=True)
else:
client_popen = hosts[0].Popen(f"iperf3 -V -4 -t {opt.time+number_of_seconds_the_competing_flow_starts_earlier} -p 5211 --cport {opt.cport} -c host1".split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# client_popen.communicate()
time.sleep(opt.time)
if not opt.only_iperf:
# print("Terminating")
print("returncode before", client_popen.returncode)
client_popen.terminate()
print("returncode after", client_popen.returncode)
# import pdb; pdb.set_trace()
out, err = client_popen.stdout.read(), client_popen.stderr.read()
if out:
print("client out", out.decode("utf-8"))
if err:
print("client err", err.decode("utf-8"))
client_out = out
if opt.competing_flow:
client_popen_iperf.terminate()
out, err = client_popen_iperf.stdout.read(), client_popen_iperf.stderr.read()
if out:
print("client iperf out", out.decode("utf-8"))
if err:
print("client iperf err", err.decode("utf-8"))
else:
client_out = b""
if not opt.only_iperf:
server_popen.terminate()
out, err = server_popen.stdout.read(), server_popen.stderr.read()
if out:
print("server out", out.decode("utf-8"))
if err:
print("server err", err.decode("utf-8"))
if opt.competing_flow:
server_popen_iperf.terminate()
out, err = server_popen_iperf.stdout.read(), server_popen_iperf.stderr.read()
if out:
print("server iperf out", out.decode("utf-8"))
if err:
print("server iperf err", err.decode("utf-8"))
if opt.store_pcaps:
for tcpdump_sender_popen in tcpdump_sender_popens:
tcpdump_sender_popen.terminate()
out, err = tcpdump_sender_popen.stdout.read(), tcpdump_sender_popen.stderr.read()
if out:
print("tcpdump out", out.decode("utf-8"))
if err:
print("tcpdump err", err.decode("utf-8"))
for tcpdump_receiver_popen in tcpdump_receiver_popens:
tcpdump_receiver_popen.terminate()
out, err = tcpdump_receiver_popen.stdout.read(), tcpdump_receiver_popen.stderr.read()
if out:
print("tcpdump out", out.decode("utf-8"))
if err:
print("tcpdump err", err.decode("utf-8"))
subprocess.check_output("chmod -R o+rw pcaps".split())
return client_out.decode("utf-8"), start_time
if opt.run_scenario == "":
with virtnet.Manager() as context:
run(context)
elif opt.run_scenario == "accuracy":
import sklearn.metrics
results_dict = {}
for bw_index, bw in enumerate(np.linspace(5,50,opt.how_many_values_per_parameter)):
for delay_index, delay in enumerate(np.linspace(10,100,opt.how_many_values_per_parameter)):
for buffer_index, buffer in enumerate(np.linspace(1,100,opt.how_many_values_per_parameter)):
for fq_index, fq in enumerate([False, True]):
opt.rate = int(round(bw))
opt.delay = int(round(delay))
opt.buffer_size = int(round(buffer))
opt.qdisc = "fq" if fq else "pfifo"
opt.time = 10
with virtnet.Manager() as context:
client_output, timestamp = run(context, "accuracy")
assert client_output != ""
contained_vegas = "Starting Vegas" in client_output
contained_pcc = "Starting PCC Classic" in client_output
results_dict[(bw, delay, buffer, fq)] = (contained_vegas, contained_pcc)
invalids = []
false_predictions = []
predictions = []
for (bw, delay, buffer, fq), (is_vegas, is_pcc) in results_dict.items():
is_invalid = (not is_vegas and not is_pcc)
if is_invalid:
invalids.append(((bw, delay, buffer, fq), (is_vegas, is_pcc)))
if not is_invalid:
predictions.append((fq, is_vegas))
if fq != is_vegas:
false_predictions.append(((bw, delay, buffer, fq), is_vegas))
print("invalids", len(invalids), "total", len(results_dict))
print("invalids", invalids)
confusion_matrix_input = list(zip(*predictions))
accuracy_score = sklearn.metrics.accuracy_score(*confusion_matrix_input)
print("accuracy_score", accuracy_score)
confusion_matrix = sklearn.metrics.confusion_matrix(*confusion_matrix_input)
print("confusion_matrix", confusion_matrix)
print("false_predictions", false_predictions)
elif opt.run_scenario == "evaluation":
results_dict = {}
opt.store_pcaps = True
for bw_index, bw in enumerate(np.linspace(5,50,opt.how_many_values_per_parameter)):
for delay_index, delay in enumerate(np.linspace(10,100,opt.how_many_values_per_parameter)):
for buffer_index, buffer in enumerate(np.linspace(1,100,opt.how_many_values_per_parameter)):
fq = True
opt.rate = int(round(bw))
opt.delay = int(round(delay))
opt.buffer_size = int(round(buffer))
opt.qdisc = "fq" if fq else "pfifo"
opt.time = 30
with virtnet.Manager() as context:
client_output, timestamp = run(context, "accuracy")
contained_vegas = "Starting Vegas" in client_output
assert opt.store_pcaps
files = []
for file in os.listdir('pcaps'):
if fnmatch.fnmatch(file, f'sender_*{timestamp}.pcap'):
files.append(file)
assert len(files) == 1, len(files)
command = f"python3 ./plot_rtt_and_bandwidth.py {files[0]} no_plotting"
# print("command", command)
output = subprocess.check_output(command.split())
# print("parsing output", output)
output_lines = output.decode("utf-8").split("\n")[:2]
throughput = float(output_lines[0].split(" ")[-1])
rtt = float(output_lines[1].split(" ")[-1])
print("throughput", throughput, "rtt", rtt)
results_dict[(bw, delay, buffer)] = (throughput, rtt, contained_vegas)
all_throughputs, all_delays, contained_vegas = zip(*results_dict.values())
print("total len", len(all_throughputs))
print("mean throughput", statistics.mean(all_throughputs), "stdev throughput", statistics.stdev(all_throughputs))
print("mean rtt", statistics.mean(all_delays), "stdev rtt", statistics.stdev(all_delays))
print("detection accuracy", sum(contained_vegas)/len(contained_vegas))
elif opt.run_scenario == "competing_flow":
opt.competing_flow = True
opt.time = 20
opt.store_pcaps = True
opt.buffer_size = 100
opt.rate = 50
opt.delay = 10
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
print("Starting fq experiment")
opt.qdisc = "fq"
opt.two_iperfs = False
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_fq")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
opt.two_iperfs = True
print("Starting pfifo experiment")
opt.qdisc = "pfifo"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_pfifo")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
# os.environ["START_PCC_CLASSIC"] = "1"
opt.two_iperfs = True
# print("Starting fq experiment with PCC_CLASSIC")
print("Starting fq experiment with Cubic")
opt.qdisc = "fq"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_fq_pcc")
try:
del os.environ["START_VEGAS"]
except KeyError:
pass
try:
del os.environ["START_PCC_CLASSIC"]
except KeyError:
pass
try:
del os.environ["START_PCC"]
except KeyError:
pass
opt.two_iperfs = False
os.environ["START_VEGAS"] = "1"
opt.qdisc = "pfifo"
print("Starting pfifo experiment with VEGAS")
with virtnet.Manager() as context:
client_output, timestamp = run(context, "competing_flow_pfifo_vegas")
elif opt.run_scenario == "just_one_flow":
opt.time = 20
opt.store_pcaps = True
opt.buffer_size = 100
opt.rate = 10
opt.delay = 10
os.environ["ONLY_ONE_FLOW"] = "1"
print("ours experiment")
opt.qdisc = "fq"
with virtnet.Manager() as context:
client_output, timestamp = run(context, "just_one_flow_vegas")
print("cubic experiment")
opt.qdisc = "fq"
opt.only_iperf = True
opt.competing_flow = True
with virtnet.Manager() as context:
client_output, timestamp = run(context, "just_one_flow_cubic") | [
"os.makedirs",
"argparse.ArgumentParser",
"math.ceil",
"os.path.dirname",
"statistics.stdev",
"time.sleep",
"time.time",
"statistics.mean",
"numpy.linspace",
"fnmatch.fnmatch",
"os.listdir",
"virtnet.Manager"
] | [((630, 655), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (653, 655), False, 'import argparse\n'), ((8439, 8459), 'time.sleep', 'time.sleep', (['opt.time'], {}), '(opt.time)\n', (8449, 8459), False, 'import time\n'), ((5102, 5130), 'statistics.mean', 'statistics.mean', (['ping_output'], {}), '(ping_output)\n', (5117, 5130), False, 'import statistics\n'), ((5903, 5938), 'os.makedirs', 'os.makedirs', (['"""pcaps"""'], {'exist_ok': '(True)'}), "('pcaps', exist_ok=True)\n", (5914, 5938), False, 'import sys, os\n'), ((7689, 7752), 'time.sleep', 'time.sleep', (['number_of_seconds_the_competing_flow_starts_earlier'], {}), '(number_of_seconds_the_competing_flow_starts_earlier)\n', (7699, 7752), False, 'import time\n'), ((10447, 10464), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (10462, 10464), False, 'import virtnet\n'), ((278, 303), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import sys, os\n'), ((2704, 2715), 'time.time', 'time.time', ([], {}), '()\n', (2713, 2715), False, 'import time\n'), ((10603, 10656), 'numpy.linspace', 'np.linspace', (['(5)', '(50)', 'opt.how_many_values_per_parameter'], {}), '(5, 50, opt.how_many_values_per_parameter)\n', (10614, 10656), True, 'import numpy as np\n'), ((10695, 10750), 'numpy.linspace', 'np.linspace', (['(10)', '(100)', 'opt.how_many_values_per_parameter'], {}), '(10, 100, opt.how_many_values_per_parameter)\n', (10706, 10750), True, 'import numpy as np\n'), ((12341, 12394), 'numpy.linspace', 'np.linspace', (['(5)', '(50)', 'opt.how_many_values_per_parameter'], {}), '(5, 50, opt.how_many_values_per_parameter)\n', (12352, 12394), True, 'import numpy as np\n'), ((13751, 13783), 'statistics.mean', 'statistics.mean', (['all_throughputs'], {}), '(all_throughputs)\n', (13766, 13783), False, 'import statistics\n'), ((13805, 13838), 'statistics.stdev', 'statistics.stdev', (['all_throughputs'], {}), '(all_throughputs)\n', (13821, 13838), False, 
'import statistics\n'), ((13859, 13886), 'statistics.mean', 'statistics.mean', (['all_delays'], {}), '(all_delays)\n', (13874, 13886), False, 'import statistics\n'), ((13901, 13929), 'statistics.stdev', 'statistics.stdev', (['all_delays'], {}), '(all_delays)\n', (13917, 13929), False, 'import statistics\n'), ((10792, 10846), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', 'opt.how_many_values_per_parameter'], {}), '(1, 100, opt.how_many_values_per_parameter)\n', (10803, 10846), True, 'import numpy as np\n'), ((12433, 12488), 'numpy.linspace', 'np.linspace', (['(10)', '(100)', 'opt.how_many_values_per_parameter'], {}), '(10, 100, opt.how_many_values_per_parameter)\n', (12444, 12488), True, 'import numpy as np\n'), ((14443, 14460), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (14458, 14460), False, 'import virtnet\n'), ((14816, 14833), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (14831, 14833), False, 'import virtnet\n'), ((15290, 15307), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (15305, 15307), False, 'import virtnet\n'), ((15712, 15729), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (15727, 15729), False, 'import virtnet\n'), ((12530, 12584), 'numpy.linspace', 'np.linspace', (['(1)', '(100)', 'opt.how_many_values_per_parameter'], {}), '(1, 100, opt.how_many_values_per_parameter)\n', (12541, 12584), True, 'import numpy as np\n'), ((12975, 12994), 'os.listdir', 'os.listdir', (['"""pcaps"""'], {}), "('pcaps')\n", (12985, 12994), False, 'import sys, os\n'), ((16038, 16055), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (16053, 16055), False, 'import virtnet\n'), ((16235, 16252), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (16250, 16252), False, 'import virtnet\n'), ((11076, 11093), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (11091, 11093), False, 'import virtnet\n'), ((12772, 12789), 'virtnet.Manager', 'virtnet.Manager', ([], {}), '()\n', (12787, 12789), False, 'import virtnet\n'), 
((13004, 13054), 'fnmatch.fnmatch', 'fnmatch.fnmatch', (['file', 'f"""sender_*{timestamp}.pcap"""'], {}), "(file, f'sender_*{timestamp}.pcap')\n", (13019, 13054), False, 'import fnmatch\n'), ((4322, 4348), 'math.ceil', 'math.ceil', (['opt.buffer_size'], {}), '(opt.buffer_size)\n', (4331, 4348), False, 'import math\n'), ((4420, 4446), 'math.ceil', 'math.ceil', (['opt.buffer_size'], {}), '(opt.buffer_size)\n', (4429, 4446), False, 'import math\n')] |
import matplotlib.pyplot as plt
import os
import numpy as np
from argparse import ArgumentParser
from functools import partial
from scipy import stats
from collections import namedtuple, OrderedDict, defaultdict
from typing import Any, Dict, List, Optional, DefaultDict
from adaptiveleak.utils.constants import POLICIES, ENCODING
from adaptiveleak.utils.file_utils import read_json_gz
from adaptiveleak.analysis.plot_utils import COLORS, to_label, geometric_mean, MARKER, MARKER_SIZE, LINE_WIDTH, PLOT_STYLE
from adaptiveleak.analysis.plot_utils import PLOT_SIZE, AXIS_FONT, LEGEND_FONT, TITLE_FONT
from adaptiveleak.analysis.plot_utils import extract_results, iterate_policy_folders, dataset_label
def aggregate_for_collect_level(sim_results: Dict[str, Dict[str, Dict[float, float]]]) -> Dict[str, float]:
    """Average each policy's error, normalized against the uniform baseline.

    For every dataset, the per-budget error of each policy is reduced to the
    mean difference from the ``uniform_standard`` policy on the same dataset.
    Those per-dataset means are then averaged across datasets.  Policies whose
    name contains ``'unshifted'`` are dropped from the returned mapping.
    """
    per_policy: DefaultDict[str, List[float]] = defaultdict(list)
    for dataset_results in sim_results.values():
        # Baseline error values for this dataset.
        baseline = dataset_results['uniform_standard']
        for policy_name, policy_errors in dataset_results.items():
            deltas = [policy_errors[budget] - baseline[budget]
                      for budget in sorted(policy_errors)]
            per_policy[policy_name].append(np.average(deltas))
    return {
        name: np.average(values)
        for name, values in per_policy.items()
        if 'unshifted' not in name
    }
def plot(level_results: Dict[str, Dict[str, float]], levels: List[str], output_file: Optional[str]):
    """Plot each policy's normalized error across collection energy levels.

    Shows the figure interactively when *output_file* is None; otherwise
    saves it to that path.
    """
    with plt.style.context(PLOT_STYLE):
        fig, ax = plt.subplots(figsize=PLOT_SIZE)
        x_positions = list(range(len(levels)))
        for policy_name in level_results:
            errors: List[float] = [level_results[policy_name][lvl] for lvl in levels]
            ax.plot(x_positions,
                    errors,
                    marker=MARKER,
                    linewidth=LINE_WIDTH,
                    markersize=MARKER_SIZE,
                    label=to_label(policy_name),
                    color=COLORS[policy_name])
        ax.set_xlabel('Collect Energy Level', fontsize=AXIS_FONT)
        ax.set_ylabel('Avg MAE Normalized to Uniform', fontsize=AXIS_FONT)
        ax.set_title('Average Reconstruction Error for Collection Energy Levels', fontsize=TITLE_FONT)
        ax.legend(fontsize=LEGEND_FONT)
        if output_file is not None:
            plt.savefig(output_file, bbox_inches='tight')
        else:
            plt.show()
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--levels', type=str, nargs='+', required=True)
    parser.add_argument('--datasets', type=str, nargs='+', required=True)
    parser.add_argument('--output-file', type=str)
    args = parser.parse_args()
    # Extract the MAE of every policy run without aggregating across trials.
    extract_fn = partial(extract_results, field='mae', aggregate_mode=None)
    # policy name -> {collect level -> avg normalized error}
    level_results: DefaultDict[str, Dict[str, float]] = defaultdict(dict)
    for level in args.levels:
        # dataset name -> {policy name -> {budget -> error}}
        dataset_results: Dict[str, Dict[str, Dict[float, float]]] = dict()
        for dataset_name in args.datasets:
            policy_folders = list(iterate_policy_folders([level], dataset=dataset_name))
            sim_results = {name: res for name, res in map(extract_fn, policy_folders)}
            dataset_results[dataset_name] = sim_results
        # Collapse all datasets into one normalized score per policy.
        agg_results = aggregate_for_collect_level(dataset_results)
        for policy_name, error in agg_results.items():
            level_results[policy_name][level] = error
    plot(level_results, levels=args.levels, output_file=args.output_file)
| [
"functools.partial",
"numpy.average",
"argparse.ArgumentParser",
"matplotlib.pyplot.show",
"adaptiveleak.analysis.plot_utils.iterate_policy_folders",
"matplotlib.pyplot.style.context",
"collections.defaultdict",
"adaptiveleak.analysis.plot_utils.to_label",
"matplotlib.pyplot.subplots",
"matplotlib... | [((862, 879), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (873, 879), False, 'from collections import namedtuple, OrderedDict, defaultdict\n'), ((2849, 2865), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (2863, 2865), False, 'from argparse import ArgumentParser\n'), ((3112, 3170), 'functools.partial', 'partial', (['extract_results'], {'field': '"""mae"""', 'aggregate_mode': 'None'}), "(extract_results, field='mae', aggregate_mode=None)\n", (3119, 3170), False, 'from functools import partial\n'), ((3228, 3245), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (3239, 3245), False, 'from collections import namedtuple, OrderedDict, defaultdict\n'), ((1772, 1801), 'matplotlib.pyplot.style.context', 'plt.style.context', (['PLOT_STYLE'], {}), '(PLOT_STYLE)\n', (1789, 1801), True, 'import matplotlib.pyplot as plt\n'), ((1821, 1852), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'PLOT_SIZE'}), '(figsize=PLOT_SIZE)\n', (1833, 1852), True, 'import matplotlib.pyplot as plt\n'), ((1351, 1374), 'numpy.average', 'np.average', (['norm_errors'], {}), '(norm_errors)\n', (1361, 1374), True, 'import numpy as np\n'), ((1616, 1639), 'numpy.average', 'np.average', (['norm_errors'], {}), '(norm_errors)\n', (1626, 1639), True, 'import numpy as np\n'), ((2716, 2726), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2724, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2753, 2798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output_file'], {'bbox_inches': '"""tight"""'}), "(output_file, bbox_inches='tight')\n", (2764, 2798), True, 'import matplotlib.pyplot as plt\n'), ((3438, 3491), 'adaptiveleak.analysis.plot_utils.iterate_policy_folders', 'iterate_policy_folders', (['[level]'], {'dataset': 'dataset_name'}), '([level], dataset=dataset_name)\n', (3460, 3491), False, 'from adaptiveleak.analysis.plot_utils import extract_results, iterate_policy_folders, dataset_label\n'), ((2158, 
2179), 'adaptiveleak.analysis.plot_utils.to_label', 'to_label', (['policy_name'], {}), '(policy_name)\n', (2166, 2179), False, 'from adaptiveleak.analysis.plot_utils import COLORS, to_label, geometric_mean, MARKER, MARKER_SIZE, LINE_WIDTH, PLOT_STYLE\n')] |
import os
import numpy as np
import math
import rasterio.features
import shapely.ops
import shapely.wkt
import shapely.geometry
import pandas as pd
import cv2
from scipy import ndimage as ndi
from skimage.morphology import watershed
from tqdm import tqdm
from fire import Fire
import matplotlib.pyplot as plt
import shutil
from shapely.geometry import shape
from shapely.geometry import Polygon
import geopandas as gpd
from multiprocessing.pool import Pool
from multiprocessing import cpu_count
def calculate_iou(pred_poly, test_data_GDF):
    """Score a predicted polygon against every intersecting ground-truth one.

    Adapted from solaris' ``eval.iou``, but additionally records the index of
    the matched row of ``test_data_GDF``.

    Arguments
    ---------
    pred_poly : :py:class:`shapely.Polygon`
        Prediction polygon to test.
    test_data_GDF : :py:class:`geopandas.GeoDataFrame`
        GeoDataFrame of ground truth polygons to test ``pred_poly`` against.

    Returns
    -------
    iou_GDF : :py:class:`geopandas.GeoDataFrame`
        The rows of ``test_data_GDF`` that intersect ``pred_poly``, each with
        an ``iou_score`` column (intersection over union with ``pred_poly``)
        and a ``gt_idx`` column (the row's original index, or -1 when either
        polygon was invalid).
    """
    # A zero-width buffer repairs bowties and self-intersections.
    if not pred_poly.is_valid:
        pred_poly = pred_poly.buffer(0.0)
    candidates = test_data_GDF[test_data_GDF.intersects(pred_poly)]
    scored_rows = []
    for gt_index, gt_row in candidates.iterrows():
        gt_poly = gt_row.geometry
        # Invalid polygons get a zero score instead of a geometric test.
        if pred_poly.is_valid and gt_poly.is_valid:
            inter_area = pred_poly.intersection(gt_poly).area
            union_area = pred_poly.union(gt_poly).area
            score = inter_area / float(union_area)
            matched_idx = gt_index
        else:
            score = 0
            matched_idx = -1
        gt_row['iou_score'] = score
        gt_row['gt_idx'] = matched_idx
        scored_rows.append(gt_row)
    return gpd.GeoDataFrame(scored_rows)
def map_wrapper(x):
    """Unpack ``x = (callable, *args)`` and invoke it; helper for Pool.map."""
    func, *call_args = x
    return func(*call_args)
def track_footprint_identifiers(json_dir, out_dir,
min_iou=0.25, iou_field='iou_score', id_field='Id',
reverse_order=False,
verbose=True, super_verbose=False):
'''
Track footprint identifiers in the deep time stack.
We need to track the global gdf instead of just the gdf of t-1.
'''
os.makedirs(out_dir, exist_ok=True)
# set columns for master gdf
gdf_master_columns = [id_field, iou_field, 'area', 'geometry']
json_files = sorted([f
for f in os.listdir(os.path.join(json_dir))
if f.endswith('.geojson') and os.path.exists(os.path.join(json_dir, f))])
# start at the end and work backwards?
if reverse_order:
json_files = json_files[::-1]
# check if only partical matching has been done (this will cause errors)
out_files_tmp = sorted([z for z in os.listdir(out_dir) if z.endswith('.geojson')])
if len(out_files_tmp) > 0:
if len(out_files_tmp) != len(json_files):
raise Exception("\nError in:", out_dir, "with N =", len(out_files_tmp),
"files, need to purge this folder and restart matching!\n")
return
elif len(out_files_tmp) == len(json_files):
print("\nDir:", os.path.basename(out_dir), "N files:", len(json_files),
"directory matching completed, skipping...")
return
else:
print("\nMatching json_dir: ", os.path.basename(json_dir), "N json:", len(json_files))
gdf_dict = {}
for j, f in enumerate(json_files):
name_root = f.split('.')[0]
json_path = os.path.join(json_dir, f)
output_path = os.path.join(out_dir, f)
if verbose and ((j % 1) == 0):
print(" ", j, "/", len(json_files), "for", os.path.basename(json_dir), "=", name_root)
# gdf
gdf_now = gpd.read_file(json_path)
# drop value if it exists
# gdf_now = gdf_now.drop(columns=['value'])
# get area
gdf_now['area'] = gdf_now['geometry'].area
# initialize iou, id
gdf_now[iou_field] = -1
gdf_now[id_field] = -1
# sort by reverse area
gdf_now.sort_values(by=['area'], ascending=False, inplace=True)
gdf_now = gdf_now.reset_index(drop=True)
# reorder columns (if needed)
gdf_now = gdf_now[gdf_master_columns]
id_set = set([])
if verbose:
print("\n")
print("", j, "file_name:", f)
print(" ", "gdf_now.columns:", gdf_now.columns)
if j == 0:
# Establish initial footprints at Epoch0
# set id
gdf_now[id_field] = gdf_now.index.values
gdf_now[iou_field] = 0
n_new = len(gdf_now)
n_matched = 0
id_set = set(gdf_now[id_field].values)
gdf_master_Out = gdf_now.copy(deep=True)
# gdf_dict[f] = gdf_now
else:
# match buildings in epochT to epochT-1
# see: https://github.com/CosmiQ/solaris/blob/master/solaris/eval/base.py
# print("gdf_master;", gdf_dict['master']) #gdf_master)
gdf_master_Out = gdf_dict['master'].copy(deep=True)
gdf_master_Edit = gdf_dict['master'].copy(deep=True)
if verbose:
print(" len gdf_now:", len(gdf_now), "len(gdf_master):", len(gdf_master_Out),
"max master id:", np.max(gdf_master_Out[id_field]))
print(" gdf_master_Edit.columns:", gdf_master_Edit.columns)
new_id = np.max(gdf_master_Edit[id_field]) + 1
# if verbose:
# print("new_id:", new_id)
idx = 0
n_new = 0
n_matched = 0
for pred_idx, pred_row in gdf_now.iterrows():
if verbose:
if (idx % 1000) == 0:
print(" ", name_root, idx, "/", len(gdf_now))
if super_verbose:
# print(" ", i, j, idx, "/", len(gdf_now))
print(" ", idx, "/", len(gdf_now))
idx += 1
pred_poly = pred_row.geometry
# if super_verbose:
# print(" pred_poly.exterior.coords:", list(pred_poly.exterior.coords))
# get iou overlap
iou_GDF = calculate_iou(pred_poly, gdf_master_Edit)
# iou_GDF = iou.calculate_iou(pred_poly, gdf_master_Edit)
# print("iou_GDF:", iou_GDF)
# Get max iou
if not iou_GDF.empty:
max_iou_row = iou_GDF.loc[iou_GDF['iou_score'].idxmax(axis=0, skipna=True)]
# sometimes we are get an erroneous id of 0, caused by nan area,
# so check for this
max_area = max_iou_row.geometry.area
if max_area == 0 or math.isnan(max_area):
# print("nan area!", max_iou_row, "returning...")
raise Exception("\n Nan area!:", max_iou_row, "returning...")
return
id_match = max_iou_row[id_field]
if id_match in id_set:
print("Already seen id! returning...")
raise Exception("\n Already seen id!", id_match, "returning...")
return
# print("iou_GDF:", iou_GDF)
if max_iou_row['iou_score'] >= min_iou:
if super_verbose:
print(" pred_idx:", pred_idx, "match_id:", max_iou_row[id_field],
"max iou:", max_iou_row['iou_score'])
# we have a successful match, so set iou, and id
gdf_now.loc[pred_row.name, iou_field] = max_iou_row['iou_score']
gdf_now.loc[pred_row.name, id_field] = id_match
# drop matched polygon in ground truth
gdf_master_Edit = gdf_master_Edit.drop(max_iou_row.name, axis=0)
n_matched += 1
# # update gdf_master geometry?
# # Actually let's leave the geometry the same so it doesn't move around...
# gdf_master_Out.at[max_iou_row['gt_idx'], 'geometry'] = pred_poly
# gdf_master_Out.at[max_iou_row['gt_idx'], 'area'] = pred_poly.area
# gdf_master_Out.at[max_iou_row['gt_idx'], iou_field] = max_iou_row['iou_score']
else:
# no match,
if super_verbose:
print(" Minimal match! - pred_idx:", pred_idx, "match_id:",
max_iou_row[id_field], "max iou:", max_iou_row['iou_score'])
print(" Using new id:", new_id)
if (new_id in id_set) or (new_id == 0):
raise Exception("trying to add an id that already exists, returning!")
return
gdf_now.loc[pred_row.name, iou_field] = 0
gdf_now.loc[pred_row.name, id_field] = new_id
id_set.add(new_id)
# update master, cols = [id_field, iou_field, 'area', 'geometry']
gdf_master_Out.loc[new_id] = [new_id, 0, pred_poly.area, pred_poly]
new_id += 1
n_new += 1
else:
# no match (same exact code as right above)
if super_verbose:
print(" pred_idx:", pred_idx, "no overlap, new_id:", new_id)
if (new_id in id_set) or (new_id == 0):
raise Exception("trying to add an id that already exists, returning!")
return
gdf_now.loc[pred_row.name, iou_field] = 0
gdf_now.loc[pred_row.name, id_field] = new_id
id_set.add(new_id)
# update master, cols = [id_field, iou_field, 'area', 'geometry']
gdf_master_Out.loc[new_id] = [new_id, 0, pred_poly.area, pred_poly]
new_id += 1
n_new += 1
# print("gdf_now:", gdf_now)
gdf_dict[f] = gdf_now
gdf_dict['master'] = gdf_master_Out
# save!
if len(gdf_now) > 0:
gdf_now.to_file(output_path, driver="GeoJSON")
else:
print("Empty dataframe, writing empty gdf", output_path)
open(output_path, 'a').close()
if verbose:
print(" ", "N_new, N_matched:", n_new, n_matched)
return
pred_top_dir = '/wdata/'
# min_iou = 0.3
min_iou = 0.1
iou_field = 'iou_score'
id_field = 'Id'
reverse_order = False
verbose = True
super_verbose = False
n_threads = cpu_count()
json_dir_name = 'jsons_predicts/'
out_dir_name = 'pred_jsons_match/'
if os.path.exists('/wdata/pred_jsons_match/'):
shutil.rmtree('/wdata/pred_jsons_match/')
os.mkdir('/wdata/pred_jsons_match/')
aois = os.listdir('/wdata/jsons_predicts/')
print("aois:", aois)
print("Gather data for matching...")
params = []
for aoi in aois:
print(aoi)
json_dir = os.path.join(pred_top_dir, json_dir_name, aoi, )
out_dir = os.path.join(pred_top_dir, out_dir_name, aoi)
# check if we started matching...
if os.path.exists(out_dir):
# print(" outdir exists:", outdir)
json_files = sorted([f
for f in os.listdir(os.path.join(json_dir))
if f.endswith('.geojson') and os.path.exists(os.path.join(json_dir, f))])
out_files_tmp = sorted([z for z in os.listdir(out_dir) if z.endswith('.geojson')])
if len(out_files_tmp) > 0:
if len(out_files_tmp) == len(json_files):
print("Dir:", os.path.basename(out_dir), "N files:", len(json_files),
"directory matching completed, skipping...")
continue
elif len(out_files_tmp) != len(json_files):
# raise Exception("Incomplete matching in:", out_dir, "with N =", len(out_files_tmp),
# "files (should have N_gt =",
# len(json_files), "), need to purge this folder and restart matching!")
print("Incomplete matching in:", out_dir, "with N =", len(out_files_tmp),
"files (should have N_gt =",
len(json_files), "), purging this folder and restarting matching!")
purge_cmd = 'rm -r ' + out_dir
print(" purge_cmd:", purge_cmd)
if len(out_dir) > 20:
purge_cmd = 'rm -r ' + out_dir
else:
raise Exception("out_dir too short, maybe deleting something unintentionally...")
break
os.system(purge_cmd)
else:
pass
# track_footprint_identifiers(json_dir, out_dir, min_iou, iou_field, id_field, reverse_order, verbose, super_verbose)
params.append([track_footprint_identifiers, json_dir, out_dir, min_iou,
iou_field, id_field, reverse_order, verbose, super_verbose])
# print(params[0])
print("Len params:", len(params))
n_cpus = cpu_count()
pool = Pool(n_cpus)
for _ in tqdm(pool.imap_unordered(map_wrapper, params), total=len(params)):
pass | [
"os.mkdir",
"math.isnan",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"os.system",
"geopandas.GeoDataFrame",
"numpy.max",
"geopandas.read_file",
"multiprocessing.pool.Pool",
"shutil.rmtree",
"os.path.join",
"os.listdir",
"multiprocessing.cpu_count"
] | [((11288, 11299), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (11297, 11299), False, 'from multiprocessing import cpu_count\n'), ((11373, 11415), 'os.path.exists', 'os.path.exists', (['"""/wdata/pred_jsons_match/"""'], {}), "('/wdata/pred_jsons_match/')\n", (11387, 11415), False, 'import os\n'), ((11463, 11499), 'os.mkdir', 'os.mkdir', (['"""/wdata/pred_jsons_match/"""'], {}), "('/wdata/pred_jsons_match/')\n", (11471, 11499), False, 'import os\n'), ((11508, 11544), 'os.listdir', 'os.listdir', (['"""/wdata/jsons_predicts/"""'], {}), "('/wdata/jsons_predicts/')\n", (11518, 11544), False, 'import os\n'), ((13765, 13776), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (13774, 13776), False, 'from multiprocessing import cpu_count\n'), ((13784, 13796), 'multiprocessing.pool.Pool', 'Pool', (['n_cpus'], {}), '(n_cpus)\n', (13788, 13796), False, 'from multiprocessing.pool import Pool\n'), ((2065, 2095), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['iou_row_list'], {}), '(iou_row_list)\n', (2081, 2095), True, 'import geopandas as gpd\n'), ((2595, 2630), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (2606, 2630), False, 'import os\n'), ((11421, 11462), 'shutil.rmtree', 'shutil.rmtree', (['"""/wdata/pred_jsons_match/"""'], {}), "('/wdata/pred_jsons_match/')\n", (11434, 11462), False, 'import shutil\n'), ((11664, 11710), 'os.path.join', 'os.path.join', (['pred_top_dir', 'json_dir_name', 'aoi'], {}), '(pred_top_dir, json_dir_name, aoi)\n', (11676, 11710), False, 'import os\n'), ((11727, 11772), 'os.path.join', 'os.path.join', (['pred_top_dir', 'out_dir_name', 'aoi'], {}), '(pred_top_dir, out_dir_name, aoi)\n', (11739, 11772), False, 'import os\n'), ((11819, 11842), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (11833, 11842), False, 'import os\n'), ((3906, 3931), 'os.path.join', 'os.path.join', (['json_dir', 'f'], {}), '(json_dir, f)\n', (3918, 3931), False, 'import 
os\n'), ((3954, 3978), 'os.path.join', 'os.path.join', (['out_dir', 'f'], {}), '(out_dir, f)\n', (3966, 3978), False, 'import os\n'), ((4152, 4176), 'geopandas.read_file', 'gpd.read_file', (['json_path'], {}), '(json_path)\n', (4165, 4176), True, 'import geopandas as gpd\n'), ((3735, 3761), 'os.path.basename', 'os.path.basename', (['json_dir'], {}), '(json_dir)\n', (3751, 3761), False, 'import os\n'), ((3148, 3167), 'os.listdir', 'os.listdir', (['out_dir'], {}), '(out_dir)\n', (3158, 3167), False, 'import os\n'), ((4075, 4101), 'os.path.basename', 'os.path.basename', (['json_dir'], {}), '(json_dir)\n', (4091, 4101), False, 'import os\n'), ((5859, 5892), 'numpy.max', 'np.max', (['gdf_master_Edit[id_field]'], {}), '(gdf_master_Edit[id_field])\n', (5865, 5892), True, 'import numpy as np\n'), ((2805, 2827), 'os.path.join', 'os.path.join', (['json_dir'], {}), '(json_dir)\n', (2817, 2827), False, 'import os\n'), ((3548, 3573), 'os.path.basename', 'os.path.basename', (['out_dir'], {}), '(out_dir)\n', (3564, 3573), False, 'import os\n'), ((5725, 5757), 'numpy.max', 'np.max', (['gdf_master_Out[id_field]'], {}), '(gdf_master_Out[id_field])\n', (5731, 5757), True, 'import numpy as np\n'), ((12138, 12157), 'os.listdir', 'os.listdir', (['out_dir'], {}), '(out_dir)\n', (12148, 12157), False, 'import os\n'), ((12305, 12330), 'os.path.basename', 'os.path.basename', (['out_dir'], {}), '(out_dir)\n', (12321, 12330), False, 'import os\n'), ((13361, 13381), 'os.system', 'os.system', (['purge_cmd'], {}), '(purge_cmd)\n', (13370, 13381), False, 'import os\n'), ((2899, 2924), 'os.path.join', 'os.path.join', (['json_dir', 'f'], {}), '(json_dir, f)\n', (2911, 2924), False, 'import os\n'), ((7206, 7226), 'math.isnan', 'math.isnan', (['max_area'], {}), '(max_area)\n', (7216, 7226), False, 'import math\n'), ((11968, 11990), 'os.path.join', 'os.path.join', (['json_dir'], {}), '(json_dir)\n', (11980, 11990), False, 'import os\n'), ((12066, 12091), 'os.path.join', 'os.path.join', (['json_dir', 
'f'], {}), '(json_dir, f)\n', (12078, 12091), False, 'import os\n')] |
from math import ceil
import numpy as np
class GSOM:
    """A single growing self-organizing map (one level of a GHSOM hierarchy).

    The map starts at ``initial_map_size`` and keeps inserting whole rows or
    columns of neurons between the highest-quantization-error neuron and its
    most dissimilar neighbour until the mean quantization error drops below
    ``t1 * parent_quantization_error``.
    """

    def __init__(self, initial_map_size, parent_quantization_error, t1, data_size, weights_map, parent_dataset, neuron_builder):
        assert parent_dataset is not None, "Provided dataset is empty"
        self.__neuron_builder = neuron_builder
        self.__data_size = data_size
        self.__t1 = t1
        self.__parent_quantization_error = parent_quantization_error
        self.__initial_map_size = initial_map_size
        self.__parent_dataset = parent_dataset
        # wrapped in a one-element list so every neuron shares (and sees
        # in-place updates to) the same underlying weight array
        self.weights_map = [weights_map]
        self.neurons = self.__build_neurons_list()

    def winner_neuron(self, data):
        """Return the best-matching neuron for each data row.

        Returns:
            (winner_neurons, support_stuff): ``winner_neurons[i]`` is the
            neuron that wins data row ``i``; ``support_stuff[j]`` lists the
            row indices won by neuron ``j`` (same order as ``self.neurons``).
        """
        number_of_data = 1 if (len(data.shape) == 1) else data.shape[0]
        distances = np.empty(shape=(number_of_data, len(self.neurons.values())))
        neurons_list = list(self.neurons.values())
        for idx, neuron in enumerate(neurons_list):
            distances[:, idx] = neuron.activation(data)
        winner_neuron_per_data = distances.argmin(axis=1)
        support_stuff = [[position for position in np.where(winner_neuron_per_data == neuron_idx)[0]]
                         for neuron_idx in range(len(neurons_list))]
        winner_neurons = [neurons_list[idx] for idx in winner_neuron_per_data]
        return winner_neurons, support_stuff

    def train(self, epochs, initial_gaussian_sigma, initial_learning_rate, decay,
              dataset_percentage, min_dataset_size, seed, maxiter):
        """Alternate SOM training and growth until the map stops growing or maxiter is hit."""
        _iter = 0
        can_grow = True
        while can_grow and (_iter < maxiter):
            self.__neurons_training(decay, epochs, initial_learning_rate, initial_gaussian_sigma,
                                    dataset_percentage, min_dataset_size, seed)
            _iter += 1
            can_grow = self.__can_grow()
            if can_grow:
                self.grow()

        if can_grow:
            # maxiter was reached while still growing; re-associate data a last time
            self.__map_data_to_neurons()

        return self

    def __neurons_training(self, decay, epochs, learning_rate, sigma, dataset_percentage, min_dataset_size, seed):
        """Classic SOM epochs with exponentially decaying learning rate and sigma."""
        lr = learning_rate
        s = sigma
        for iteration in range(epochs):
            for data in self.__training_data(seed, dataset_percentage, min_dataset_size):
                self.__update_neurons(data, lr, s)

            lr *= decay
            s *= decay

    def __update_neurons(self, data, learning_rate, sigma):
        """Pull every neuron's weight toward ``data``, scaled by the winner-centred kernel."""
        gauss_kernel = self.__gaussian_kernel(self.winner_neuron(data)[0][0], sigma)
        for neuron in self.neurons.values():
            weight = neuron.weight_vector()
            weight += learning_rate * gauss_kernel[neuron.position] * (data - weight)
            self.weights_map[0][neuron.position] = weight

    def __gaussian_kernel(self, winner_neuron, gaussian_sigma):
        """2-D Gaussian neighbourhood kernel centred on the winner, as an outer product."""
        winner_row, winner_col = winner_neuron.position
        s = 2 * (gaussian_sigma ** 2)

        gauss_col = np.power(np.arange(self.map_shape()[1]) - winner_col, 2) / s
        gauss_row = np.power(np.arange(self.map_shape()[0]) - winner_row, 2) / s

        return np.outer(np.exp(-1 * gauss_row), np.exp(-1 * gauss_col))

    def __can_grow(self):
        """True when mean quantization error is still above the threshold
        and enough neurons changed in the last epoch."""
        self.__map_data_to_neurons()

        MQE = 0.0
        mapped_neurons = 0
        changed_neurons = 0
        for neuron in self.neurons.values():
            changed_neurons += 1 if neuron.has_changed_from_previous_epoch() else 0
            if neuron.has_dataset():
                MQE += neuron.compute_quantization_error()
                mapped_neurons += 1

        return ((MQE / mapped_neurons) >= (self.__t1 * self.__parent_quantization_error)) and \
               (changed_neurons > int(np.round(mapped_neurons/5)))

    def __map_data_to_neurons(self):
        """Re-assign every data row to its winning neuron's dataset."""
        self.__clear_neurons_dataset()

        # finding the new association for each neuron
        _, support_stuff = self.winner_neuron(self.__parent_dataset)
        neurons = list(self.neurons.values())
        for idx, data_idxs in enumerate(support_stuff):
            neurons[idx].replace_dataset(self.__parent_dataset[data_idxs, :])

    def __clear_neurons_dataset(self):
        for neuron in self.neurons.values():
            neuron.clear_dataset()

    def __find_error_neuron(self):
        """Return the neuron with the largest quantization error (unmapped neurons score -inf)."""
        quantization_errors = list()
        for neuron in self.neurons.values():
            quantization_error = -np.inf
            if neuron.has_dataset():
                quantization_error = neuron.compute_quantization_error()
            quantization_errors.append(quantization_error)

        # FIX: pass the shape positionally; the old ``dims=`` keyword was
        # deprecated in NumPy 1.16 and later removed in favour of ``shape``.
        idx = np.unravel_index(np.argmax(quantization_errors), self.map_shape())
        return self.neurons[idx]

    def __find_most_dissimilar_neuron(self, error_neuron):
        """Among the 4-neighbours of ``error_neuron``, pick the one with the most distant weight."""
        weight_distances = dict()
        for neuron in self.neurons.values():
            if self.are_neurons_neighbours(error_neuron, neuron):
                weight_distances[neuron] = error_neuron.weight_distance_from_other_unit(neuron)

        return max(weight_distances, key=weight_distances.get)

    def grow(self):
        """Insert a row or column between the error neuron and its most dissimilar neighbour."""
        error_neuron = self.__find_error_neuron()
        dissimilar_neuron = self.__find_most_dissimilar_neuron(error_neuron)

        if self.are_in_same_row(error_neuron, dissimilar_neuron):
            new_neuron_idxs = self.add_column_between(error_neuron, dissimilar_neuron)
            self.__init_new_neurons_weight_vector(new_neuron_idxs, "horizontal")
        elif self.are_in_same_column(error_neuron, dissimilar_neuron):
            new_neuron_idxs = self.add_row_between(error_neuron, dissimilar_neuron)
            self.__init_new_neurons_weight_vector(new_neuron_idxs, "vertical")
        else:
            raise RuntimeError("Error neuron and the most dissimilar are not adjacent")

    def add_column_between(self, error_neuron, dissimilar_neuron):
        """Insert an empty column between the two neurons, shifting later columns right.

        Returns the positions of the newly created neurons.
        """
        error_col = error_neuron.position[1]
        dissimilar_col = dissimilar_neuron.position[1]
        new_column_idx = max(error_col, dissimilar_col)
        map_rows, map_cols = self.map_shape()

        new_line_idx = [(row, new_column_idx) for row in range(map_rows)]

        # shift existing neurons right of the insertion point (right-to-left
        # so positions are never overwritten before being moved)
        for row in range(map_rows):
            for col in reversed(range(new_column_idx, map_cols)):
                new_idx = (row, col + 1)
                neuron = self.neurons.pop((row, col))
                neuron.position = new_idx
                self.neurons[new_idx] = neuron

        line = np.zeros(shape=(map_rows, self.__data_size), dtype=np.float32)
        self.weights_map[0] = np.insert(self.weights_map[0], new_column_idx, line, axis=1)

        return new_line_idx

    def add_row_between(self, error_neuron, dissimilar_neuron):
        """Insert an empty row between the two neurons, shifting later rows down.

        Returns the positions of the newly created neurons.
        """
        error_row = error_neuron.position[0]
        dissimilar_row = dissimilar_neuron.position[0]
        new_row_idx = max(error_row, dissimilar_row)
        map_rows, map_cols = self.map_shape()

        new_line_idx = [(new_row_idx, col) for col in range(map_cols)]

        # shift existing neurons below the insertion point (bottom-to-top)
        for row in reversed(range(new_row_idx, map_rows)):
            for col in range(map_cols):
                new_idx = (row + 1, col)
                neuron = self.neurons.pop((row, col))
                neuron.position = new_idx
                self.neurons[new_idx] = neuron

        line = np.zeros(shape=(map_cols, self.__data_size), dtype=np.float32)
        self.weights_map[0] = np.insert(self.weights_map[0], new_row_idx, line, axis=0)

        return new_line_idx

    def __init_new_neurons_weight_vector(self, new_neuron_idxs, new_line_direction):
        """Give every new neuron the mean weight of its two flanking neighbours."""
        for row, col in new_neuron_idxs:
            adjacent_neuron_idxs = self.__get_adjacent_neuron_idxs_by_direction(row, col, new_line_direction)
            weight_vector = self.__mean_weight_vector(adjacent_neuron_idxs)

            self.weights_map[0][row, col] = weight_vector
            self.neurons[(row, col)] = self.__build_neuron((row, col))

    def __mean_weight_vector(self, neuron_idxs):
        weight_vector = np.zeros(shape=self.__data_size, dtype=np.float32)
        for adjacent_idx in neuron_idxs:
            weight_vector += 0.5 * self.neurons[adjacent_idx].weight_vector()
        return weight_vector

    @staticmethod
    def __get_adjacent_neuron_idxs_by_direction(row, col, direction):
        """Positions of the two neurons flanking (row, col) along ``direction``."""
        adjacent_neuron_idxs = list()
        if direction == "horizontal":
            adjacent_neuron_idxs = [(row, col - 1), (row, col + 1)]

        elif direction == "vertical":
            adjacent_neuron_idxs = [(row - 1, col), (row + 1, col)]

        return adjacent_neuron_idxs

    @staticmethod
    def are_neurons_neighbours(first_neuron, second_neuron):
        """True when the two neurons are 4-connected (Manhattan distance 1)."""
        return np.linalg.norm(np.asarray(first_neuron.position) - np.asarray(second_neuron.position), ord=1) == 1

    @staticmethod
    def are_in_same_row(first_neuron, second_neuron):
        return abs(first_neuron.position[0] - second_neuron.position[0]) == 0

    @staticmethod
    def are_in_same_column(first_neuron, second_neuron):
        return abs(first_neuron.position[1] - second_neuron.position[1]) == 0

    def __build_neurons_list(self):
        rows, cols = self.__initial_map_size
        return {(x, y): self.__build_neuron((x, y)) for x in range(rows) for y in range(cols)}

    def __build_neuron(self, weight_position):
        return self.__neuron_builder.new_neuron(self.weights_map, weight_position)

    def map_shape(self):
        """(rows, cols) of the current weight map."""
        shape = self.weights_map[0].shape
        return shape[0], shape[1]

    def __training_data(self, seed, dataset_percentage, min_size):
        """Yield a random sample of the parent dataset (the whole set when it is small)."""
        dataset_size = len(self.__parent_dataset)
        if dataset_size <= min_size:
            iterator = range(dataset_size)
        else:
            iterator = range(int(ceil(dataset_size * dataset_percentage)))

        random_generator = np.random.RandomState(seed)
        for _ in iterator:
            yield self.__parent_dataset[random_generator.randint(dataset_size)]
| [
"numpy.argmax",
"math.ceil",
"numpy.asarray",
"numpy.zeros",
"numpy.random.RandomState",
"numpy.insert",
"numpy.where",
"numpy.exp",
"numpy.round"
] | [((6445, 6507), 'numpy.zeros', 'np.zeros', ([], {'shape': '(map_rows, self.__data_size)', 'dtype': 'np.float32'}), '(shape=(map_rows, self.__data_size), dtype=np.float32)\n', (6453, 6507), True, 'import numpy as np\n'), ((6538, 6598), 'numpy.insert', 'np.insert', (['self.weights_map[0]', 'new_column_idx', 'line'], {'axis': '(1)'}), '(self.weights_map[0], new_column_idx, line, axis=1)\n', (6547, 6598), True, 'import numpy as np\n'), ((7265, 7327), 'numpy.zeros', 'np.zeros', ([], {'shape': '(map_cols, self.__data_size)', 'dtype': 'np.float32'}), '(shape=(map_cols, self.__data_size), dtype=np.float32)\n', (7273, 7327), True, 'import numpy as np\n'), ((7358, 7415), 'numpy.insert', 'np.insert', (['self.weights_map[0]', 'new_row_idx', 'line'], {'axis': '(0)'}), '(self.weights_map[0], new_row_idx, line, axis=0)\n', (7367, 7415), True, 'import numpy as np\n'), ((7962, 8012), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.__data_size', 'dtype': 'np.float32'}), '(shape=self.__data_size, dtype=np.float32)\n', (7970, 8012), True, 'import numpy as np\n'), ((9762, 9789), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (9783, 9789), True, 'import numpy as np\n'), ((3109, 3131), 'numpy.exp', 'np.exp', (['(-1 * gauss_row)'], {}), '(-1 * gauss_row)\n', (3115, 3131), True, 'import numpy as np\n'), ((3133, 3155), 'numpy.exp', 'np.exp', (['(-1 * gauss_col)'], {}), '(-1 * gauss_col)\n', (3139, 3155), True, 'import numpy as np\n'), ((4624, 4654), 'numpy.argmax', 'np.argmax', (['quantization_errors'], {}), '(quantization_errors)\n', (4633, 4654), True, 'import numpy as np\n'), ((3692, 3720), 'numpy.round', 'np.round', (['(mapped_neurons / 5)'], {}), '(mapped_neurons / 5)\n', (3700, 3720), True, 'import numpy as np\n'), ((8648, 8681), 'numpy.asarray', 'np.asarray', (['first_neuron.position'], {}), '(first_neuron.position)\n', (8658, 8681), True, 'import numpy as np\n'), ((8684, 8718), 'numpy.asarray', 'np.asarray', (['second_neuron.position'], {}), 
'(second_neuron.position)\n', (8694, 8718), True, 'import numpy as np\n'), ((9692, 9731), 'math.ceil', 'ceil', (['(dataset_size * dataset_percentage)'], {}), '(dataset_size * dataset_percentage)\n', (9696, 9731), False, 'from math import ceil\n'), ((1092, 1138), 'numpy.where', 'np.where', (['(winner_neuron_per_data == neuron_idx)'], {}), '(winner_neuron_per_data == neuron_idx)\n', (1100, 1138), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import h5py
import copy
import math
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import r2_score
from util import transform_point_cloud, npmat2euler, quat2mat
def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of ``module``."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns the attended values and the attention weight matrix.
    Masked positions (mask == 0) are suppressed before the softmax.
    """
    scale = math.sqrt(query.size(-1))
    logits = torch.matmul(query, key.transpose(-2, -1).contiguous()) / scale
    if mask is not None:
        logits = logits.masked_fill(mask == 0, -1e9)
    weights = F.softmax(logits, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, value), weights
def pairwise_distance(src, tgt):
    """Pairwise Euclidean distances between point sets.

    Args:
        src: (batch, dims, n_src) point cloud.
        tgt: (batch, dims, n_tgt) point cloud.

    Returns:
        (batch, n_src, n_tgt) tensor of distances.
    """
    inner = -2 * torch.matmul(src.transpose(2, 1).contiguous(), tgt)
    xx = torch.sum(src**2, dim=1, keepdim=True)
    yy = torch.sum(tgt**2, dim=1, keepdim=True)
    distances = xx.transpose(2, 1).contiguous() + inner + yy
    # FIX: floating-point cancellation can make near-zero squared distances
    # slightly negative, which sqrt turns into NaN — clamp at zero first.
    return torch.sqrt(torch.clamp(distances, min=0.0))
def knn(x, k):
    """Indices of the k nearest neighbours of every point (self included).

    x is (batch, dims, num_points); returns (batch, num_points, k).
    Ranking uses negative squared Euclidean distance, so topk picks the closest.
    """
    dot = torch.matmul(x.transpose(2, 1).contiguous(), x)
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)
    neg_sq_dist = -sq_norm + 2 * dot - sq_norm.transpose(2, 1).contiguous()
    return neg_sq_dist.topk(k=k, dim=-1)[1]
def get_graph_feature(x, k=20):
    """Build DGCNN edge features: for each point, gather its k nearest
    neighbours and concatenate (neighbour_feature, point_feature).

    Args:
        x: (batch, dims, num_points) feature tensor (trailing singleton
           dimensions are dropped by the leading view).
        k: neighbourhood size.

    Returns:
        (batch, 2*dims, num_points, k) tensor.
    """
    # x = x.squeeze()
    x = x.view(*x.size()[:3])
    idx = knn(x, k=k)  # (batch_size, num_points, k)
    batch_size, num_points, _ = idx.size()
    # FIX: follow the input tensor's device instead of hard-coding CUDA,
    # so the function also runs on CPU-only machines.
    device = x.device

    # flatten (batch, point, neighbour) indices into one index over all points
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points

    idx = idx + idx_base

    idx = idx.view(-1)

    _, num_dims, _ = x.size()

    x = x.transpose(2, 1).contiguous()
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims)
    x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)

    feature = torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)

    return feature
def cycle_consistency(rotation_ab, translation_ab, rotation_ba, translation_ba):
    """Cycle-consistency loss: forward and backward transforms should be inverses.

    Penalizes R_ab @ R_ba deviating from identity and t_ab deviating from -t_ba.
    """
    batch_size = rotation_ab.size(0)
    eye = torch.eye(3, device=rotation_ab.device).unsqueeze(0).repeat(batch_size, 1, 1)
    rot_err = F.mse_loss(torch.matmul(rotation_ab, rotation_ba), eye)
    trans_err = F.mse_loss(translation_ab, -translation_ba)
    return rot_err + trans_err
class EncoderDecoder(nn.Module):
    """Generic encoder-decoder skeleton: embed, encode, decode, project.

    All five components are injected, which keeps this class a pure
    orchestrator usable by many concrete models.
    """

    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, src, tgt, src_mask, tgt_mask):
        """Encode src, then decode tgt against the resulting memory."""
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

    def encode(self, src, src_mask):
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self, memory, src_mask, tgt, tgt_mask):
        decoded = self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
        return self.generator(decoded)
class Generator(nn.Module):
    """Pool a per-point embedding and regress a unit quaternion plus translation."""

    def __init__(self, n_emb_dims):
        super(Generator, self).__init__()
        # three halving Linear/BatchNorm/ReLU stages: n -> n//2 -> n//4 -> n//8
        width = n_emb_dims
        stages = []
        for _ in range(3):
            stages.extend([nn.Linear(width, width // 2),
                           nn.BatchNorm1d(width // 2),
                           nn.ReLU()])
            width //= 2
        self.nn = nn.Sequential(*stages)
        self.proj_rot = nn.Linear(width, 4)
        self.proj_trans = nn.Linear(width, 3)

    def forward(self, x):
        # max-pool over the point dimension, then run the MLP trunk
        pooled = x.max(dim=1)[0]
        feat = self.nn(pooled)
        rotation = self.proj_rot(feat)
        translation = self.proj_trans(feat)
        # normalize to a unit quaternion
        rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
        return rotation, translation
class Encoder(nn.Module):
    """Stack of N identical encoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        """Pass the input (and mask) through every layer, then normalize."""
        out = x
        for layer in self.layers:
            out = layer(out, mask)
        return self.norm(out)
class Decoder(nn.Module):
    """Stack of N identical masked decoder layers followed by a final LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, memory, src_mask, tgt_mask):
        out = x
        for layer in self.layers:
            out = layer(out, memory, src_mask, tgt_mask)
        return self.norm(out)
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with learnable gain and bias."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        # a_2/b_2 names kept for checkpoint (state_dict) compatibility
        self.a_2 = nn.Parameter(torch.ones(features))
        self.b_2 = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        return self.a_2 * (x - mu) / (sigma + self.eps) + self.b_2
class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: x + dropout(sublayer(norm(x)))."""

    def __init__(self, size, dropout):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        # FIX: the Dropout module was constructed but never applied; the
        # standard transformer residual applies dropout to the sublayer
        # output. This is a no-op when dropout == 0 or in eval mode.
        return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
    """Encoder block: self-attention sublayer followed by a feed-forward sublayer."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 2)
        self.size = size

    def forward(self, x, mask):
        attended = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, mask))
        return self.sublayer[1](attended, self.feed_forward)
class DecoderLayer(nn.Module):
    """Decoder block: masked self-attention, source attention, then feed forward."""

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    def forward(self, x, memory, src_mask, tgt_mask):
        """Self-attend over the target, attend over the memory, then feed forward."""
        mem = memory
        out = self.sublayer[0](x, lambda t: self.self_attn(t, t, t, tgt_mask))
        out = self.sublayer[1](out, lambda t: self.src_attn(t, mem, mem, src_mask))
        return self.sublayer[2](out, self.feed_forward)
class MultiHeadedAttention(nn.Module):
    """Multi-head attention: project q/k/v, attend per head, recombine."""

    def __init__(self, h, d_model, dropout=0.0):
        """h: number of heads; d_model must be divisible by h (d_v == d_k)."""
        super(MultiHeadedAttention, self).__init__()
        assert d_model % h == 0
        self.d_k = d_model // h
        self.h = h
        # three input projections plus one output projection
        self.linears = clones(nn.Linear(d_model, d_model), 4)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None):
        if mask is not None:
            # broadcast the same mask over every head
            mask = mask.unsqueeze(1)
        nbatches = query.size(0)

        def split_heads(linear, tensor):
            # (B, T, d_model) -> (B, h, T, d_k)
            return linear(tensor).view(nbatches, -1, self.h, self.d_k).transpose(1, 2).contiguous()

        query, key, value = (split_heads(linear, tensor)
                             for linear, tensor in zip(self.linears, (query, key, value)))

        # attend per head
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)

        # merge heads back into d_model and apply the output projection
        x = x.transpose(1, 2).contiguous().view(nbatches, -1, self.h * self.d_k)
        return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise MLP with LeakyReLU activation and dropout."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = nn.Linear(d_model, d_ff)
        self.w_2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        hidden = F.leaky_relu(self.w_1(x), negative_slope=0.2)
        return self.w_2(self.dropout(hidden))
class PointNet(nn.Module):
    """Per-point shared MLP (1x1 convolutions) lifting inputs to n_emb_dims channels."""

    def __init__(self, n_emb_dims=512):
        super(PointNet, self).__init__()
        # attribute names conv1..conv5 / bn1..bn5 are kept so checkpoints
        # saved against the original definition still load
        channels = [3, 64, 64, 64, 128, n_emb_dims]
        for i in range(5):
            setattr(self, 'conv%d' % (i + 1),
                    nn.Conv1d(channels[i], channels[i + 1], kernel_size=1, bias=False))
            setattr(self, 'bn%d' % (i + 1), nn.BatchNorm1d(channels[i + 1]))

    def forward(self, x):
        out = x
        for i in range(5):
            conv = getattr(self, 'conv%d' % (i + 1))
            bn = getattr(self, 'bn%d' % (i + 1))
            out = F.relu(bn(conv(out)))
        return out
class DGCNN(nn.Module):
    """Dynamic Graph CNN: four EdgeConv stages whose outputs are concatenated
    and projected to n_emb_dims channels."""

    def __init__(self, n_emb_dims=512):
        super(DGCNN, self).__init__()
        # edge features double the channel count, hence the *2 input widths
        self.conv1 = nn.Conv2d(6, 64, kernel_size=1, bias=False)
        self.conv2 = nn.Conv2d(64 * 2, 64, kernel_size=1, bias=False)
        self.conv3 = nn.Conv2d(64 * 2, 128, kernel_size=1, bias=False)
        self.conv4 = nn.Conv2d(128 * 2, 256, kernel_size=1, bias=False)
        self.conv5 = nn.Conv2d(512, n_emb_dims, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(256)
        self.bn5 = nn.BatchNorm2d(n_emb_dims)

    def forward(self, x):
        batch_size, _, num_points = x.size()
        stage_outputs = []
        out = x
        for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                         (self.conv3, self.bn3), (self.conv4, self.bn4)):
            edge = get_graph_feature(out)
            activated = F.leaky_relu(bn(conv(edge)), negative_slope=0.2)
            # max over the k-neighbour dimension
            out = activated.max(dim=-1, keepdim=True)[0]
            stage_outputs.append(out)
        combined = torch.cat(stage_outputs, dim=1)  # 64+64+128+256 = 512 channels
        combined = F.leaky_relu(self.bn5(self.conv5(combined)), negative_slope=0.2)
        return combined.view(batch_size, -1, num_points)
class MLPHead(nn.Module):
    """Regress a rigid transform (rotation matrix + translation) from the
    pooled concatenation of source and target embeddings."""

    def __init__(self, args):
        super(MLPHead, self).__init__()
        n_emb_dims = args.n_emb_dims
        self.n_emb_dims = n_emb_dims
        # MLP trunk: 2n -> n//2 -> n//4 -> n//8, each stage Linear/BN/ReLU
        widths = [n_emb_dims * 2, n_emb_dims // 2, n_emb_dims // 4, n_emb_dims // 8]
        stages = []
        for w_in, w_out in zip(widths[:-1], widths[1:]):
            stages.extend([nn.Linear(w_in, w_out), nn.BatchNorm1d(w_out), nn.ReLU()])
        self.nn = nn.Sequential(*stages)
        self.proj_rot = nn.Linear(n_emb_dims // 8, 4)
        self.proj_trans = nn.Linear(n_emb_dims // 8, 3)

    def forward(self, *input):
        src_embedding = input[0]
        tgt_embedding = input[1]
        pooled = torch.cat((src_embedding, tgt_embedding), dim=1).max(dim=-1)[0]
        feat = self.nn(pooled)
        rotation = self.proj_rot(feat)
        # normalize to a unit quaternion before converting to a matrix
        rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
        translation = self.proj_trans(feat)
        return quat2mat(rotation), translation
class Identity(nn.Module):
    """Pass-through module: returns its positional arguments unchanged, as a tuple."""

    def __init__(self):
        super(Identity, self).__init__()

    def forward(self, *input):
        return input
class Transformer(nn.Module):
    """Cross-attention feature exchange between two point-cloud embeddings.

    The same encoder-decoder is run in both directions (src->tgt and
    tgt->src) so each cloud's embedding is conditioned on the other.
    """

    def __init__(self, args):
        super(Transformer, self).__init__()
        self.n_emb_dims = args.n_emb_dims
        self.N = args.n_blocks
        self.dropout = args.dropout
        self.n_ff_dims = args.n_ff_dims
        self.n_heads = args.n_heads
        c = copy.deepcopy
        attn = MultiHeadedAttention(self.n_heads, self.n_emb_dims)
        ff = PositionwiseFeedForward(self.n_emb_dims, self.n_ff_dims, self.dropout)
        encoder = Encoder(EncoderLayer(self.n_emb_dims, c(attn), c(ff), self.dropout), self.N)
        decoder = Decoder(DecoderLayer(self.n_emb_dims, c(attn), c(attn), c(ff), self.dropout), self.N)
        # embeddings/generator are identity (empty Sequential): inputs are
        # already continuous features, not tokens
        self.model = EncoderDecoder(encoder, decoder,
                                    nn.Sequential(), nn.Sequential(), nn.Sequential())

    def forward(self, *input):
        # (batch, dims, points) -> (batch, points, dims) for attention
        src = input[0].transpose(2, 1).contiguous()
        tgt = input[1].transpose(2, 1).contiguous()
        tgt_embedding = self.model(src, tgt, None, None).transpose(2, 1).contiguous()
        src_embedding = self.model(tgt, src, None, None).transpose(2, 1).contiguous()
        return src_embedding, tgt_embedding
class TemperatureNet(nn.Module):
def __init__(self, args):
super(TemperatureNet, self).__init__()
self.n_emb_dims = args.n_emb_dims
self.temp_factor = args.temp_factor
self.nn = nn.Sequential(nn.Linear(self.n_emb_dims, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 1),
nn.ReLU())
self.feature_disparity = None
def forward(self, *input):
src_embedding = input[0]
tgt_embedding = input[1]
src_embedding = src_embedding.mean(dim=2)
tgt_embedding = tgt_embedding.mean(dim=2)
residual = torch.abs(src_embedding-tgt_embedding)
self.feature_disparity = residual
return torch.clamp(self.nn(residual), 1.0/self.temp_factor, 1.0*self.temp_factor), residual
class SVDHead(nn.Module):
    """Closed-form rigid-registration head (Procrustes/Kabsch via SVD).

    Builds soft (or hard, via Gumbel) correspondences between src and tgt
    from embedding similarity, then solves for the rotation and translation
    aligning src to its correspondences.
    """

    def __init__(self, args):
        super(SVDHead, self).__init__()
        self.n_emb_dims = args.n_emb_dims
        self.cat_sampler = args.cat_sampler
        # NOTE(review): self.reflect is never read in forward; kept for
        # state-dict compatibility with existing checkpoints.
        self.reflect = nn.Parameter(torch.eye(3), requires_grad=False)
        self.reflect[2, 2] = -1
        self.temperature = nn.Parameter(torch.ones(1)*0.5, requires_grad=True)
        self.my_iter = torch.ones(1)

    def forward(self, *input):
        """input: (src_embedding, tgt_embedding, src, tgt, temperature).

        Returns (R, t): rotation matrices (batch, 3, 3) and translations (batch, 3).
        """
        src_embedding = input[0]
        tgt_embedding = input[1]
        src = input[2]
        tgt = input[3]
        batch_size, num_dims, num_points = src.size()
        temperature = input[4].view(batch_size, 1, 1)

        if self.cat_sampler == 'softmax':
            d_k = src_embedding.size(1)
            scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
            scores = torch.softmax(temperature*scores, dim=2)
        elif self.cat_sampler == 'gumbel_softmax':
            d_k = src_embedding.size(1)
            scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
            scores = scores.view(batch_size*num_points, num_points)
            temperature = temperature.repeat(1, num_points, 1).view(-1, 1)
            scores = F.gumbel_softmax(scores, tau=temperature, hard=True)
            scores = scores.view(batch_size, num_points, num_points)
        else:
            raise Exception('not implemented')

        # soft correspondence of every src point inside tgt
        src_corr = torch.matmul(tgt, scores.transpose(2, 1).contiguous())

        src_centered = src - src.mean(dim=2, keepdim=True)
        src_corr_centered = src_corr - src_corr.mean(dim=2, keepdim=True)

        # cross-covariance; SVD is run on CPU per sample
        H = torch.matmul(src_centered, src_corr_centered.transpose(2, 1).contiguous()).cpu()

        R = []
        for i in range(src.size(0)):
            u, s, v = torch.svd(H[i])
            r = torch.matmul(v, u.transpose(1, 0)).contiguous()
            r_det = torch.det(r).item()
            # rescale the last singular direction by det(r) so that the
            # result is a proper rotation (det +1), not a reflection
            diag = torch.from_numpy(np.array([[1.0, 0, 0],
                                               [0, 1.0, 0],
                                               [0, 0, r_det]]).astype('float32')).to(v.device)
            r = torch.matmul(torch.matmul(v, diag), u.transpose(1, 0)).contiguous()
            R.append(r)

        # FIX: move the stacked rotations back to the input's device instead
        # of hard-coding .cuda(), so the head also runs on CPU-only machines.
        R = torch.stack(R, dim=0).to(src.device)

        t = torch.matmul(-R, src.mean(dim=2, keepdim=True)) + src_corr.mean(dim=2, keepdim=True)
        if self.training:
            self.my_iter += 1
        return R, t.view(batch_size, 3)
class KeyPointNet(nn.Module):
    """Keep the `num_keypoints` points whose embeddings have the largest L2
    norm, for both the source and the target cloud.

    forward(src, tgt, src_embedding, tgt_embedding) returns the selected
    (batch, 3, k) keypoints and their (batch, dims, k) embeddings.
    """

    def __init__(self, num_keypoints):
        super(KeyPointNet, self).__init__()
        self.num_keypoints = num_keypoints

    def forward(self, *input):
        src, tgt = input[0], input[1]
        src_embedding, tgt_embedding = input[2], input[3]
        num_dims = src_embedding.size(1)

        def _select(points, embedding):
            # Rank points by embedding norm and gather the top-k columns
            # from both the coordinates and the embedding.
            norms = torch.norm(embedding, dim=1, keepdim=True)
            idx = torch.topk(norms, k=self.num_keypoints, dim=2, sorted=False)[1]
            keypoints = torch.gather(points, dim=2, index=idx.repeat(1, 3, 1))
            selected = torch.gather(embedding, dim=2, index=idx.repeat(1, num_dims, 1))
            return keypoints, selected

        src_keypoints, src_embedding = _select(src, src_embedding)
        tgt_keypoints, tgt_embedding = _select(tgt, tgt_embedding)
        return src_keypoints, tgt_keypoints, src_embedding, tgt_embedding
class ACPNet(nn.Module):
    """Backbone for PRNet: embed two clouds, co-attend them, keep the
    strongest keypoints, and regress the relative rigid transform."""

    def __init__(self, args):
        super(ACPNet, self).__init__()
        self.n_emb_dims = args.n_emb_dims
        self.num_keypoints = args.n_keypoints
        self.num_subsampled_points = args.n_subsampled_points
        self.logger = Logger(args)
        # Point-wise feature extractor.
        if args.emb_nn == 'pointnet':
            self.emb_nn = PointNet(n_emb_dims=self.n_emb_dims)
        elif args.emb_nn == 'dgcnn':
            self.emb_nn = DGCNN(n_emb_dims=self.n_emb_dims)
        else:
            raise Exception('Not implemented')
        # Cross-cloud conditioning module.
        if args.attention == 'identity':
            self.attention = Identity()
        elif args.attention == 'transformer':
            self.attention = Transformer(args=args)
        else:
            raise Exception("Not implemented")
        self.temp_net = TemperatureNet(args)
        # Transform-regression head.
        if args.head == 'mlp':
            self.head = MLPHead(args=args)
        elif args.head == 'svd':
            self.head = SVDHead(args=args)
        else:
            raise Exception('Not implemented')
        # Subsample only when the keypoint budget differs from the input size.
        if self.num_keypoints != self.num_subsampled_points:
            self.keypointnet = KeyPointNet(num_keypoints=self.num_keypoints)
        else:
            self.keypointnet = Identity()

    def forward(self, *input):
        """Return (R_ab, t_ab, R_ba, t_ba, feature_disparity) for one pass."""
        src, tgt, src_emb, tgt_emb, temperature, feature_disparity = self.predict_embedding(*input)
        rotation_ab, translation_ab = self.head(src_emb, tgt_emb, src, tgt, temperature)
        rotation_ba, translation_ba = self.head(tgt_emb, src_emb, tgt, src, temperature)
        return rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity

    def predict_embedding(self, *input):
        """Embed both clouds, add attention residuals, subsample keypoints,
        and compute the annealing temperature."""
        src, tgt = input[0], input[1]
        src_embedding = self.emb_nn(src)
        tgt_embedding = self.emb_nn(tgt)
        src_residual, tgt_residual = self.attention(src_embedding, tgt_embedding)
        src_embedding = src_embedding + src_residual
        tgt_embedding = tgt_embedding + tgt_residual
        src, tgt, src_embedding, tgt_embedding = self.keypointnet(src, tgt, src_embedding, tgt_embedding)
        temperature, feature_disparity = self.temp_net(src_embedding, tgt_embedding)
        return src, tgt, src_embedding, tgt_embedding, temperature, feature_disparity

    def predict_keypoint_correspondence(self, *input):
        """Return (src, tgt, scores): hard gumbel-softmax correspondence
        scores between the selected keypoints."""
        src, tgt, src_embedding, tgt_embedding, temperature, _ = self.predict_embedding(*input)
        batch_size, _, num_points = src.size()
        d_k = src_embedding.size(1)
        scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
        scores = scores.view(batch_size * num_points, num_points)
        temperature = temperature.repeat(1, num_points, 1).view(-1, 1)
        scores = F.gumbel_softmax(scores, tau=temperature, hard=True)
        scores = scores.view(batch_size, num_points, num_points)
        return src, tgt, scores
class PRNet(nn.Module):
    """Iterative point-cloud registration network.

    Wraps an ACPNet backbone and refines the rigid transform between two
    clouds over `num_iters` steps, re-warping the source cloud after each
    step and accumulating discounted losses.
    """

    def __init__(self, args):
        super(PRNet, self).__init__()
        self.num_iters = args.n_iters
        self.logger = Logger(args)
        self.discount_factor = args.discount_factor
        self.acpnet = ACPNet(args)
        self.model_path = args.model_path
        self.feature_alignment_loss = args.feature_alignment_loss
        self.cycle_consistency_loss = args.cycle_consistency_loss
        # BUGFIX: was `if self.model_path is not '':` — an identity test
        # against a string literal (SyntaxWarning on modern CPython, only
        # worked via string interning). Use an equality/truthiness check.
        if self.model_path != '':
            self.load(self.model_path)
        if torch.cuda.device_count() > 1:
            self.acpnet = nn.DataParallel(self.acpnet)

    def forward(self, *input):
        """One backbone pass: returns (R_ab, t_ab, R_ba, t_ba, feature_disparity)."""
        rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity = self.acpnet(*input)
        return rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity

    def predict(self, src, tgt, n_iters=3):
        """Iteratively estimate the transform mapping `src` onto `tgt`."""
        batch_size = src.size(0)
        rotation_ab_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
        translation_ab_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
        for i in range(n_iters):
            rotation_ab_pred_i, translation_ab_pred_i, _, _, _ = self.forward(src, tgt)
            # Compose the running estimate with this iteration's increment.
            rotation_ab_pred = torch.matmul(rotation_ab_pred_i, rotation_ab_pred)
            translation_ab_pred = torch.matmul(rotation_ab_pred_i, translation_ab_pred.unsqueeze(2)).squeeze(2) \
                                  + translation_ab_pred_i
            src = transform_point_cloud(src, rotation_ab_pred_i, translation_ab_pred_i)
        return rotation_ab_pred, translation_ab_pred

    def _iterate_losses(self, src, tgt, rotation_ab, translation_ab):
        """Shared train/test refinement loop: accumulate discounted losses.

        Returns (total_loss, feature_alignment, cycle_consistency,
        scale_consensus, rotation_ab_pred, translation_ab_pred).
        """
        batch_size = src.size(0)
        device = src.device
        identity = torch.eye(3, device=device).unsqueeze(0).repeat(batch_size, 1, 1)
        rotation_ab_pred = torch.eye(3, device=device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
        translation_ab_pred = torch.zeros(3, device=device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
        rotation_ba_pred = torch.eye(3, device=device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
        translation_ba_pred = torch.zeros(3, device=device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
        total_loss = 0
        total_feature_alignment_loss = 0
        total_cycle_consistency_loss = 0
        total_scale_consensus_loss = 0
        for i in range(self.num_iters):
            rotation_ab_pred_i, translation_ab_pred_i, rotation_ba_pred_i, translation_ba_pred_i, \
                feature_disparity = self.forward(src, tgt)
            # Compose running estimates with this iteration's increments.
            rotation_ab_pred = torch.matmul(rotation_ab_pred_i, rotation_ab_pred)
            translation_ab_pred = torch.matmul(rotation_ab_pred_i, translation_ab_pred.unsqueeze(2)).squeeze(2) \
                                  + translation_ab_pred_i
            rotation_ba_pred = torch.matmul(rotation_ba_pred_i, rotation_ba_pred)
            translation_ba_pred = torch.matmul(rotation_ba_pred_i, translation_ba_pred.unsqueeze(2)).squeeze(2) \
                                  + translation_ba_pred_i
            # Later iterations are discounted (discount_factor ** i).
            discount = self.discount_factor ** i
            loss = (F.mse_loss(torch.matmul(rotation_ab_pred.transpose(2, 1), rotation_ab), identity)
                    + F.mse_loss(translation_ab_pred, translation_ab)) * discount
            feature_alignment_loss = feature_disparity.mean() * self.feature_alignment_loss * discount
            cycle_consistency_loss = cycle_consistency(rotation_ab_pred_i, translation_ab_pred_i,
                                                       rotation_ba_pred_i, translation_ba_pred_i) \
                * self.cycle_consistency_loss * discount
            scale_consensus_loss = 0
            total_feature_alignment_loss += feature_alignment_loss
            total_cycle_consistency_loss += cycle_consistency_loss
            total_loss = total_loss + loss + feature_alignment_loss + cycle_consistency_loss + scale_consensus_loss
            # Warp the source by this step's increment before the next pass.
            src = transform_point_cloud(src, rotation_ab_pred_i, translation_ab_pred_i)
        return (total_loss, total_feature_alignment_loss, total_cycle_consistency_loss,
                total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred)

    def _train_one_batch(self, src, tgt, rotation_ab, translation_ab, opt):
        """One optimizer step; returns loss components and the final transform."""
        opt.zero_grad()
        (total_loss, total_feature_alignment_loss, total_cycle_consistency_loss,
         total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred) = \
            self._iterate_losses(src, tgt, rotation_ab, translation_ab)
        total_loss.backward()
        opt.step()
        return total_loss.item(), total_feature_alignment_loss.item(), total_cycle_consistency_loss.item(), \
               total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred

    def _test_one_batch(self, src, tgt, rotation_ab, translation_ab):
        """Evaluation-only version of `_train_one_batch` (no optimizer step)."""
        # no_grad: the original built a full autograd graph during evaluation,
        # wasting memory; results are unchanged (callers only read values).
        with torch.no_grad():
            (total_loss, total_feature_alignment_loss, total_cycle_consistency_loss,
             total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred) = \
                self._iterate_losses(src, tgt, rotation_ab, translation_ab)
        return total_loss.item(), total_feature_alignment_loss.item(), total_cycle_consistency_loss.item(), \
               total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred

    def _run_one_epoch(self, epoch, loader, stage, batch_fn):
        """Shared epoch loop: run `batch_fn` over `loader`, aggregate metrics,
        log them, and return the info dict."""
        total_loss = 0
        rotations_ab = []
        translations_ab = []
        rotations_ab_pred = []
        translations_ab_pred = []
        eulers_ab = []
        num_examples = 0
        total_feature_alignment_loss = 0.0
        total_cycle_consistency_loss = 0.0
        total_scale_consensus_loss = 0.0
        for data in tqdm(loader):
            src, tgt, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba = [d.cuda()
                                                                                                      for d in data]
            loss, feature_alignment_loss, cycle_consistency_loss, scale_consensus_loss, \
                rotation_ab_pred, translation_ab_pred = batch_fn(src, tgt, rotation_ab, translation_ab)
            batch_size = src.size(0)
            num_examples += batch_size
            # Losses are per-sample means; weight by batch size before averaging.
            total_loss = total_loss + loss * batch_size
            total_feature_alignment_loss = total_feature_alignment_loss + feature_alignment_loss * batch_size
            total_cycle_consistency_loss = total_cycle_consistency_loss + cycle_consistency_loss * batch_size
            total_scale_consensus_loss = total_scale_consensus_loss + scale_consensus_loss * batch_size
            rotations_ab.append(rotation_ab.detach().cpu().numpy())
            translations_ab.append(translation_ab.detach().cpu().numpy())
            rotations_ab_pred.append(rotation_ab_pred.detach().cpu().numpy())
            translations_ab_pred.append(translation_ab_pred.detach().cpu().numpy())
            eulers_ab.append(euler_ab.cpu().numpy())
        rotations_ab = np.concatenate(rotations_ab, axis=0)
        translations_ab = np.concatenate(translations_ab, axis=0)
        rotations_ab_pred = np.concatenate(rotations_ab_pred, axis=0)
        translations_ab_pred = np.concatenate(translations_ab_pred, axis=0)
        eulers_ab = np.degrees(np.concatenate(eulers_ab, axis=0))
        eulers_ab_pred = npmat2euler(rotations_ab_pred)
        r_ab_mse = np.mean((eulers_ab - eulers_ab_pred) ** 2)
        t_ab_mse = np.mean((translations_ab - translations_ab_pred) ** 2)
        info = {'arrow': 'A->B',
                'epoch': epoch,
                'stage': stage,
                'loss': total_loss / num_examples,
                'feature_alignment_loss': total_feature_alignment_loss / num_examples,
                'cycle_consistency_loss': total_cycle_consistency_loss / num_examples,
                'scale_consensus_loss': total_scale_consensus_loss / num_examples,
                'r_ab_mse': r_ab_mse,
                'r_ab_rmse': np.sqrt(r_ab_mse),
                'r_ab_mae': np.mean(np.abs(eulers_ab - eulers_ab_pred)),
                't_ab_mse': t_ab_mse,
                't_ab_rmse': np.sqrt(t_ab_mse),
                't_ab_mae': np.mean(np.abs(translations_ab - translations_ab_pred)),
                'r_ab_r2_score': r2_score(eulers_ab, eulers_ab_pred),
                't_ab_r2_score': r2_score(translations_ab, translations_ab_pred)}
        self.logger.write(info)
        return info

    def _train_one_epoch(self, epoch, train_loader, opt):
        self.train()

        def batch_fn(src, tgt, rotation_ab, translation_ab):
            # One optimizer step per batch.
            return self._train_one_batch(src, tgt, rotation_ab, translation_ab, opt)

        return self._run_one_epoch(epoch, train_loader, 'train', batch_fn)

    def _test_one_epoch(self, epoch, test_loader):
        self.eval()
        return self._run_one_epoch(epoch, test_loader, 'test', self._test_one_batch)

    def save(self, path):
        """Persist the backbone's state dict (unwrapping DataParallel)."""
        if torch.cuda.device_count() > 1:
            torch.save(self.acpnet.module.state_dict(), path)
        else:
            torch.save(self.acpnet.state_dict(), path)

    def load(self, path):
        """Load backbone weights from `path`.

        NOTE(review): no map_location is given, so CUDA checkpoints require a
        CUDA-capable machine — confirm before loading on CPU-only hosts.
        """
        self.acpnet.load_state_dict(torch.load(path))
class Logger:
    """Append experiment metadata and per-epoch metrics to
    checkpoints/<exp_name>/log, echoing everything to stdout.

    On construction, writes str(args) to the log and dumps args.__dict__ to
    args.txt in the same directory.
    """

    def __init__(self, args):
        self.path = 'checkpoints/' + args.exp_name
        self.fw = open(self.path + '/log', 'a')
        header = str(args)
        self.fw.write(header)
        self.fw.write('\n')
        self.fw.flush()
        print(header)
        with open(os.path.join(self.path, 'args.txt'), 'w') as f:
            json.dump(args.__dict__, f, indent=2)

    def write(self, info):
        """Format one epoch's metrics, append them to the log, and print them."""
        # Order must match the %-format placeholders below.
        fields = ('arrow', 'stage', 'epoch', 'loss', 'feature_alignment_loss',
                  'cycle_consistency_loss', 'scale_consensus_loss',
                  'r_ab_mse', 'r_ab_rmse', 'r_ab_mae',
                  'r_ab_r2_score', 't_ab_mse', 't_ab_rmse', 't_ab_mae',
                  't_ab_r2_score')
        text = '%s:: Stage: %s, Epoch: %d, Loss: %f, Feature_alignment_loss: %f, Cycle_consistency_loss: %f, ' \
               'Scale_consensus_loss: %f, Rot_MSE: %f, Rot_RMSE: %f, ' \
               'Rot_MAE: %f, Rot_R2: %f, Trans_MSE: %f, ' \
               'Trans_RMSE: %f, Trans_MAE: %f, Trans_R2: %f\n' % tuple(info[k] for k in fields)
        self.fw.write(text)
        self.fw.flush()
        print(text)

    def close(self):
        self.fw.close()
if __name__ == "__main__":
    # Import smoke test only; training is driven from a separate entry point.
    print('hello world')
| [
"torch.nn.Dropout",
"torch.eye",
"numpy.abs",
"torch.sqrt",
"sklearn.metrics.r2_score",
"torch.cat",
"util.transform_point_cloud",
"torch.cuda.device_count",
"util.npmat2euler",
"numpy.mean",
"torch.arange",
"torch.device",
"numpy.sqrt",
"os.path.join",
"torch.ones",
"torch.gather",
... | [((690, 715), 'torch.nn.functional.softmax', 'F.softmax', (['scores'], {'dim': '(-1)'}), '(scores, dim=-1)\n', (699, 715), True, 'import torch.nn.functional as F\n'), ((937, 977), 'torch.sum', 'torch.sum', (['(src ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(src ** 2, dim=1, keepdim=True)\n', (946, 977), False, 'import torch\n'), ((985, 1025), 'torch.sum', 'torch.sum', (['(tgt ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(tgt ** 2, dim=1, keepdim=True)\n', (994, 1025), False, 'import torch\n'), ((1096, 1117), 'torch.sqrt', 'torch.sqrt', (['distances'], {}), '(distances)\n', (1106, 1117), False, 'import torch\n'), ((1209, 1247), 'torch.sum', 'torch.sum', (['(x ** 2)'], {'dim': '(1)', 'keepdim': '(True)'}), '(x ** 2, dim=1, keepdim=True)\n', (1218, 1247), False, 'import torch\n'), ((1591, 1611), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (1603, 1611), False, 'import torch\n'), ((586, 600), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (595, 600), False, 'import math\n'), ((788, 815), 'torch.matmul', 'torch.matmul', (['p_attn', 'value'], {}), '(p_attn, value)\n', (800, 815), False, 'import torch\n'), ((2528, 2571), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['translation_ab', '(-translation_ba)'], {}), '(translation_ab, -translation_ba)\n', (2538, 2571), True, 'import torch.nn.functional as F\n'), ((4135, 4164), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 8)', '(4)'], {}), '(n_emb_dims // 8, 4)\n', (4144, 4164), True, 'import torch.nn as nn\n'), ((4189, 4218), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 8)', '(3)'], {}), '(n_emb_dims // 8, 3)\n', (4198, 4218), True, 'import torch.nn as nn\n'), ((5785, 5804), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (5795, 5804), True, 'import torch.nn as nn\n'), ((7551, 7572), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (7561, 7572), True, 'import torch.nn as nn\n'), ((8642, 8666), 'torch.nn.Linear', 'nn.Linear', 
(['d_model', 'd_ff'], {}), '(d_model, d_ff)\n', (8651, 8666), True, 'import torch.nn as nn\n'), ((8686, 8710), 'torch.nn.Linear', 'nn.Linear', (['d_ff', 'd_model'], {}), '(d_ff, d_model)\n', (8695, 8710), True, 'import torch.nn as nn\n'), ((8734, 8753), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (8744, 8753), True, 'import torch.nn as nn\n'), ((8997, 9040), 'torch.nn.Conv1d', 'nn.Conv1d', (['(3)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(3, 64, kernel_size=1, bias=False)\n', (9006, 9040), True, 'import torch.nn as nn\n'), ((9062, 9106), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64, 64, kernel_size=1, bias=False)\n', (9071, 9106), True, 'import torch.nn as nn\n'), ((9128, 9172), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64, 64, kernel_size=1, bias=False)\n', (9137, 9172), True, 'import torch.nn as nn\n'), ((9194, 9239), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(128)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64, 128, kernel_size=1, bias=False)\n', (9203, 9239), True, 'import torch.nn as nn\n'), ((9261, 9314), 'torch.nn.Conv1d', 'nn.Conv1d', (['(128)', 'n_emb_dims'], {'kernel_size': '(1)', 'bias': '(False)'}), '(128, n_emb_dims, kernel_size=1, bias=False)\n', (9270, 9314), True, 'import torch.nn as nn\n'), ((9334, 9352), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (9348, 9352), True, 'import torch.nn as nn\n'), ((9372, 9390), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (9386, 9390), True, 'import torch.nn as nn\n'), ((9410, 9428), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (9424, 9428), True, 'import torch.nn as nn\n'), ((9448, 9467), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (9462, 9467), True, 'import torch.nn as nn\n'), ((9487, 9513), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['n_emb_dims'], {}), 
'(n_emb_dims)\n', (9501, 9513), True, 'import torch.nn as nn\n'), ((9903, 9946), 'torch.nn.Conv2d', 'nn.Conv2d', (['(6)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(6, 64, kernel_size=1, bias=False)\n', (9912, 9946), True, 'import torch.nn as nn\n'), ((9968, 10016), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * 2)', '(64)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64 * 2, 64, kernel_size=1, bias=False)\n', (9977, 10016), True, 'import torch.nn as nn\n'), ((10036, 10085), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * 2)', '(128)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(64 * 2, 128, kernel_size=1, bias=False)\n', (10045, 10085), True, 'import torch.nn as nn\n'), ((10105, 10155), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 2)', '(256)'], {'kernel_size': '(1)', 'bias': '(False)'}), '(128 * 2, 256, kernel_size=1, bias=False)\n', (10114, 10155), True, 'import torch.nn as nn\n'), ((10175, 10228), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', 'n_emb_dims'], {'kernel_size': '(1)', 'bias': '(False)'}), '(512, n_emb_dims, kernel_size=1, bias=False)\n', (10184, 10228), True, 'import torch.nn as nn\n'), ((10248, 10266), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (10262, 10266), True, 'import torch.nn as nn\n'), ((10286, 10304), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (10300, 10304), True, 'import torch.nn as nn\n'), ((10324, 10343), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (10338, 10343), True, 'import torch.nn as nn\n'), ((10363, 10382), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (10377, 10382), True, 'import torch.nn as nn\n'), ((10402, 10428), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['n_emb_dims'], {}), '(n_emb_dims)\n', (10416, 10428), True, 'import torch.nn as nn\n'), ((11116, 11150), 'torch.cat', 'torch.cat', (['(x1, x2, x3, x4)'], {'dim': '(1)'}), '((x1, x2, x3, x4), dim=1)\n', (11125, 11150), False, 'import torch\n'), ((12004, 12033), 
'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 8)', '(4)'], {}), '(n_emb_dims // 8, 4)\n', (12013, 12033), True, 'import torch.nn as nn\n'), ((12058, 12087), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 8)', '(3)'], {}), '(n_emb_dims // 8, 3)\n', (12067, 12087), True, 'import torch.nn as nn\n'), ((12204, 12252), 'torch.cat', 'torch.cat', (['(src_embedding, tgt_embedding)'], {'dim': '(1)'}), '((src_embedding, tgt_embedding), dim=1)\n', (12213, 12252), False, 'import torch\n'), ((14929, 14969), 'torch.abs', 'torch.abs', (['(src_embedding - tgt_embedding)'], {}), '(src_embedding - tgt_embedding)\n', (14938, 14969), False, 'import torch\n'), ((15501, 15514), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (15511, 15514), False, 'import torch\n'), ((18011, 18057), 'torch.norm', 'torch.norm', (['src_embedding'], {'dim': '(1)', 'keepdim': '(True)'}), '(src_embedding, dim=1, keepdim=True)\n', (18021, 18057), False, 'import torch\n'), ((18077, 18123), 'torch.norm', 'torch.norm', (['tgt_embedding'], {'dim': '(1)', 'keepdim': '(True)'}), '(tgt_embedding, dim=1, keepdim=True)\n', (18087, 18123), False, 'import torch\n'), ((18571, 18620), 'torch.gather', 'torch.gather', (['src'], {'dim': '(2)', 'index': 'src_keypoints_idx'}), '(src, dim=2, index=src_keypoints_idx)\n', (18583, 18620), False, 'import torch\n'), ((18645, 18694), 'torch.gather', 'torch.gather', (['tgt'], {'dim': '(2)', 'index': 'tgt_keypoints_idx'}), '(tgt, dim=2, index=tgt_keypoints_idx)\n', (18657, 18694), False, 'import torch\n'), ((18728, 18787), 'torch.gather', 'torch.gather', (['src_embedding'], {'dim': '(2)', 'index': 'src_embedding_idx'}), '(src_embedding, dim=2, index=src_embedding_idx)\n', (18740, 18787), False, 'import torch\n'), ((18812, 18871), 'torch.gather', 'torch.gather', (['tgt_embedding'], {'dim': '(2)', 'index': 'tgt_embedding_idx'}), '(tgt_embedding, dim=2, index=tgt_embedding_idx)\n', (18824, 18871), False, 'import torch\n'), ((21770, 21822), 'torch.nn.functional.gumbel_softmax', 
'F.gumbel_softmax', (['scores'], {'tau': 'temperature', 'hard': '(True)'}), '(scores, tau=temperature, hard=True)\n', (21786, 21822), True, 'import torch.nn.functional as F\n'), ((29531, 29549), 'tqdm.tqdm', 'tqdm', (['train_loader'], {}), '(train_loader)\n', (29535, 29549), False, 'from tqdm import tqdm\n'), ((31188, 31224), 'numpy.concatenate', 'np.concatenate', (['rotations_ab'], {'axis': '(0)'}), '(rotations_ab, axis=0)\n', (31202, 31224), True, 'import numpy as np\n'), ((31251, 31290), 'numpy.concatenate', 'np.concatenate', (['translations_ab'], {'axis': '(0)'}), '(translations_ab, axis=0)\n', (31265, 31290), True, 'import numpy as np\n'), ((31319, 31360), 'numpy.concatenate', 'np.concatenate', (['rotations_ab_pred'], {'axis': '(0)'}), '(rotations_ab_pred, axis=0)\n', (31333, 31360), True, 'import numpy as np\n'), ((31392, 31436), 'numpy.concatenate', 'np.concatenate', (['translations_ab_pred'], {'axis': '(0)'}), '(translations_ab_pred, axis=0)\n', (31406, 31436), True, 'import numpy as np\n'), ((31528, 31558), 'util.npmat2euler', 'npmat2euler', (['rotations_ab_pred'], {}), '(rotations_ab_pred)\n', (31539, 31558), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((31578, 31620), 'numpy.mean', 'np.mean', (['((eulers_ab - eulers_ab_pred) ** 2)'], {}), '((eulers_ab - eulers_ab_pred) ** 2)\n', (31585, 31620), True, 'import numpy as np\n'), ((31637, 31654), 'numpy.sqrt', 'np.sqrt', (['r_ab_mse'], {}), '(r_ab_mse)\n', (31644, 31654), True, 'import numpy as np\n'), ((31735, 31789), 'numpy.mean', 'np.mean', (['((translations_ab - translations_ab_pred) ** 2)'], {}), '((translations_ab - translations_ab_pred) ** 2)\n', (31742, 31789), True, 'import numpy as np\n'), ((31806, 31823), 'numpy.sqrt', 'np.sqrt', (['t_ab_mse'], {}), '(t_ab_mse)\n', (31813, 31823), True, 'import numpy as np\n'), ((31921, 31956), 'sklearn.metrics.r2_score', 'r2_score', (['eulers_ab', 'eulers_ab_pred'], {}), '(eulers_ab, eulers_ab_pred)\n', (31929, 31956), False, 'from 
sklearn.metrics import r2_score\n'), ((31981, 32028), 'sklearn.metrics.r2_score', 'r2_score', (['translations_ab', 'translations_ab_pred'], {}), '(translations_ab, translations_ab_pred)\n', (31989, 32028), False, 'from sklearn.metrics import r2_score\n'), ((33158, 33175), 'tqdm.tqdm', 'tqdm', (['test_loader'], {}), '(test_loader)\n', (33162, 33175), False, 'from tqdm import tqdm\n'), ((34729, 34765), 'numpy.concatenate', 'np.concatenate', (['rotations_ab'], {'axis': '(0)'}), '(rotations_ab, axis=0)\n', (34743, 34765), True, 'import numpy as np\n'), ((34792, 34831), 'numpy.concatenate', 'np.concatenate', (['translations_ab'], {'axis': '(0)'}), '(translations_ab, axis=0)\n', (34806, 34831), True, 'import numpy as np\n'), ((34860, 34901), 'numpy.concatenate', 'np.concatenate', (['rotations_ab_pred'], {'axis': '(0)'}), '(rotations_ab_pred, axis=0)\n', (34874, 34901), True, 'import numpy as np\n'), ((34933, 34977), 'numpy.concatenate', 'np.concatenate', (['translations_ab_pred'], {'axis': '(0)'}), '(translations_ab_pred, axis=0)\n', (34947, 34977), True, 'import numpy as np\n'), ((35069, 35099), 'util.npmat2euler', 'npmat2euler', (['rotations_ab_pred'], {}), '(rotations_ab_pred)\n', (35080, 35099), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((35119, 35161), 'numpy.mean', 'np.mean', (['((eulers_ab - eulers_ab_pred) ** 2)'], {}), '((eulers_ab - eulers_ab_pred) ** 2)\n', (35126, 35161), True, 'import numpy as np\n'), ((35182, 35199), 'numpy.sqrt', 'np.sqrt', (['r_ab_mse'], {}), '(r_ab_mse)\n', (35189, 35199), True, 'import numpy as np\n'), ((35282, 35336), 'numpy.mean', 'np.mean', (['((translations_ab - translations_ab_pred) ** 2)'], {}), '((translations_ab - translations_ab_pred) ** 2)\n', (35289, 35336), True, 'import numpy as np\n'), ((35357, 35374), 'numpy.sqrt', 'np.sqrt', (['t_ab_mse'], {}), '(t_ab_mse)\n', (35364, 35374), True, 'import numpy as np\n'), ((35474, 35509), 'sklearn.metrics.r2_score', 'r2_score', (['eulers_ab', 
'eulers_ab_pred'], {}), '(eulers_ab, eulers_ab_pred)\n', (35482, 35509), False, 'from sklearn.metrics import r2_score\n'), ((35534, 35581), 'sklearn.metrics.r2_score', 'r2_score', (['translations_ab', 'translations_ab_pred'], {}), '(translations_ab, translations_ab_pred)\n', (35542, 35581), False, 'from sklearn.metrics import r2_score\n'), ((387, 408), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (400, 408), False, 'import copy\n'), ((2170, 2200), 'torch.cat', 'torch.cat', (['(feature, x)'], {'dim': '(3)'}), '((feature, x), dim=3)\n', (2179, 2200), False, 'import torch\n'), ((2476, 2514), 'torch.matmul', 'torch.matmul', (['rotation_ab', 'rotation_ba'], {}), '(rotation_ab, rotation_ba)\n', (2488, 2514), False, 'import torch\n'), ((3609, 3647), 'torch.nn.Linear', 'nn.Linear', (['n_emb_dims', '(n_emb_dims // 2)'], {}), '(n_emb_dims, n_emb_dims // 2)\n', (3618, 3647), True, 'import torch.nn as nn\n'), ((3679, 3710), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 2)'], {}), '(n_emb_dims // 2)\n', (3693, 3710), True, 'import torch.nn as nn\n'), ((3742, 3751), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3749, 3751), True, 'import torch.nn as nn\n'), ((3785, 3828), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 2)', '(n_emb_dims // 4)'], {}), '(n_emb_dims // 2, n_emb_dims // 4)\n', (3794, 3828), True, 'import torch.nn as nn\n'), ((3858, 3889), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 4)'], {}), '(n_emb_dims // 4)\n', (3872, 3889), True, 'import torch.nn as nn\n'), ((3921, 3930), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3928, 3930), True, 'import torch.nn as nn\n'), ((3964, 4007), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 4)', '(n_emb_dims // 8)'], {}), '(n_emb_dims // 4, n_emb_dims // 8)\n', (3973, 4007), True, 'import torch.nn as nn\n'), ((4037, 4068), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 8)'], {}), '(n_emb_dims // 8)\n', (4051, 4068), True, 'import torch.nn as nn\n'), ((4100, 
4109), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4107, 4109), True, 'import torch.nn as nn\n'), ((4388, 4434), 'torch.norm', 'torch.norm', (['rotation'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(rotation, p=2, dim=1, keepdim=True)\n', (4398, 4434), False, 'import torch\n'), ((5327, 5347), 'torch.ones', 'torch.ones', (['features'], {}), '(features)\n', (5337, 5347), False, 'import torch\n'), ((5381, 5402), 'torch.zeros', 'torch.zeros', (['features'], {}), '(features)\n', (5392, 5402), False, 'import torch\n'), ((7471, 7498), 'torch.nn.Linear', 'nn.Linear', (['d_model', 'd_model'], {}), '(d_model, d_model)\n', (7480, 7498), True, 'import torch.nn as nn\n'), ((11476, 11518), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims * 2)', '(n_emb_dims // 2)'], {}), '(n_emb_dims * 2, n_emb_dims // 2)\n', (11485, 11518), True, 'import torch.nn as nn\n'), ((11548, 11579), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 2)'], {}), '(n_emb_dims // 2)\n', (11562, 11579), True, 'import torch.nn as nn\n'), ((11611, 11620), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11618, 11620), True, 'import torch.nn as nn\n'), ((11654, 11697), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 2)', '(n_emb_dims // 4)'], {}), '(n_emb_dims // 2, n_emb_dims // 4)\n', (11663, 11697), True, 'import torch.nn as nn\n'), ((11727, 11758), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 4)'], {}), '(n_emb_dims // 4)\n', (11741, 11758), True, 'import torch.nn as nn\n'), ((11790, 11799), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (11797, 11799), True, 'import torch.nn as nn\n'), ((11833, 11876), 'torch.nn.Linear', 'nn.Linear', (['(n_emb_dims // 4)', '(n_emb_dims // 8)'], {}), '(n_emb_dims // 4, n_emb_dims // 8)\n', (11842, 11876), True, 'import torch.nn as nn\n'), ((11906, 11937), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(n_emb_dims // 8)'], {}), '(n_emb_dims // 8)\n', (11920, 11937), True, 'import torch.nn as nn\n'), ((11969, 11978), 'torch.nn.ReLU', 'nn.ReLU', 
([], {}), '()\n', (11976, 11978), True, 'import torch.nn as nn\n'), ((12381, 12427), 'torch.norm', 'torch.norm', (['rotation'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(rotation, p=2, dim=1, keepdim=True)\n', (12391, 12427), False, 'import torch\n'), ((12492, 12510), 'util.quat2mat', 'quat2mat', (['rotation'], {}), '(rotation)\n', (12500, 12510), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((13412, 13427), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (13425, 13427), True, 'import torch.nn as nn\n'), ((13465, 13480), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (13478, 13480), True, 'import torch.nn as nn\n'), ((13518, 13533), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (13531, 13533), True, 'import torch.nn as nn\n'), ((14153, 14184), 'torch.nn.Linear', 'nn.Linear', (['self.n_emb_dims', '(128)'], {}), '(self.n_emb_dims, 128)\n', (14162, 14184), True, 'import torch.nn as nn\n'), ((14218, 14237), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (14232, 14237), True, 'import torch.nn as nn\n'), ((14271, 14280), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14278, 14280), True, 'import torch.nn as nn\n'), ((14314, 14333), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (14323, 14333), True, 'import torch.nn as nn\n'), ((14367, 14386), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (14381, 14386), True, 'import torch.nn as nn\n'), ((14420, 14429), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14427, 14429), True, 'import torch.nn as nn\n'), ((14463, 14482), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (14472, 14482), True, 'import torch.nn as nn\n'), ((14516, 14535), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (14530, 14535), True, 'import torch.nn as nn\n'), ((14569, 14578), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14576, 14578), True, 'import torch.nn as 
nn\n'), ((14612, 14629), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(1)'], {}), '(128, 1)\n', (14621, 14629), True, 'import torch.nn as nn\n'), ((14663, 14672), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (14670, 14672), True, 'import torch.nn as nn\n'), ((15332, 15344), 'torch.eye', 'torch.eye', (['(3)'], {}), '(3)\n', (15341, 15344), False, 'import torch\n'), ((15981, 16023), 'torch.softmax', 'torch.softmax', (['(temperature * scores)'], {'dim': '(2)'}), '(temperature * scores, dim=2)\n', (15994, 16023), False, 'import torch\n'), ((16950, 16965), 'torch.svd', 'torch.svd', (['H[i]'], {}), '(H[i])\n', (16959, 16965), False, 'import torch\n'), ((18147, 18210), 'torch.topk', 'torch.topk', (['src_norm'], {'k': 'self.num_keypoints', 'dim': '(2)', 'sorted': '(False)'}), '(src_norm, k=self.num_keypoints, dim=2, sorted=False)\n', (18157, 18210), False, 'import torch\n'), ((18237, 18300), 'torch.topk', 'torch.topk', (['tgt_norm'], {'k': 'self.num_keypoints', 'dim': '(2)', 'sorted': '(False)'}), '(tgt_norm, k=self.num_keypoints, dim=2, sorted=False)\n', (18247, 18300), False, 'import torch\n'), ((21603, 21617), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (21612, 21617), False, 'import math\n'), ((22437, 22462), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (22460, 22462), False, 'import torch\n'), ((22494, 22522), 'torch.nn.DataParallel', 'nn.DataParallel', (['self.acpnet'], {}), '(self.acpnet)\n', (22509, 22522), True, 'import torch.nn as nn\n'), ((23274, 23324), 'torch.matmul', 'torch.matmul', (['rotation_ab_pred_i', 'rotation_ab_pred'], {}), '(rotation_ab_pred_i, rotation_ab_pred)\n', (23286, 23324), False, 'import torch\n'), ((23515, 23584), 'util.transform_point_cloud', 'transform_point_cloud', (['src', 'rotation_ab_pred_i', 'translation_ab_pred_i'], {}), '(src, rotation_ab_pred_i, translation_ab_pred_i)\n', (23536, 23584), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((24709, 24759), 'torch.matmul', 
'torch.matmul', (['rotation_ab_pred_i', 'rotation_ab_pred'], {}), '(rotation_ab_pred_i, rotation_ab_pred)\n', (24721, 24759), False, 'import torch\n'), ((24964, 25014), 'torch.matmul', 'torch.matmul', (['rotation_ba_pred_i', 'rotation_ba_pred'], {}), '(rotation_ba_pred_i, rotation_ba_pred)\n', (24976, 25014), False, 'import torch\n'), ((26102, 26171), 'util.transform_point_cloud', 'transform_point_cloud', (['src', 'rotation_ab_pred_i', 'translation_ab_pred_i'], {}), '(src, rotation_ab_pred_i, translation_ab_pred_i)\n', (26123, 26171), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((27452, 27502), 'torch.matmul', 'torch.matmul', (['rotation_ab_pred_i', 'rotation_ab_pred'], {}), '(rotation_ab_pred_i, rotation_ab_pred)\n', (27464, 27502), False, 'import torch\n'), ((27707, 27757), 'torch.matmul', 'torch.matmul', (['rotation_ba_pred_i', 'rotation_ba_pred'], {}), '(rotation_ba_pred_i, rotation_ba_pred)\n', (27719, 27757), False, 'import torch\n'), ((28852, 28921), 'util.transform_point_cloud', 'transform_point_cloud', (['src', 'rotation_ab_pred_i', 'translation_ab_pred_i'], {}), '(src, rotation_ab_pred_i, translation_ab_pred_i)\n', (28873, 28921), False, 'from util import transform_point_cloud, npmat2euler, quat2mat\n'), ((31468, 31501), 'numpy.concatenate', 'np.concatenate', (['eulers_ab'], {'axis': '(0)'}), '(eulers_ab, axis=0)\n', (31482, 31501), True, 'import numpy as np\n'), ((31682, 31716), 'numpy.abs', 'np.abs', (['(eulers_ab - eulers_ab_pred)'], {}), '(eulers_ab - eulers_ab_pred)\n', (31688, 31716), True, 'import numpy as np\n'), ((31851, 31897), 'numpy.abs', 'np.abs', (['(translations_ab - translations_ab_pred)'], {}), '(translations_ab - translations_ab_pred)\n', (31857, 31897), True, 'import numpy as np\n'), ((35009, 35042), 'numpy.concatenate', 'np.concatenate', (['eulers_ab'], {'axis': '(0)'}), '(eulers_ab, axis=0)\n', (35023, 35042), True, 'import numpy as np\n'), ((35227, 35261), 'numpy.abs', 'np.abs', (['(eulers_ab - 
eulers_ab_pred)'], {}), '(eulers_ab - eulers_ab_pred)\n', (35233, 35261), True, 'import numpy as np\n'), ((35402, 35448), 'numpy.abs', 'np.abs', (['(translations_ab - translations_ab_pred)'], {}), '(translations_ab - translations_ab_pred)\n', (35408, 35448), True, 'import numpy as np\n'), ((36339, 36364), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (36362, 36364), False, 'import torch\n'), ((36564, 36580), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (36574, 36580), False, 'import torch\n'), ((36913, 36950), 'json.dump', 'json.dump', (['args.__dict__', 'f'], {'indent': '(2)'}), '(args.__dict__, f, indent=2)\n', (36922, 36950), False, 'import json\n'), ((1628, 1670), 'torch.arange', 'torch.arange', (['(0)', 'batch_size'], {'device': 'device'}), '(0, batch_size, device=device)\n', (1640, 1670), False, 'import torch\n'), ((15439, 15452), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (15449, 15452), False, 'import torch\n'), ((15945, 15959), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (15954, 15959), False, 'import math\n'), ((16387, 16439), 'torch.nn.functional.gumbel_softmax', 'F.gumbel_softmax', (['scores'], {'tau': 'temperature', 'hard': '(True)'}), '(scores, tau=temperature, hard=True)\n', (16403, 16439), True, 'import torch.nn.functional as F\n'), ((17403, 17424), 'torch.stack', 'torch.stack', (['R'], {'dim': '(0)'}), '(R, dim=0)\n', (17414, 17424), False, 'import torch\n'), ((36853, 36888), 'os.path.join', 'os.path.join', (['self.path', '"""args.txt"""'], {}), "(self.path, 'args.txt')\n", (36865, 36888), False, 'import os\n'), ((2376, 2415), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'rotation_ab.device'}), '(3, device=rotation_ab.device)\n', (2385, 2415), False, 'import torch\n'), ((16208, 16222), 'math.sqrt', 'math.sqrt', (['d_k'], {}), '(d_k)\n', (16217, 16222), False, 'import math\n'), ((17050, 17062), 'torch.det', 'torch.det', (['r'], {}), '(r)\n', (17059, 17062), False, 'import torch\n'), ((25313, 
25360), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['translation_ab_pred', 'translation_ab'], {}), '(translation_ab_pred, translation_ab)\n', (25323, 25360), True, 'import torch.nn.functional as F\n'), ((28057, 28104), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['translation_ab_pred', 'translation_ab'], {}), '(translation_ab_pred, translation_ab)\n', (28067, 28104), True, 'import torch.nn.functional as F\n'), ((17311, 17332), 'torch.matmul', 'torch.matmul', (['v', 'diag'], {}), '(v, diag)\n', (17323, 17332), False, 'import torch\n'), ((22857, 22909), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (22866, 22909), False, 'import torch\n'), ((22979, 23033), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (22990, 23033), False, 'import torch\n'), ((23792, 23823), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device'}), '(3, device=src.device)\n', (23801, 23823), False, 'import torch\n'), ((23890, 23942), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (23899, 23942), False, 'import torch\n'), ((24012, 24066), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (24023, 24066), False, 'import torch\n'), ((24128, 24180), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (24137, 24180), False, 'import torch\n'), ((24250, 24304), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (24261, 24304), False, 'import torch\n'), ((26535, 26566), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device'}), '(3, device=src.device)\n', 
(26544, 26566), False, 'import torch\n'), ((26633, 26685), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (26642, 26685), False, 'import torch\n'), ((26755, 26809), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (26766, 26809), False, 'import torch\n'), ((26871, 26923), 'torch.eye', 'torch.eye', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (26880, 26923), False, 'import torch\n'), ((26993, 27047), 'torch.zeros', 'torch.zeros', (['(3)'], {'device': 'src.device', 'dtype': 'torch.float32'}), '(3, device=src.device, dtype=torch.float32)\n', (27004, 27047), False, 'import torch\n'), ((17106, 17157), 'numpy.array', 'np.array', (['[[1.0, 0, 0], [0, 1.0, 0], [0, 0, r_det]]'], {}), '([[1.0, 0, 0], [0, 1.0, 0], [0, 0, r_det]])\n', (17114, 17157), True, 'import numpy as np\n')] |
# Imports
import time
import sys
import numpy as np
# %%
def breath_animation(
animation_duration=30,
inhale_symbol="O",
exhale_symbol=".",
inhale_seconds=5,
exhale_seconds=5,
field_width=70,
):
"""
Parameters
----------
animation_duration : int
Number of seconds in the animation
inhale_symbol : char
The character symbol for inhaling
exhale_symbol : char
The character symbol for exhaling
inhale_seconds : int
Number of seconds per inhale
exhale_seconds : int
Number of seconds per exhale
field_width : int
The number of characters in the inhale/exhale print field
Returns
-------
None
"""
inhale_marker_times = np.linspace(0, inhale_seconds, field_width)
inhale_symbol_num = np.ceil(
(np.sin(np.linspace(-np.pi / 2, np.pi / 2, field_width)) + 1) * field_width / 2
)
exhale_marker_times = np.linspace(
inhale_seconds, inhale_seconds + exhale_seconds, field_width
)
exhale_symbol_num = inhale_symbol_num * -1 + field_width # Inverse Signal
start_time = time.time()
diff_time = time.time() - start_time
mod_time = diff_time % inhale_seconds
previous_marker_time = 0
breath_status = "inhale"
print_ready = False
while diff_time <= animation_duration:
if breath_status == ">>> INHALE >>>":
marker_time = np.where(inhale_marker_times >= mod_time)[0][0]
if marker_time > previous_marker_time:
inhale_num = int(inhale_symbol_num[marker_time])
exhale_num = field_width - inhale_num
print_ready = True
previous_marker_time = marker_time
diff_time = time.time() - start_time
mod_time = diff_time % (inhale_seconds + exhale_seconds)
if mod_time > inhale_seconds:
breath_status = "<<< exhale <<<"
previous_marker_time = 0
else:
marker_time = np.where(exhale_marker_times >= mod_time)[0][0]
if marker_time > previous_marker_time:
inhale_num = int(exhale_symbol_num[marker_time])
exhale_num = field_width - inhale_num
print_ready = True
previous_marker_time = marker_time
diff_time = time.time() - start_time
mod_time = diff_time % (inhale_seconds + exhale_seconds)
if mod_time < inhale_seconds:
breath_status = ">>> INHALE >>>"
previous_marker_time = 0
if print_ready:
state = inhale_symbol * inhale_num + exhale_symbol * exhale_num
output_string = breath_status + " [" + state + "] " + breath_status
sys.stdout.write("\r" + output_string)
sys.stdout.flush()
print_ready = False
return
#%%
if __name__ == "__main__":
breath_animation()
| [
"sys.stdout.write",
"time.time",
"numpy.where",
"sys.stdout.flush",
"numpy.linspace"
] | [((747, 790), 'numpy.linspace', 'np.linspace', (['(0)', 'inhale_seconds', 'field_width'], {}), '(0, inhale_seconds, field_width)\n', (758, 790), True, 'import numpy as np\n'), ((945, 1018), 'numpy.linspace', 'np.linspace', (['inhale_seconds', '(inhale_seconds + exhale_seconds)', 'field_width'], {}), '(inhale_seconds, inhale_seconds + exhale_seconds, field_width)\n', (956, 1018), True, 'import numpy as np\n'), ((1130, 1141), 'time.time', 'time.time', ([], {}), '()\n', (1139, 1141), False, 'import time\n'), ((1158, 1169), 'time.time', 'time.time', ([], {}), '()\n', (1167, 1169), False, 'import time\n'), ((2774, 2812), 'sys.stdout.write', 'sys.stdout.write', (["('\\r' + output_string)"], {}), "('\\r' + output_string)\n", (2790, 2812), False, 'import sys\n'), ((2825, 2843), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2841, 2843), False, 'import sys\n'), ((1756, 1767), 'time.time', 'time.time', ([], {}), '()\n', (1765, 1767), False, 'import time\n'), ((2354, 2365), 'time.time', 'time.time', ([], {}), '()\n', (2363, 2365), False, 'import time\n'), ((1426, 1467), 'numpy.where', 'np.where', (['(inhale_marker_times >= mod_time)'], {}), '(inhale_marker_times >= mod_time)\n', (1434, 1467), True, 'import numpy as np\n'), ((2024, 2065), 'numpy.where', 'np.where', (['(exhale_marker_times >= mod_time)'], {}), '(exhale_marker_times >= mod_time)\n', (2032, 2065), True, 'import numpy as np\n'), ((840, 887), 'numpy.linspace', 'np.linspace', (['(-np.pi / 2)', '(np.pi / 2)', 'field_width'], {}), '(-np.pi / 2, np.pi / 2, field_width)\n', (851, 887), True, 'import numpy as np\n')] |
# Lab 4 Multi-variable linear regression
import tensorflow as tf
import numpy as np
tf.set_random_seed(777)
# numpy 내장 함수
xy = np.loadtxt('data-01-test-score.csv', delimiter=',', dtype=np.float32)
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
#print(xy)
# shape과 data 확인
print(x_data.shape, x_data, len(x_data)) # 25x3, length : 25
print(y_data.shape, y_data) # 25x1
# placeholder 변수 생성
X = tf.placeholder(tf.float32, shape=[None, 3]) # predictor 3개
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([3, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# regression model
hypothesis = tf.matmul(X, W) + b
# Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
for step in range(2001):
cost_val, hy_val, _ = sess.run(
[cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
if step % 10 == 0:
print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
# 새로운 데이터로 predict (점수 예측)
print("Your score will be ", sess.run(
hypothesis, feed_dict={X: [[100, 70, 101]]}))
print("Other scores will be ", sess.run(hypothesis,
feed_dict={X: [[60, 70, 110], [90, 100, 80]]})) # 2개 데이터 한 번에 predict
'''
Your score will be [[ 181.73277283]]
Other scores will be [[ 145.86265564]
[ 187.23129272]]
''' | [
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"tensorflow.matmul",
"tensorflow.random_normal",
"numpy.loadtxt",
"tensorflow.square",
"tensorflow.train.GradientDescentOptimizer"
] | [((84, 107), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(777)'], {}), '(777)\n', (102, 107), True, 'import tensorflow as tf\n'), ((128, 197), 'numpy.loadtxt', 'np.loadtxt', (['"""data-01-test-score.csv"""'], {'delimiter': '""","""', 'dtype': 'np.float32'}), "('data-01-test-score.csv', delimiter=',', dtype=np.float32)\n", (138, 197), True, 'import numpy as np\n'), ((397, 440), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 3]'}), '(tf.float32, shape=[None, 3])\n', (411, 440), True, 'import tensorflow as tf\n'), ((464, 507), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, 1]'}), '(tf.float32, shape=[None, 1])\n', (478, 507), True, 'import tensorflow as tf\n'), ((777, 831), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': '(1e-05)'}), '(learning_rate=1e-05)\n', (810, 831), True, 'import tensorflow as tf\n'), ((905, 917), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (915, 917), True, 'import tensorflow as tf\n'), ((525, 549), 'tensorflow.random_normal', 'tf.random_normal', (['[3, 1]'], {}), '([3, 1])\n', (541, 549), True, 'import tensorflow as tf\n'), ((582, 603), 'tensorflow.random_normal', 'tf.random_normal', (['[1]'], {}), '([1])\n', (598, 603), True, 'import tensorflow as tf\n'), ((651, 666), 'tensorflow.matmul', 'tf.matmul', (['X', 'W'], {}), '(X, W)\n', (660, 666), True, 'import tensorflow as tf\n'), ((726, 751), 'tensorflow.square', 'tf.square', (['(hypothesis - Y)'], {}), '(hypothesis - Y)\n', (735, 751), True, 'import tensorflow as tf\n'), ((972, 1005), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1003, 1005), True, 'import tensorflow as tf\n')] |
import cv2
import numpy as np
from planarH import computeH
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def compute_extrinsics(K,H):
H_hat = np.matmul(np.linalg.inv(K),H)
rotation_hat = H_hat[:,:2]
[U,S,V] = np.linalg.svd(rotation_hat)
S = np.array([[1,0],[0,1],[0,0]])
new_rotation = np.matmul(U,np.matmul(S,V))
rotation_hat3 = np.cross(new_rotation[:,0], new_rotation[:,1])
rotation_hat3 = np.reshape(rotation_hat3, (3,1))
R = np.append(new_rotation, rotation_hat3, 1)
det_R = np.linalg.det(R)
if det_R == -1:
R[:,2] = -1*R[:,2]
lambda_val = 0
for i in range(0,3):
for j in range(0,2):
lambda_val += rotation_hat[i,j]/R[i,j]
lambda_val = lambda_val/6
t = H_hat[:,2]/lambda_val
t = np.reshape(t, (3,1))
return R,t
def project_extrinsics(K,W,R,t):
extrinsic_matrix = np.append(R, t, 1)
a = np.ones([len(W[2,:]),1]).T
W = np.append(W, a, 0)
X_projected = np.matmul(K,np.matmul(extrinsic_matrix, W))
X_projected[2,np.where(X_projected[2,:]==0)[0]] = 1
X_projected = X_projected/X_projected[2,:]
X = X_projected[:2,:]
return X
def display_points(K,R,t):
W_new = np.loadtxt('../data/sphere.txt')
shift = [5.2,11.2,-6.85/2]
# shift = [0,0,0]
W_new[0,:] += shift[0]
W_new[1,:] += shift[1]
W_new[2,:] += shift[2]
X = project_extrinsics(K,W_new,R,t)
sphere_x, sphere_y = [], []
for i in range(X.shape[1]):
sphere_x.append(int(X[0,i]))
sphere_y.append(int(X[1,i]))
im = cv2.imread('../data/prince_book.jpeg')
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
implot = plt.imshow(im)
plt.plot(sphere_x, sphere_y, '-', color='yellow')
# plt.axis('off')
# plt.savefig("../results/ar.png", bbox_inches='tight')
plt.show()
if __name__ == "__main__":
K = np.array([[3043.72,0,1196],[0,3043.72,1604],[0,0,1]])
W = np.array([[0,18.2,18.2,0],[0,0,26,26],[0,0,0,0]])
X = np.array([[483, 1704, 2175, 67], [810, 781, 2217, 2286]])
H = computeH(X, W[:2,:])
R,t = compute_extrinsics(K,H)
print (R,t)
X_projected = project_extrinsics(K,W,R,t)
display_points(K,R,t) | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"cv2.cvtColor",
"matplotlib.pyplot.imshow",
"numpy.cross",
"numpy.append",
"cv2.imread",
"numpy.linalg.svd",
"numpy.array",
"numpy.reshape",
"numpy.loadtxt",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.linalg.det",
"numpy.where",
"plan... | [((241, 268), 'numpy.linalg.svd', 'np.linalg.svd', (['rotation_hat'], {}), '(rotation_hat)\n', (254, 268), True, 'import numpy as np\n'), ((277, 311), 'numpy.array', 'np.array', (['[[1, 0], [0, 1], [0, 0]]'], {}), '([[1, 0], [0, 1], [0, 0]])\n', (285, 311), True, 'import numpy as np\n'), ((374, 422), 'numpy.cross', 'np.cross', (['new_rotation[:, 0]', 'new_rotation[:, 1]'], {}), '(new_rotation[:, 0], new_rotation[:, 1])\n', (382, 422), True, 'import numpy as np\n'), ((441, 474), 'numpy.reshape', 'np.reshape', (['rotation_hat3', '(3, 1)'], {}), '(rotation_hat3, (3, 1))\n', (451, 474), True, 'import numpy as np\n'), ((482, 523), 'numpy.append', 'np.append', (['new_rotation', 'rotation_hat3', '(1)'], {}), '(new_rotation, rotation_hat3, 1)\n', (491, 523), True, 'import numpy as np\n'), ((536, 552), 'numpy.linalg.det', 'np.linalg.det', (['R'], {}), '(R)\n', (549, 552), True, 'import numpy as np\n'), ((794, 815), 'numpy.reshape', 'np.reshape', (['t', '(3, 1)'], {}), '(t, (3, 1))\n', (804, 815), True, 'import numpy as np\n'), ((887, 905), 'numpy.append', 'np.append', (['R', 't', '(1)'], {}), '(R, t, 1)\n', (896, 905), True, 'import numpy as np\n'), ((949, 967), 'numpy.append', 'np.append', (['W', 'a', '(0)'], {}), '(W, a, 0)\n', (958, 967), True, 'import numpy as np\n'), ((1216, 1248), 'numpy.loadtxt', 'np.loadtxt', (['"""../data/sphere.txt"""'], {}), "('../data/sphere.txt')\n", (1226, 1248), True, 'import numpy as np\n'), ((1572, 1610), 'cv2.imread', 'cv2.imread', (['"""../data/prince_book.jpeg"""'], {}), "('../data/prince_book.jpeg')\n", (1582, 1610), False, 'import cv2\n'), ((1620, 1655), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (1632, 1655), False, 'import cv2\n'), ((1669, 1683), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im'], {}), '(im)\n', (1679, 1683), True, 'import matplotlib.pyplot as plt\n'), ((1688, 1737), 'matplotlib.pyplot.plot', 'plt.plot', (['sphere_x', 'sphere_y', '"""-"""'], {'color': 
'"""yellow"""'}), "(sphere_x, sphere_y, '-', color='yellow')\n", (1696, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1834), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1832, 1834), True, 'import matplotlib.pyplot as plt\n'), ((1871, 1932), 'numpy.array', 'np.array', (['[[3043.72, 0, 1196], [0, 3043.72, 1604], [0, 0, 1]]'], {}), '([[3043.72, 0, 1196], [0, 3043.72, 1604], [0, 0, 1]])\n', (1879, 1932), True, 'import numpy as np\n'), ((1933, 1993), 'numpy.array', 'np.array', (['[[0, 18.2, 18.2, 0], [0, 0, 26, 26], [0, 0, 0, 0]]'], {}), '([[0, 18.2, 18.2, 0], [0, 0, 26, 26], [0, 0, 0, 0]])\n', (1941, 1993), True, 'import numpy as np\n'), ((1991, 2048), 'numpy.array', 'np.array', (['[[483, 1704, 2175, 67], [810, 781, 2217, 2286]]'], {}), '([[483, 1704, 2175, 67], [810, 781, 2217, 2286]])\n', (1999, 2048), True, 'import numpy as np\n'), ((2057, 2078), 'planarH.computeH', 'computeH', (['X', 'W[:2, :]'], {}), '(X, W[:2, :])\n', (2065, 2078), False, 'from planarH import computeH\n'), ((176, 192), 'numpy.linalg.inv', 'np.linalg.inv', (['K'], {}), '(K)\n', (189, 192), True, 'import numpy as np\n'), ((338, 353), 'numpy.matmul', 'np.matmul', (['S', 'V'], {}), '(S, V)\n', (347, 353), True, 'import numpy as np\n'), ((998, 1028), 'numpy.matmul', 'np.matmul', (['extrinsic_matrix', 'W'], {}), '(extrinsic_matrix, W)\n', (1007, 1028), True, 'import numpy as np\n'), ((1048, 1080), 'numpy.where', 'np.where', (['(X_projected[2, :] == 0)'], {}), '(X_projected[2, :] == 0)\n', (1056, 1080), True, 'import numpy as np\n')] |
from warnings import catch_warnings
import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
from pandas.util import testing as tm
@pytest.fixture
def single_level_multiindex():
"""single level MultiIndex"""
return MultiIndex(levels=[['foo', 'bar', 'baz', 'qux']],
labels=[[0, 1, 2, 3]], names=['first'])
@pytest.mark.filterwarnings("ignore:\\n.ix:DeprecationWarning")
class TestMultiIndexLoc(object):
def test_loc_getitem_series(self):
# GH14730
# passing a series as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = Series([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
result = x.loc[[1, 3]]
tm.assert_series_equal(result, expected)
# GH15424
y1 = Series([1, 3], index=[1, 2])
result = x.loc[y1]
tm.assert_series_equal(result, expected)
empty = Series(data=[], dtype=np.float64)
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
def test_loc_getitem_array(self):
# GH15434
# passing an array as a key with a MultiIndex
index = MultiIndex.from_product([[1, 2, 3], ['A', 'B', 'C']])
x = Series(index=index, data=range(9), dtype=np.float64)
y = np.array([1, 3])
expected = Series(
data=[0, 1, 2, 6, 7, 8],
index=MultiIndex.from_product([[1, 3], ['A', 'B', 'C']]),
dtype=np.float64)
result = x.loc[y]
tm.assert_series_equal(result, expected)
# empty array:
empty = np.array([])
expected = Series([], index=MultiIndex(
levels=index.levels, labels=[[], []], dtype=np.float64))
result = x.loc[empty]
tm.assert_series_equal(result, expected)
# 0-dim array (scalar):
scalar = np.int64(1)
expected = Series(
data=[0, 1, 2],
index=['A', 'B', 'C'],
dtype=np.float64)
result = x.loc[scalar]
tm.assert_series_equal(result, expected)
def test_loc_multiindex(self):
mi_labels = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
mi_int = DataFrame(np.random.randn(3, 3),
columns=[[2, 2, 4], [6, 8, 10]],
index=[[4, 4, 8], [8, 10, 12]])
# the first row
rs = mi_labels.loc['i']
with catch_warnings(record=True):
xp = mi_labels.ix['i']
tm.assert_frame_equal(rs, xp)
# 2nd (last) columns
rs = mi_labels.loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# corner column
rs = mi_labels.loc['j'].loc[:, 'j']
with catch_warnings(record=True):
xp = mi_labels.ix['j'].ix[:, 'j']
tm.assert_frame_equal(rs, xp)
# with a tuple
rs = mi_labels.loc[('i', 'X')]
with catch_warnings(record=True):
xp = mi_labels.ix[('i', 'X')]
tm.assert_frame_equal(rs, xp)
rs = mi_int.loc[4]
with catch_warnings(record=True):
xp = mi_int.ix[4]
tm.assert_frame_equal(rs, xp)
# missing label
pytest.raises(KeyError, lambda: mi_int.loc[2])
with catch_warnings(record=True):
# GH 21593
pytest.raises(KeyError, lambda: mi_int.ix[2])
def test_loc_multiindex_indexer_none(self):
# GH6788
# multi-index indexer is None (meaning take all)
attributes = ['Attribute' + str(i) for i in range(1)]
attribute_values = ['Value' + str(i) for i in range(5)]
index = MultiIndex.from_product([attributes, attribute_values])
df = 0.1 * np.random.randn(10, 1 * 5) + 0.5
df = DataFrame(df, columns=index)
result = df[attributes]
tm.assert_frame_equal(result, df)
# GH 7349
# loc with a multi-index seems to be doing fallback
df = DataFrame(np.arange(12).reshape(-1, 1),
index=MultiIndex.from_product([[1, 2, 3, 4],
[1, 2, 3]]))
expected = df.loc[([1, 2], ), :]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_incomplete(self):
# GH 7399
# incomplete indexers
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.loc[:, 'a':'c']
result = s.loc[0:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[:4, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
result = s.loc[0:, 'a':'c']
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result, expected)
# GH 7400
# multiindexer gettitem with list of indexers skips wrong element
s = Series(np.arange(15, dtype='int64'),
MultiIndex.from_product([range(5), ['a', 'b', 'c']]))
expected = s.iloc[[6, 7, 8, 12, 13, 14]]
result = s.loc[2:4:2, 'a':'c']
tm.assert_series_equal(result, expected)
def test_get_loc_single_level(self, single_level_multiindex):
single_level = single_level_multiindex
s = Series(np.random.randn(len(single_level)),
index=single_level)
for k in single_level.values:
s[k]
| [
"pandas.DataFrame",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.MultiIndex",
"pandas.MultiIndex.from_product",
"pytest.raises",
"numpy.array",
"pandas.Series",
"warnings.catch_warnings",
"numpy.int64",
"numpy.arange",
"pytest.mark.filterwarnings",
"pandas.util.tes... | [((367, 429), 'pytest.mark.filterwarnings', 'pytest.mark.filterwarnings', (['"""ignore:\\\\n.ix:DeprecationWarning"""'], {}), "('ignore:\\\\n.ix:DeprecationWarning')\n", (393, 429), False, 'import pytest\n'), ((252, 345), 'pandas.MultiIndex', 'MultiIndex', ([], {'levels': "[['foo', 'bar', 'baz', 'qux']]", 'labels': '[[0, 1, 2, 3]]', 'names': "['first']"}), "(levels=[['foo', 'bar', 'baz', 'qux']], labels=[[0, 1, 2, 3]],\n names=['first'])\n", (262, 345), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((591, 644), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[[1, 2, 3], ['A', 'B', 'C']]"], {}), "([[1, 2, 3], ['A', 'B', 'C']])\n", (614, 644), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((722, 736), 'pandas.Series', 'Series', (['[1, 3]'], {}), '([1, 3])\n', (728, 736), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((935, 975), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (957, 975), True, 'from pandas.util import testing as tm\n'), ((1016, 1056), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1038, 1056), True, 'from pandas.util import testing as tm\n'), ((1089, 1117), 'pandas.Series', 'Series', (['[1, 3]'], {'index': '[1, 2]'}), '([1, 3], index=[1, 2])\n', (1095, 1117), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((1153, 1193), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1175, 1193), True, 'from pandas.util import testing as tm\n'), ((1211, 1244), 'pandas.Series', 'Series', ([], {'data': '[]', 'dtype': 'np.float64'}), '(data=[], dtype=np.float64)\n', (1217, 1244), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((1400, 1440), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], 
{}), '(result, expected)\n', (1422, 1440), True, 'from pandas.util import testing as tm\n'), ((1568, 1621), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[[1, 2, 3], ['A', 'B', 'C']]"], {}), "([[1, 2, 3], ['A', 'B', 'C']])\n", (1591, 1621), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((1699, 1715), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (1707, 1715), True, 'import numpy as np\n'), ((1914, 1954), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (1936, 1954), True, 'from pandas.util import testing as tm\n'), ((1995, 2007), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2003, 2007), True, 'import numpy as np\n'), ((2163, 2203), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (2185, 2203), True, 'from pandas.util import testing as tm\n'), ((2254, 2265), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (2262, 2265), True, 'import numpy as np\n'), ((2285, 2348), 'pandas.Series', 'Series', ([], {'data': '[0, 1, 2]', 'index': "['A', 'B', 'C']", 'dtype': 'np.float64'}), "(data=[0, 1, 2], index=['A', 'B', 'C'], dtype=np.float64)\n", (2291, 2348), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((2425, 2465), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (2447, 2465), True, 'from pandas.util import testing as tm\n'), ((3014, 3043), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3035, 3043), True, 'from pandas.util import testing as tm\n'), ((3197, 3226), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3218, 3226), True, 'from pandas.util import testing as tm\n'), ((3392, 3421), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, 
xp)\n', (3413, 3421), True, 'from pandas.util import testing as tm\n'), ((3577, 3606), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3598, 3606), True, 'from pandas.util import testing as tm\n'), ((3715, 3744), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['rs', 'xp'], {}), '(rs, xp)\n', (3736, 3744), True, 'from pandas.util import testing as tm\n'), ((3778, 3825), 'pytest.raises', 'pytest.raises', (['KeyError', '(lambda : mi_int.loc[2])'], {}), '(KeyError, lambda : mi_int.loc[2])\n', (3791, 3825), False, 'import pytest\n'), ((4215, 4270), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (['[attributes, attribute_values]'], {}), '([attributes, attribute_values])\n', (4238, 4270), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((4336, 4364), 'pandas.DataFrame', 'DataFrame', (['df'], {'columns': 'index'}), '(df, columns=index)\n', (4345, 4364), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((4405, 4438), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'df'], {}), '(result, df)\n', (4426, 4438), True, 'from pandas.util import testing as tm\n'), ((4788, 4827), 'pandas.util.testing.assert_frame_equal', 'tm.assert_frame_equal', (['result', 'expected'], {}), '(result, expected)\n', (4809, 4827), True, 'from pandas.util import testing as tm\n'), ((5129, 5169), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5151, 5169), True, 'from pandas.util import testing as tm\n'), ((5178, 5218), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5200, 5218), True, 'from pandas.util import testing as tm\n'), ((5264, 5304), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5286, 5304), True, 'from pandas.util import 
testing as tm\n'), ((5313, 5353), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5335, 5353), True, 'from pandas.util import testing as tm\n'), ((5399, 5439), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5421, 5439), True, 'from pandas.util import testing as tm\n'), ((5448, 5488), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5470, 5488), True, 'from pandas.util import testing as tm\n'), ((5800, 5840), 'pandas.util.testing.assert_series_equal', 'tm.assert_series_equal', (['result', 'expected'], {}), '(result, expected)\n', (5822, 5840), True, 'from pandas.util import testing as tm\n'), ((2533, 2554), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (2548, 2554), True, 'import numpy as np\n'), ((2730, 2751), 'numpy.random.randn', 'np.random.randn', (['(3)', '(3)'], {}), '(3, 3)\n', (2745, 2751), True, 'import numpy as np\n'), ((2942, 2969), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (2956, 2969), False, 'from warnings import catch_warnings\n'), ((3122, 3149), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3136, 3149), False, 'from warnings import catch_warnings\n'), ((3309, 3336), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3323, 3336), False, 'from warnings import catch_warnings\n'), ((3498, 3525), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3512, 3525), False, 'from warnings import catch_warnings\n'), ((3648, 3675), 'warnings.catch_warnings', 'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3662, 3675), False, 'from warnings import catch_warnings\n'), ((3838, 3865), 'warnings.catch_warnings', 
'catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (3852, 3865), False, 'from warnings import catch_warnings\n'), ((3902, 3948), 'pytest.raises', 'pytest.raises', (['KeyError', '(lambda : mi_int.ix[2])'], {}), '(KeyError, lambda : mi_int.ix[2])\n', (3915, 3948), False, 'import pytest\n'), ((4943, 4971), 'numpy.arange', 'np.arange', (['(15)'], {'dtype': '"""int64"""'}), "(15, dtype='int64')\n", (4952, 4971), True, 'import numpy as np\n'), ((5601, 5629), 'numpy.arange', 'np.arange', (['(15)'], {'dtype': '"""int64"""'}), "(15, dtype='int64')\n", (5610, 5629), True, 'import numpy as np\n'), ((819, 869), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[[1, 3], ['A', 'B', 'C']]"], {}), "([[1, 3], ['A', 'B', 'C']])\n", (842, 869), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((1281, 1347), 'pandas.MultiIndex', 'MultiIndex', ([], {'levels': 'index.levels', 'labels': '[[], []]', 'dtype': 'np.float64'}), '(levels=index.levels, labels=[[], []], dtype=np.float64)\n', (1291, 1347), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((1798, 1848), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (["[[1, 3], ['A', 'B', 'C']]"], {}), "([[1, 3], ['A', 'B', 'C']])\n", (1821, 1848), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((2044, 2110), 'pandas.MultiIndex', 'MultiIndex', ([], {'levels': 'index.levels', 'labels': '[[], []]', 'dtype': 'np.float64'}), '(levels=index.levels, labels=[[], []], dtype=np.float64)\n', (2054, 2110), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((4290, 4316), 'numpy.random.randn', 'np.random.randn', (['(10)', '(1 * 5)'], {}), '(10, 1 * 5)\n', (4305, 4316), True, 'import numpy as np\n'), ((4600, 4650), 'pandas.MultiIndex.from_product', 'MultiIndex.from_product', (['[[1, 2, 3, 4], [1, 2, 3]]'], {}), '([[1, 2, 3, 4], [1, 2, 3]])\n', (4623, 4650), False, 'from pandas import DataFrame, MultiIndex, Series\n'), ((4541, 4554), 'numpy.arange', 'np.arange', 
(['(12)'], {}), '(12)\n', (4550, 4554), True, 'import numpy as np\n')] |
#-------------------------------------#
# Run YOLO detection on a video / camera stream
#-------------------------------------#
from yolo import YOLO
from PIL import Image
import numpy as np
import cv2
import time
import tensorflow as tf

# Let TensorFlow grow GPU memory on demand instead of grabbing it all up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

yolo = YOLO()

# Open the video source (use cv2.VideoCapture(0) for a webcam).
capture=cv2.VideoCapture('video/coin7.mp4')
#capture=cv2.VideoCapture(0)
fps = 0.0
# BUG FIX: the original looped on `capture.isOpened` (a bound method object,
# always truthy). It must be *called* to actually test the stream state.
while capture.isOpened():
    t1 = time.time()
    # Grab one frame; stop cleanly at end of stream.
    ret, frame = capture.read()
    if not ret:
        break
    # OpenCV delivers BGR; the detector expects RGB.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Wrap as a PIL Image for the detector.
    frame = Image.fromarray(np.uint8(frame))
    # Run detection.
    frame, text, classes = yolo.detect_image(frame)
    frame = np.array(frame)
    # Back to BGR so OpenCV displays the colors correctly.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    # Running average of the instantaneous frame rate.
    fps = ( fps + (1./(time.time()-t1)) ) / 2
    print("fps= %.2f"%(fps))
    frame = cv2.putText(frame, "fps= %.2f"%(fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("video", frame)
    # Quit on 'q'.
    if cv2.waitKey(30) == ord('q'):
        break

capture.release()
cv2.destroyAllWindows()
"numpy.uint8",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"tensorflow.config.experimental.set_memory_growth",
"time.time",
"cv2.VideoCapture",
"numpy.array",
"cv2.destroyAllWindows",
"tensorflow.config.experimental.list_physical_devices",
"yolo.YOLO"
] | [((226, 277), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (270, 277), True, 'import tensorflow as tf\n'), ((278, 345), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (318, 345), True, 'import tensorflow as tf\n'), ((415, 421), 'yolo.YOLO', 'YOLO', ([], {}), '()\n', (419, 421), False, 'from yolo import YOLO\n'), ((438, 473), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""video/coin7.mp4"""'], {}), "('video/coin7.mp4')\n", (454, 473), False, 'import cv2\n'), ((1221, 1244), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1242, 1244), False, 'import cv2\n'), ((548, 559), 'time.time', 'time.time', ([], {}), '()\n', (557, 559), False, 'import time\n'), ((668, 706), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (680, 706), False, 'import cv2\n'), ((838, 853), 'numpy.array', 'np.array', (['frame'], {}), '(frame)\n', (846, 853), True, 'import numpy as np\n'), ((894, 932), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (906, 932), False, 'import cv2\n'), ((1025, 1120), 'cv2.putText', 'cv2.putText', (['frame', "('fps= %.2f' % fps)", '(0, 40)', 'cv2.FONT_HERSHEY_SIMPLEX', '(1)', '(0, 255, 0)', '(2)'], {}), "(frame, 'fps= %.2f' % fps, (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1,\n (0, 255, 0), 2)\n", (1036, 1120), False, 'import cv2\n'), ((1126, 1152), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'frame'], {}), "('video', frame)\n", (1136, 1152), False, 'import cv2\n'), ((749, 764), 'numpy.uint8', 'np.uint8', (['frame'], {}), '(frame)\n', (757, 764), True, 'import numpy as np\n'), ((1159, 1174), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (1170, 1174), False, 'import cv2\n'), ((961, 972), 'time.time', 'time.time', ([], {}), 
'()\n', (970, 972), False, 'import time\n')] |
import os
import time
import logging
import argparse
import numpy as np
import torch
from common.logger_utils import initialize_logging
from pytorch.utils import prepare_pt_context, prepare_model
from pytorch.dataset_utils import get_dataset_metainfo
from pytorch.dataset_utils import get_val_data_source
def add_eval_parser_arguments(parser):
    """Register the shared evaluation command-line options on *parser*."""
    arg = parser.add_argument
    arg("--model", type=str, required=True,
        help="type of model to use. see model_provider for options")
    arg("--use-pretrained", action="store_true",
        help="enable using pretrained model from github repo")
    arg("--dtype", type=str, default="float32",
        help="base data type for tensors")
    arg("--resume", type=str, default="",
        help="resume from previously saved parameters")
    arg("--calc-flops", dest="calc_flops", action="store_true",
        help="calculate FLOPs")
    arg("--calc-flops-only", dest="calc_flops_only", action="store_true",
        help="calculate FLOPs without quality estimation")
    arg("--data-subset", type=str, default="val",
        help="data subset. options are val and test")
    arg("--num-gpus", type=int, default=0,
        help="number of gpus to use")
    arg("-j", "--num-data-workers", dest="num_workers", default=4, type=int,
        help="number of preprocessing workers")
    arg("--batch-size", type=int, default=512,
        help="training batch size per device (CPU/GPU)")
    arg("--save-dir", type=str, default="",
        help="directory of saved models and log-files")
    arg("--logging-file-name", type=str, default="train.log",
        help="filename of training log")
    arg("--log-packages", type=str, default="mxnet, numpy",
        help="list of python packages for logging")
    arg("--log-pip-packages", type=str, default="mxnet-cu100",
        help="list of pip packages for logging")
    arg("--disable-cudnn-autotune", action="store_true",
        help="disable cudnn autotune for segmentation models")
    arg("--show-progress", action="store_true",
        help="show progress bar")
def parse_args():
    """Build the full CLI parser (base + dataset-specific + evaluation options)
    and parse the command line."""
    parser = argparse.ArgumentParser(
        description="Evaluate a model for image matching (PyTorch/HPatches)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "--dataset",
        type=str,
        default="HPatches",
        help="dataset name")
    parser.add_argument(
        "--work-dir",
        type=str,
        default=os.path.join("..", "imgclsmob_data"),
        help="path to working directory only for dataset root path preset")
    # First pass: only --dataset/--work-dir are needed to pick the metainfo,
    # which then contributes its own parser arguments.
    known, _ = parser.parse_known_args()
    metainfo = get_dataset_metainfo(dataset_name=known.dataset)
    metainfo.add_dataset_parser_arguments(
        parser=parser,
        work_dir_path=known.work_dir)
    add_eval_parser_arguments(parser)
    return parser.parse_args()
def warp_keypoints(src_pts, homography):
src_hmg_pts = np.concatenate([src_pts, np.ones((src_pts.shape[0], 1))], axis=1)
dst_hmg_pts = np.dot(src_hmg_pts, np.transpose(homography)).squeeze(axis=2)
dst_pts = dst_hmg_pts[:, :2] / dst_hmg_pts[:, 2:]
return dst_pts
def calc_filter_mask(pts, shape):
    """Boolean mask of points that lie inside the image bounds given by `shape`."""
    in_dim0 = (pts[:, 0] >= 0) & (pts[:, 0] < shape[0])
    in_dim1 = (pts[:, 1] >= 0) & (pts[:, 1] < shape[1])
    return in_dim0 & in_dim1
def select_k_best(pts,
                  confs,
                  max_count=300):
    """Return the `max_count` highest-confidence points and their scores."""
    by_conf_desc = np.argsort(confs)[::-1]
    keep = by_conf_desc[:max_count]
    return pts[keep, :], confs[keep]
def calc_repeatability_np(src_pts,
                          src_confs,
                          dst_pts,
                          dst_confs,
                          homography,
                          src_shape,
                          dst_shape,
                          distance_thresh=3):
    """Compute detector repeatability between two views related by `homography`.

    Keypoints from each image are warped into the other view; only points that
    land inside the opposite image survive, the top-300 by confidence are kept
    on each side, and a point counts as repeated when a warped point lies
    within `distance_thresh` of a detected point.

    Note: `homography` is expected to carry a leading batch dimension
    (shape (1, 3, 3)) -- see warp_keypoints.

    Returns (n_pred, n_label, repeatability).
    """
    # Warp dst keypoints back into the src frame; those that land inside the
    # src image become the "label" set (kept in dst coordinates).
    pred_src_pts = warp_keypoints(dst_pts, np.linalg.inv(homography))
    pred_src_mask = calc_filter_mask(pred_src_pts, src_shape)
    label_dst_pts, label_dst_confs = dst_pts[pred_src_mask, :], dst_confs[pred_src_mask]
    # Warp src keypoints into the dst frame; keep those inside the dst image.
    pred_dst_pts = warp_keypoints(src_pts, homography)
    pred_dst_mask = calc_filter_mask(pred_dst_pts, dst_shape)
    pred_dst_pts, pred_dst_confs = pred_dst_pts[pred_dst_mask, :], src_confs[pred_dst_mask]
    # Keep only the highest-confidence points on each side (default top 300).
    label_dst_pts, label_dst_confs = select_k_best(label_dst_pts, label_dst_confs)
    pred_dst_pts, pred_dst_confs = select_k_best(pred_dst_pts, pred_dst_confs)
    n_pred = pred_dst_pts.shape[0]
    n_label = label_dst_pts.shape[0]
    # NOTE(review): the confidence is stacked as a third coordinate, so it
    # participates in the Euclidean distance below -- confirm this is intended.
    label_dst_pts = np.stack([label_dst_pts[:, 0], label_dst_pts[:, 1], label_dst_confs], axis=1)
    pred_dst_pts = np.stack([pred_dst_pts[:, 0], pred_dst_pts[:, 1], pred_dst_confs], axis=1)
    # Broadcast to an (n_pred, n_label) matrix of pairwise distances.
    pred_dst_pts = np.expand_dims(pred_dst_pts, 1)
    label_dst_pts = np.expand_dims(label_dst_pts, 0)
    norm = np.linalg.norm(pred_dst_pts - label_dst_pts, ord=None, axis=2)
    count1 = 0
    count2 = 0
    if n_label != 0:
        # Per predicted point: distance to its nearest label point.
        min1 = np.min(norm, axis=1)
        count1 = np.sum(min1 <= distance_thresh)
    if n_pred != 0:
        # Per label point: distance to its nearest predicted point.
        min2 = np.min(norm, axis=0)
        count2 = np.sum(min2 <= distance_thresh)
    # Symmetric repeatability: matches from both directions over total points.
    if n_pred + n_label > 0:
        repeatability = (count1 + count2) / (n_pred + n_label)
    else:
        repeatability = 0
    return n_pred, n_label, repeatability
def calc_detector_repeatability(test_data,
                                net,
                                use_cuda):
    """Evaluate keypoint-detector repeatability over an image-pair dataset.

    Parameters:
        test_data: iterable yielding (src_batch, dst_batch, homography) triples
        net: detector returning (points, confidences, descriptor map)
        use_cuda: move input batches to the GPU when True

    Logs average keypoint counts, mean repeatability, and elapsed time.
    """
    tic = time.time()
    repeatabilities = []
    n1s = []
    n2s = []
    with torch.no_grad():
        for data_src, data_dst, target in test_data:
            if use_cuda:
                data_src = data_src.cuda(non_blocking=True)
                data_dst = data_dst.cuda(non_blocking=True)
            src_pts, src_confs, src_desc_map = net(data_src)
            dst_pts, dst_confs, dst_desc_map = net(data_dst)
            # Spatial sizes of the two inputs (assumes NCHW layout -- TODO confirm).
            src_shape = data_src.cpu().detach().numpy().shape[2:]
            dst_shape = data_dst.cpu().detach().numpy().shape[2:]
            # Per-sample loop over the batch (batch size is 1 in practice,
            # since `target` is converted once per batch).
            for i in range(len(src_pts)):
                homography = target.cpu().detach().numpy()
                src_pts_np = src_pts[i].cpu().detach().numpy()
                src_confs_np = src_confs[i].cpu().detach().numpy()
                dst_pts_np = dst_pts[i].cpu().detach().numpy()
                dst_confs_np = dst_confs[i].cpu().detach().numpy()
                n1, n2, repeatability = calc_repeatability_np(
                    src_pts_np,
                    src_confs_np,
                    dst_pts_np,
                    dst_confs_np,
                    homography,
                    src_shape,
                    dst_shape)
                n1s.append(n1)
                n2s.append(n2)
                repeatabilities.append(repeatability)
    logging.info("Average number of points in the first image: {}".format(np.mean(n1s)))
    logging.info("Average number of points in the second image: {}".format(np.mean(n2s)))
    logging.info("The repeatability: {:.4f}".format(np.mean(repeatabilities)))
    logging.info("Time cost: {:.4f} sec".format(time.time() - tic))
def main():
    """Script entry point: parse arguments, build the model and data source,
    then measure detector repeatability on the configured dataset."""
    args = parse_args()
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    # Repeatability is computed per image pair; only batch size 1 is supported.
    assert (args.batch_size == 1)
    _, log_file_exist = initialize_logging(
        logging_dir_path=args.save_dir,
        logging_file_name=args.logging_file_name,
        script_args=args,
        log_packages=args.log_packages,
        log_pip_packages=args.log_pip_packages)
    ds_metainfo = get_dataset_metainfo(dataset_name=args.dataset)
    ds_metainfo.update(args=args)
    use_cuda, batch_size = prepare_pt_context(
        num_gpus=args.num_gpus,
        batch_size=args.batch_size)
    net = prepare_model(
        model_name=args.model,
        use_pretrained=args.use_pretrained,
        pretrained_model_file_path=args.resume.strip(),
        use_cuda=use_cuda,
        net_extra_kwargs=ds_metainfo.net_extra_kwargs,
        load_ignore_extra=False,
        num_classes=args.num_classes,
        in_channels=args.in_channels,
        remove_module=False)
    test_data = get_val_data_source(
        ds_metainfo=ds_metainfo,
        batch_size=args.batch_size,
        num_workers=args.num_workers)
    calc_detector_repeatability(
        test_data=test_data,
        net=net,
        use_cuda=use_cuda)


if __name__ == "__main__":
    main()
| [
"numpy.stack",
"numpy.sum",
"argparse.ArgumentParser",
"numpy.transpose",
"numpy.expand_dims",
"pytorch.dataset_utils.get_dataset_metainfo",
"time.time",
"pytorch.utils.prepare_pt_context",
"common.logger_utils.initialize_logging",
"numpy.min",
"numpy.ones",
"numpy.linalg.norm",
"pytorch.dat... | [((2718, 2876), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluate a model for image matching (PyTorch/HPatches)"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description=\n 'Evaluate a model for image matching (PyTorch/HPatches)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (2741, 2876), False, 'import argparse\n'), ((3265, 3312), 'pytorch.dataset_utils.get_dataset_metainfo', 'get_dataset_metainfo', ([], {'dataset_name': 'args.dataset'}), '(dataset_name=args.dataset)\n', (3285, 3312), False, 'from pytorch.dataset_utils import get_dataset_metainfo\n'), ((5099, 5176), 'numpy.stack', 'np.stack', (['[label_dst_pts[:, 0], label_dst_pts[:, 1], label_dst_confs]'], {'axis': '(1)'}), '([label_dst_pts[:, 0], label_dst_pts[:, 1], label_dst_confs], axis=1)\n', (5107, 5176), True, 'import numpy as np\n'), ((5196, 5270), 'numpy.stack', 'np.stack', (['[pred_dst_pts[:, 0], pred_dst_pts[:, 1], pred_dst_confs]'], {'axis': '(1)'}), '([pred_dst_pts[:, 0], pred_dst_pts[:, 1], pred_dst_confs], axis=1)\n', (5204, 5270), True, 'import numpy as np\n'), ((5291, 5322), 'numpy.expand_dims', 'np.expand_dims', (['pred_dst_pts', '(1)'], {}), '(pred_dst_pts, 1)\n', (5305, 5322), True, 'import numpy as np\n'), ((5343, 5375), 'numpy.expand_dims', 'np.expand_dims', (['label_dst_pts', '(0)'], {}), '(label_dst_pts, 0)\n', (5357, 5375), True, 'import numpy as np\n'), ((5387, 5449), 'numpy.linalg.norm', 'np.linalg.norm', (['(pred_dst_pts - label_dst_pts)'], {'ord': 'None', 'axis': '(2)'}), '(pred_dst_pts - label_dst_pts, ord=None, axis=2)\n', (5401, 5449), True, 'import numpy as np\n'), ((5998, 6009), 'time.time', 'time.time', ([], {}), '()\n', (6007, 6009), False, 'import time\n'), ((7786, 7977), 'common.logger_utils.initialize_logging', 'initialize_logging', ([], {'logging_dir_path': 'args.save_dir', 'logging_file_name': 'args.logging_file_name', 'script_args': 'args', 'log_packages': 'args.log_packages', 
'log_pip_packages': 'args.log_pip_packages'}), '(logging_dir_path=args.save_dir, logging_file_name=args.\n logging_file_name, script_args=args, log_packages=args.log_packages,\n log_pip_packages=args.log_pip_packages)\n', (7804, 7977), False, 'from common.logger_utils import initialize_logging\n'), ((8029, 8076), 'pytorch.dataset_utils.get_dataset_metainfo', 'get_dataset_metainfo', ([], {'dataset_name': 'args.dataset'}), '(dataset_name=args.dataset)\n', (8049, 8076), False, 'from pytorch.dataset_utils import get_dataset_metainfo\n'), ((8139, 8209), 'pytorch.utils.prepare_pt_context', 'prepare_pt_context', ([], {'num_gpus': 'args.num_gpus', 'batch_size': 'args.batch_size'}), '(num_gpus=args.num_gpus, batch_size=args.batch_size)\n', (8157, 8209), False, 'from pytorch.utils import prepare_pt_context, prepare_model\n'), ((8621, 8727), 'pytorch.dataset_utils.get_val_data_source', 'get_val_data_source', ([], {'ds_metainfo': 'ds_metainfo', 'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers'}), '(ds_metainfo=ds_metainfo, batch_size=args.batch_size,\n num_workers=args.num_workers)\n', (8640, 8727), False, 'from pytorch.dataset_utils import get_val_data_source\n'), ((4454, 4479), 'numpy.linalg.inv', 'np.linalg.inv', (['homography'], {}), '(homography)\n', (4467, 4479), True, 'import numpy as np\n'), ((5517, 5537), 'numpy.min', 'np.min', (['norm'], {'axis': '(1)'}), '(norm, axis=1)\n', (5523, 5537), True, 'import numpy as np\n'), ((5555, 5586), 'numpy.sum', 'np.sum', (['(min1 <= distance_thresh)'], {}), '(min1 <= distance_thresh)\n', (5561, 5586), True, 'import numpy as np\n'), ((5622, 5642), 'numpy.min', 'np.min', (['norm'], {'axis': '(0)'}), '(norm, axis=0)\n', (5628, 5642), True, 'import numpy as np\n'), ((5660, 5691), 'numpy.sum', 'np.sum', (['(min2 <= distance_thresh)'], {}), '(min2 <= distance_thresh)\n', (5666, 5691), True, 'import numpy as np\n'), ((6070, 6085), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6083, 6085), False, 'import torch\n'), 
((3087, 3123), 'os.path.join', 'os.path.join', (['""".."""', '"""imgclsmob_data"""'], {}), "('..', 'imgclsmob_data')\n", (3099, 3123), False, 'import os\n'), ((3597, 3627), 'numpy.ones', 'np.ones', (['(src_pts.shape[0], 1)'], {}), '((src_pts.shape[0], 1))\n', (3604, 3627), True, 'import numpy as np\n'), ((7383, 7395), 'numpy.mean', 'np.mean', (['n1s'], {}), '(n1s)\n', (7390, 7395), True, 'import numpy as np\n'), ((7473, 7485), 'numpy.mean', 'np.mean', (['n2s'], {}), '(n2s)\n', (7480, 7485), True, 'import numpy as np\n'), ((7540, 7564), 'numpy.mean', 'np.mean', (['repeatabilities'], {}), '(repeatabilities)\n', (7547, 7564), True, 'import numpy as np\n'), ((3676, 3700), 'numpy.transpose', 'np.transpose', (['homography'], {}), '(homography)\n', (3688, 3700), True, 'import numpy as np\n'), ((7615, 7626), 'time.time', 'time.time', ([], {}), '()\n', (7624, 7626), False, 'import time\n')] |
# Pionniers du TJ, benissiez-moi par votre Esprits Saints!
from constraints import generate_pairs
from CPKMeans import CPKMeans
import numpy as np
import csv
def test_dataset(points, labels, num_clust):
    """Run constrained k-means on a labelled dataset, using 1% of the label
    pairs as must-link / cannot-link constraints."""
    ml_pairs, cl_pairs = generate_pairs(labels, num_clust, percentage=0.01)
    clusterer = CPKMeans(points, num_clust, ml_pairs, cl_pairs)
    clusterer(ground_truth=labels, num_iteration=20)
def load_csv(filename):
    """Load a CSV where each row is feature values followed by a class label.

    Returns (points, labels, num_classes):
        points -- float32 ndarray of shape (n_rows, n_features)
        labels -- list of integer class ids, assigned in order of first appearance
        num_classes -- number of distinct labels seen

    Fixes over the original: the file handle is closed via a context manager
    even on error, and blank rows (e.g. a trailing newline) are skipped
    instead of raising IndexError.
    """
    points = []
    labels = []
    label_dict = {}
    with open(filename, newline="") as csvfile:
        for row in csv.reader(csvfile):
            if not row:
                continue  # skip empty lines
            point = np.array(row[:-1]).astype("float32")
            label = row[-1]
            # Assign a fresh integer id the first time a label is seen.
            if label not in label_dict:
                label_dict[label] = len(label_dict)
            points.append(point)
            labels.append(label_dict[label])
    return np.array(points), labels, len(label_dict)
def main(filename):
    """Load the dataset at `filename` and evaluate constrained k-means on it."""
    points, labels, num_clust = load_csv(filename)
    test_dataset(points, labels, num_clust)


# FIX: guard the entry point so importing this module does not immediately
# trigger a full clustering run (the original called main() unconditionally).
if __name__ == "__main__":
    main("iris.csv")
"numpy.array",
"CPKMeans.CPKMeans",
"csv.reader",
"constraints.generate_pairs"
] | [((229, 279), 'constraints.generate_pairs', 'generate_pairs', (['labels', 'num_clust'], {'percentage': '(0.01)'}), '(labels, num_clust, percentage=0.01)\n', (243, 279), False, 'from constraints import generate_pairs\n'), ((294, 345), 'CPKMeans.CPKMeans', 'CPKMeans', (['points', 'num_clust', 'must_link', 'cannot_link'], {}), '(points, num_clust, must_link, cannot_link)\n', (302, 345), False, 'from CPKMeans import CPKMeans\n'), ((474, 493), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (484, 493), False, 'import csv\n'), ((887, 903), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (895, 903), True, 'import numpy as np\n'), ((613, 633), 'numpy.array', 'np.array', (['this_point'], {}), '(this_point)\n', (621, 633), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# for animation
from matplotlib.animation import FuncAnimation
import matplotlib.pyplot as plt
import numpy as np
import distutils.util
# functions defined for model required by fastai
from fastai.vision.all import *
import sys
# Needed to import pycaster from relative path
sys.path.append("../PycastWorld")
from pycaster import PycastWorld, Turn, Walk
sys.path.append("../Models")
sys.path.append("../MazeGen")
from MazeUtils import read_maze_file, percent_through_maze, bfs_dist_maze, is_on_path
sys.path.append("../Notebooks")
from RNN_classes_funcs_Marchese import *
from cmd_classes_funcs_Marchese import *
def parent_to_deg(f):
    """Map a sample's parent-directory label ('left'/'right'/other) to a turn
    angle in degrees."""
    angles = {"left": 90.0, "right": -90.0}
    return angles.get(parent_label(f), 0.0)
def sin_cos_loss(preds, targs):
    """MSE between predicted (cos, sin) pairs and the unit vector of the
    target angle, where `targs` is given in degrees."""
    theta = targs / 180 * np.pi
    dx = preds[:, 0] - torch.cos(theta)
    dy = preds[:, 1] - torch.sin(theta)
    return (dx ** 2 + dy ** 2).mean()
def within_angle(preds, targs, angle):
    """Fraction of predicted (cos, sin) pairs whose angle lies within `angle`
    radians of the target angle (given in degrees)."""
    target_rad = targs / 180 * np.pi
    pred_rad = torch.atan2(preds[:, 1], preds[:, 0])
    diff = torch.abs(target_rad - pred_rad)
    # Wrap the absolute difference onto [0, pi].
    wrapped = torch.where(diff > np.pi, np.pi * 2.0 - diff, diff)
    return torch.where(wrapped < angle, 1.0, 0.0).mean()
def within_45_deg(preds, targs):
    """Metric: fraction of predictions within 45 degrees of the target."""
    return within_angle(preds, targs, np.pi / 4)


def within_30_deg(preds, targs):
    """Metric: fraction of predictions within 30 degrees of the target."""
    return within_angle(preds, targs, np.pi / 6)


def within_15_deg(preds, targs):
    """Metric: fraction of predictions within 15 degrees of the target."""
    return within_angle(preds, targs, np.pi / 12)
def name_to_deg(f):
    """Map a filename such as '00012_left.png' to a turn angle in degrees."""
    direction = f.name[6:-4]
    angles = {"left": 90.0, "right": -90.0}
    return angles.get(direction, 0.0)
def get_label(o):
    """Extract the direction label embedded in a filename like '00012_left.png'."""
    stem = o.name
    return stem[6:-4]
def get_pair_2(o):
    """Load image `o` together with its predecessor frame and return them
    stacked along the channel axis as a channels-first float32 array.

    The frame index is the first 5 characters of the filename; the previous
    frame is searched for in the same directory (uses fastai's Path.ls).
    Assumes 3-channel RGB images -- TODO confirm.
    """
    curr_im_num = Path(o).name[:5]
    # NOTE(review): when the index is 0, prev_im_num stays a *string*, so the
    # `int == str` comparison in the loop below never matches and the
    # pair-with-itself fallback is what actually runs -- confirm intended.
    if not int(curr_im_num):
        prev_im_num = curr_im_num
    else:
        prev_im_num = int(curr_im_num) - 1
    prev_im = None
    for item in Path(o).parent.ls():
        if int(item.name[:5]) == prev_im_num:
            prev_im = item
    # Fall back to pairing the image with itself if no predecessor was found.
    if prev_im is None:
        prev_im = Path(o)
    assert prev_im != None
    img1 = Image.open(o).convert("RGB")
    img2 = Image.open(prev_im).convert("RGB")
    img1_arr = np.array(img1, dtype=np.uint8)
    img2_arr = np.array(img2, dtype=np.uint8)
    # Double the channel dimension: current image in channels 0-2,
    # previous image in channels 3-5.
    new_shape = list(img1_arr.shape)
    new_shape[-1] = new_shape[-1] * 2
    img3_arr = np.zeros(new_shape, dtype=np.uint8)
    img3_arr[:, :, :3] = img1_arr
    img3_arr[:, :, 3:] = img2_arr
    return img3_arr.T.astype(np.float32)
# helper functions
def stacked_input(prev_im, curr_im):
    """Concatenate current and previous RGB frames along the channel axis.

    Returns a channels-first float32 array; when no previous frame exists the
    current frame is duplicated into both halves.
    """
    if prev_im is None:
        prev_im = curr_im
    doubled = list(curr_im.shape)
    doubled[-1] *= 2
    combined = np.zeros(doubled, dtype=np.uint8)
    combined[:, :, :3] = curr_im
    combined[:, :, 3:] = prev_im
    return combined.T.astype(np.float32)
def reg_predict(pred_coords):
    """Convert a predicted (cos, sin) pair into a discrete steering command.

    The angle is normalized to [0, 360): roughly the upper sector maps to
    'left', the lower sector to 'right', everything near 0/360 to 'straight'.
    """
    angle = np.arctan2(pred_coords[1], pred_coords[0]) / np.pi * 180
    angle %= 360
    if 53 < angle <= 180:
        return "left"
    if 180 < angle < 307:
        return "right"
    return "straight"
def filename_to_class(filename: str) -> str:
    """Classify a sample filename (e.g. '00012_90p0.png') by the sign of the
    angle encoded after the underscore ('p' stands in for the decimal point)."""
    raw = filename.split("_")[1].split(".")[0]
    angle = float(raw.replace("p", "."))
    if angle > 0:
        return "left"
    if angle < 0:
        return "right"
    return "forward"
# Animation function. TODO: make it output an embedded HTML figure
def animate(image_frames, name, dir_name):
    """
    Generate a GIF animation of the saved frames
    Keyword arguments:
    image_frames -- array of frames
    name -- name of model
    dir_name -- name of directory
    """
    print("Animating...")
    # BUG FIX: the original ran os.system(dir_name) when the directory already
    # existed, executing the directory name as a shell command. makedirs with
    # exist_ok=True covers both cases safely.
    os.makedirs(dir_name, exist_ok=True)
    # Timestamp used by the (currently disabled) rendering code below.
    now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    # The actual animation rendering is disabled; kept for reference until it
    # is re-enabled.
    # save_path = os.path.abspath(dir_name)
    # name = str(name).split("/")[-1][:-4]
    # fig, ax = plt.subplots()
    # ln = plt.imshow(image_frames[0])
    # def init():
    #     ln.set_data(image_frames[0])
    #     return [ln]
    # def update(frame):
    #     ln.set_array(frame)
    #     return [ln]
    # ani = FuncAnimation(fig, update, image_frames, init_func=init, blit=True)
    # ani.save(os.path.join(save_path, name + "_" + str(now) + ".mp4"), fps=60)
def get_fig_filename(prefix: str, label: str, ext: str, rep: int) -> str:
    """Compose (and echo) the output figure filename for one experiment run."""
    fig_filename = "{}-{}-{}.{}".format(prefix, label, rep, ext)
    print(label, "filename :", fig_filename)
    return fig_filename
# NOTE(review): this is an exact duplicate of filename_to_class defined
# earlier in this file; the second definition silently shadows the first.
# Consider removing one of them.
def filename_to_class(filename: str) -> str:
    """Classify a filename like '00012_90p0.png' by the sign of its encoded angle."""
    angle = float(filename.split("_")[1].split(".")[0].replace("p", "."))
    if angle > 0:
        return "left"
    elif angle < 0:
        return "right"
    else:
        return "forward"
def prepare_dataloaders(dataset_name: str, prefix: str) -> DataLoaders:
    """Build fastai DataLoaders for the named dataset and save a batch preview.

    Labels are derived from filenames via filename_to_class. Relies on the
    module-level DATASET_DIR and VALID_PCT globals (defined elsewhere in this
    module -- not visible here).
    """
    path = DATASET_DIR / dataset_name
    files = get_image_files(path)
    dls = ImageDataLoaders.from_name_func(
        path, files, filename_to_class, valid_pct=VALID_PCT
    )
    dls.show_batch()  # type: ignore
    # Save a preview of one batch for the experiment report.
    plt.savefig(get_fig_filename(prefix, "batch", "pdf", 0))
    return dls  # type: ignore
def train_model(
    dls: DataLoaders,
    model_arch: str,
    pretrained: bool,
    logname: Path,
    modelname: Path,
    prefix: str,
    rep: int,
):
    """Train (or fine-tune) a CNN classifier and save diagnostic figures.

    Parameters:
        dls -- fastai DataLoaders with the train/valid split
        model_arch -- key into the module-level compared_models dict
        pretrained -- fine-tune pretrained weights if True, else train from scratch
        logname -- CSV file receiving the per-epoch training log
        modelname -- export path for the pickled learner
        prefix, rep -- used to name the saved figures
    """
    learn = cnn_learner(
        dls,
        compared_models[model_arch],
        metrics=accuracy,
        pretrained=pretrained,
        cbs=CSVLogger(fname=logname),
    )
    if pretrained:
        learn.fine_tune(NUM_EPOCHS)
    else:
        learn.fit_one_cycle(NUM_EPOCHS)
    # The following line is necessary for pickling (the CSVLogger callback
    # holds an open file handle and cannot be serialized).
    learn.remove_cb(CSVLogger)
    learn.export(modelname)
    learn.show_results()
    plt.savefig(get_fig_filename(prefix, "results", "pdf", rep))
    interp = ClassificationInterpretation.from_learner(learn)
    interp.plot_top_losses(9, figsize=(15, 10))
    plt.savefig(get_fig_filename(prefix, "toplosses", "pdf", rep))
    interp.plot_confusion_matrix(figsize=(10, 10))
    plt.savefig(get_fig_filename(prefix, "confusion", "pdf", rep))
def main(argv):
    """Drive a trained model through a PycastWorld maze and report the outcome.

    argv layout (all optional, positional):
        [0] maze file, [1] model file, [2] show frequency (int),
        [3] model type ('c' classification, 'r' regression, 'cmd', 'rnn'),
        [4] stacked-input flag, [5] diagnostics directory name.

    Returns:
        (frame, success, completion_per): step count, whether the maze was
        navigated successfully, and the percentage of the maze completed.
    """
    # torch.cuda.set_device(1)
    if torch.cuda.is_available():
        # NOTE(review): `device` is assigned but never used below — the GPU is
        # selected via set_device instead.
        device = torch.device('cuda')
        torch.cuda.set_device(1)
    else:
        device = torch.device('cpu')
    maze = argv[0] if len(argv) > 0 else "../Mazes/maze01.txt"
    model = argv[1] if len(argv) > 1 else "../Models/auto-gen-c.pkl"
    show_freq = int(argv[2]) if len(
        argv) > 2 else 0  # frequency to show frames
    directory_name = argv[5] if len(argv) > 5 else "tmp_diagnostics"
    # cmd_in = bool(distutils.util.strtobool(argv[6]) if len(argv) > 6 else False
    print("DIR NAME: " + directory_name)
    model_type = (
        argv[3] if len(argv) > 3 else "c"
    )  # 'c' for classification, 'r' for regression
    stacked = (
        bool(distutils.util.strtobool(argv[4])) if len(argv) > 4 else False
    )  # True for stacked input
    world = PycastWorld(224, 224, maze)
    # Load the model: ConvRNN state dict for 'cmd', a fastai learner otherwise.
    if model_type == "cmd":
        model_inf = ConvRNN()
        model_inf.load_state_dict(torch.load(model))
    else:
        path = Path("../")
        print("Model: " + model)
        model_inf = load_learner(model, cpu=False)
        # model_inf = torch.load(model, "cuda:3")
        # if "classification-resnet50-pretrained-0.pkl" in
        model_inf.eval()
    prev_move = None
    prev_image_data = None
    frame = 0
    frame_freq = 1
    num_static = 0
    prev_x, prev_y = world.x(), world.y()
    animation_frames = []
    outcome = "At goal? "
    stuck = False
    # Initialize maximum number of steps in case the robot travels in a
    # completely incorrect direction
    max_steps = 3500
    # Initialize Maze Check
    maze_rvs, _, _, maze_directions, _ = read_maze_file(maze)
    start_x, start_y, _ = maze_directions[0]
    end_x, end_y, _ = maze_directions[-1]
    _, maze_path = bfs_dist_maze(maze_rvs, start_x, start_y, end_x, end_y)
    on_path = is_on_path(maze_path, int(world.x()), int(world.y()))
    print("Predicting...")
    # Main control loop: stops at the goal, after 5 static checks, or when
    # the robot leaves the BFS shortest path.
    while not world.at_goal() and num_static < 5 and on_path:
        # Get image
        image_data = np.array(world)
        # Convert image_data and give to network
        if model_type == "c":
            if stacked:
                move = model_inf.predict(stacked_input(prev_image_data, image_data))[0]
            else:
                with model_inf.no_bar():
                    move = model_inf.predict(image_data)[0]
        elif model_type == "r":
            if stacked:
                pred_coords, _, _ = model_inf.predict(stacked_input(prev_image_data, image_data))
            else:
                # image_data = torch.from_numpy(image_data)
                pred_coords, _, _ = model_inf.predict(image_data)
            move = reg_predict(pred_coords)
        elif model_type == "cmd":
            model_inf.eval()
            # Predict: the previous move is fed back as a command token.
            tmp_move_indx = 2 if prev_move == "straight" else 1 if prev_move == "right" else 0
            img = (tensor(image_data)/255).permute(2, 0, 1).unsqueeze(0)
            cmd = tensor([tmp_move_indx])
            output = model_inf((img, cmd))
            # Assuming we always get batches
            if output.size()[0] > 0:
                for i in range(output.size()[0]):
                    # Getting the predicted most probable move
                    action_index = torch.argmax(output[i])
                    move = 'left' if action_index == 0 else 'right' if action_index == 1 else 'straight'
            else:
                # is there any reason for us to believe batch sizes can be empty?
                move = 'straight'
        elif model_type == "rnn":
            model_inf.eval()
            # img = (tensor(image_data)/255).permute(2, 0, 1).unsqueeze(0).unsqueeze(0)
            move = model_inf.predict(image_data)[0]
            # output = model_inf(img)
            # Assuming we always get batches
            # for i in range(output.size()[0]):
            # # Getting the predicted most probable move
            # action_index = torch.argmax(output[i])
            # move = 'left' if action_index == 0 else 'right' if action_index == 1 else 'straight'
        # Suppress immediate left/right oscillation by forcing straight.
        if move == "left" and prev_move == "right":
            move = "straight"
        elif move == "right" and prev_move == "left":
            move = "straight"
        # Move in world
        if move == "straight" or move == "forward":
            world.walk(Walk.Forward)
            world.turn(Turn.Stop)
        elif move == "left":
            world.walk(Walk.Stop)
            world.turn(Turn.Left)
        else:
            world.walk(Walk.Stop)
            world.turn(Turn.Right)
        prev_move = move
        world.update()
        curr_x, curr_y = round(world.x(), 5), round(world.y(), 5)
        # Every show_freq frames, check whether the robot has left its cell;
        # num_static counts consecutive checks with no cell change.
        if show_freq != 0 and frame % show_freq == 0:
            if int(curr_x) == int(prev_x) and int(curr_y) == int(prev_y):
                num_static += 1
            else:
                maze_path.remove((int(prev_x), int(prev_y)))
                num_static = 0
            prev_x = curr_x
            prev_y = curr_y
        # if frame % frame_freq == 0:
        #     animation_frames.append(image_data.copy())
        on_path = is_on_path(maze_path, int(curr_x), int(curr_y))
        frame += 1
        prev_image_data = image_data
        if frame == max_steps:
            print("Exceeds step limit")
            break
    # this chunk gets the completion percentage
    lost = False
    if num_static >= 5:
        stuck = True
    if frame >= max_steps:
        lost = True
    outcome = (
        "At Goal? "
        + str(world.at_goal())
        + "\n Stuck? "
        + str(stuck)
        + "\n Exceed step limit? "
        + str(lost)
        + "\n On path? "
        + str(on_path)
    )
    print(outcome)
    completion_per = percent_through_maze(
        maze_rvs, int(world.x()), int(
            world.y()), start_x, start_y, end_x, end_y
    )
    animate(animation_frames, model, directory_name)
    if num_static >= 5 and not world.at_goal():  # model failed to navigate maze
        return frame, False, completion_per
    else:  # model successfully navigated maze
        return frame, True, completion_per
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"sys.path.append",
"numpy.arctan2",
"numpy.zeros",
"numpy.array",
"MazeUtils.bfs_dist_maze",
"MazeUtils.read_maze_file",
"pycaster.PycastWorld"
] | [((298, 331), 'sys.path.append', 'sys.path.append', (['"""../PycastWorld"""'], {}), "('../PycastWorld')\n", (313, 331), False, 'import sys\n'), ((378, 406), 'sys.path.append', 'sys.path.append', (['"""../Models"""'], {}), "('../Models')\n", (393, 406), False, 'import sys\n'), ((407, 436), 'sys.path.append', 'sys.path.append', (['"""../MazeGen"""'], {}), "('../MazeGen')\n", (422, 436), False, 'import sys\n'), ((523, 554), 'sys.path.append', 'sys.path.append', (['"""../Notebooks"""'], {}), "('../Notebooks')\n", (538, 554), False, 'import sys\n'), ((2354, 2384), 'numpy.array', 'np.array', (['img1'], {'dtype': 'np.uint8'}), '(img1, dtype=np.uint8)\n', (2362, 2384), True, 'import numpy as np\n'), ((2400, 2430), 'numpy.array', 'np.array', (['img2'], {'dtype': 'np.uint8'}), '(img2, dtype=np.uint8)\n', (2408, 2430), True, 'import numpy as np\n'), ((2522, 2557), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'np.uint8'}), '(new_shape, dtype=np.uint8)\n', (2530, 2557), True, 'import numpy as np\n'), ((2869, 2904), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'np.uint8'}), '(new_shape, dtype=np.uint8)\n', (2877, 2904), True, 'import numpy as np\n'), ((7259, 7286), 'pycaster.PycastWorld', 'PycastWorld', (['(224)', '(224)', 'maze'], {}), '(224, 224, maze)\n', (7270, 7286), False, 'from pycaster import PycastWorld, Turn, Walk\n'), ((8070, 8090), 'MazeUtils.read_maze_file', 'read_maze_file', (['maze'], {}), '(maze)\n', (8084, 8090), False, 'from MazeUtils import read_maze_file, percent_through_maze, bfs_dist_maze, is_on_path\n'), ((8197, 8252), 'MazeUtils.bfs_dist_maze', 'bfs_dist_maze', (['maze_rvs', 'start_x', 'start_y', 'end_x', 'end_y'], {}), '(maze_rvs, start_x, start_y, end_x, end_y)\n', (8210, 8252), False, 'from MazeUtils import read_maze_file, percent_through_maze, bfs_dist_maze, is_on_path\n'), ((8453, 8468), 'numpy.array', 'np.array', (['world'], {}), '(world)\n', (8461, 8468), True, 'import numpy as np\n'), ((3169, 3211), 'numpy.arctan2', 
'np.arctan2', (['pred_coords[1]', 'pred_coords[0]'], {}), '(pred_coords[1], pred_coords[0])\n', (3179, 3211), True, 'import numpy as np\n')] |
# coding: utf-8
""" Neural network with 1 hidden layer for MNIST handwritten digits recognition
===========
PRESENTATION
This Python code is an example of a simple artificial neural network
written from scratch using only :
- the numpy package (for array manipulation)
- the mnist module (to import the database)
Make sure these two modules (from the Pypi library) are installed.
The MNIST database of handwritten digits is used to train the network
===========
STRUCTURE OF THE NEURAL NETWORK
This is a forward propagating network with 3 layers :
- the input layer has 784 nodes (28 x 28 input images are flattened)
- the hidden layer has 16 nodes
- the output layer has 10 nodes
Activation functions used:
- Sigmoid for the hidden layer
- Softmax for the output layer
The loss is calculated with cross entropy
===========
USAGE GUIDE
1. Run the train() function to train the neural network
For each iteration a batch of 32 images is processed
At the end of each iteration the weights and biases are updated
2. Run the accuracy() function to see the network's performance
3. Feel free to change the network's parameters and try to improve accuracy
===========
NOTATIONS
x : input layer
h : hidden layer (before activation)
ha : hidden layer (after activation)
y : output layer (before activation)
ya : output layer (after activation)
w1, w2 : weight matrices 1 and 2
b1, b2 : bias vectors 1 and 2
(u represents one of the parameters above)
len_u : length of vector u
shape_u : shape of matrix u
d_u : derivative of the loss function with respect to u (for a single result)
sum_d_u : derivative of the loss function with respect to u for multiple results in a batch
batch_size : number of training samples per batch
n_iterations : total number of iterations for the training process
learn_r : learn rate
n_tests : number of image used to calculate the accuracy
t : target digit (an integer between 0 and 9)
===========
"""
import numpy as np
import mnist
# =====================
# Collecting the MNIST dataset of handwritten digits
# (the mnist module downloads/caches the dataset on first use)
# =====================
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist.test_images()
test_labels = mnist.test_labels()
# =====================
# Neural network
# =====================
# Length of each layers (input x, hidden h, output y)
len_x = 28 * 28
len_h = 16
len_y = 10
# Shapes of weight matrices
shape_w1 = (len_x, len_h)
shape_w2 = (len_h, len_y)
# Initialization of weight matrices (w1 and w2) with random numbers,
# scaled by 1/sqrt(fan-in) to keep initial activations small
w1 = np.random.uniform(-1, 1, shape_w1) / np.sqrt(len_x)
w2 = np.random.uniform(-1, 1, shape_w2) / np.sqrt(len_h)
# Initialization of bias vectors (b1 and b2) with zeros
b1 = np.full(len_h, 0.)
b2 = np.full(len_y, 0.)
# Training parameters
n_iterations = 200
batch_size = 32
learn_r = 0.05
# Number of test images used to calculate the accuracy
n_tests = 500
def forward_propagation(image):
    """Run one image through the network.

    Returns the tuple (x, h, ha, y, ya): input layer, hidden layer before
    and after sigmoid activation, output layer before and after softmax.
    Relies on the module-level weights and biases w1, w2, b1, b2.
    """
    # input layer: flatten the 28x28 image and scale pixels to [0, 1]
    layer_in = image.flatten() / 255
    # hidden layer with sigmoid activation
    hidden = np.dot(layer_in, w1) + b1
    hidden_act = 1 / (1 + np.exp(-hidden))
    # output layer with softmax activation
    out = np.dot(hidden_act, w2) + b2
    out_exp = np.exp(out)
    out_act = out_exp / out_exp.sum()
    return layer_in, hidden, hidden_act, out, out_act
def loss_function(ya, t):
    """Cross-entropy loss for a given output ``ya`` and target digit ``t``.

    Diagnostic helper only: train() derives the loss gradients directly in
    backpropagation() and never calls this function.
    """
    log_likelihood = np.log(ya[t])
    return -log_likelihood
def backpropagation(x, h, ha, ya, t):
    """Gradients of the cross-entropy loss w.r.t. w1, w2, b1, b2.

    Uses the module-level weight matrix w2.  Note that ``ya`` is modified
    in place: the combined softmax/cross-entropy gradient is ya with 1
    subtracted at the target index.
    """
    grad_b2 = ya
    grad_b2[t] -= 1
    grad_w2 = np.outer(ha, grad_b2)
    # chain rule through the sigmoid: ha * (1 - ha) is its derivative
    grad_b1 = np.dot(w2, grad_b2) * ha * (1 - ha)
    grad_w1 = np.outer(x, grad_b1)
    return grad_w1, grad_w2, grad_b1, grad_b2
def train():
    """Mini-batch gradient descent over the MNIST training set.

    Runs n_iterations batches of batch_size consecutive images; gradients
    are accumulated over each batch and the module-level weights/biases
    are updated once per batch with learning rate learn_r.
    """
    # This function updates the weights and biases to try to minimize the loss
    for k in range(n_iterations):
        # Initialization of the derivatives for the batch
        sum_d_w1 = np.zeros(shape_w1)
        sum_d_w2 = np.zeros(shape_w2)
        sum_d_b1 = np.zeros(len_h)
        sum_d_b2 = np.zeros(len_y)
        for i in range(batch_size):
            # index of the training image and label
            index = k * batch_size + i
            image = train_images[index]
            t = train_labels[index]
            x, h, ha, y, ya = forward_propagation(image)
            d_w1, d_w2, d_b1, d_b2 = backpropagation(x, h, ha, ya, t)
            sum_d_w1 += d_w1
            sum_d_w2 += d_w2
            sum_d_b1 += d_b1
            sum_d_b2 += d_b2
        # Updating weights and biases
        w1[:] -= learn_r * sum_d_w1
        w2[:] -= learn_r * sum_d_w2
        b1[:] -= learn_r * sum_d_b1
        b2[:] -= learn_r * sum_d_b2
        # The [:] notation is used to modify w1, w2, b1 and b2 in place
        # Without this notation they are considered as undefined local variables
def test():
    """Classify one random image from the test set.

    Returns True when the network's most probable digit matches the label.
    """
    idx = np.random.randint(0, len(test_labels))
    sample = test_images[idx]
    target = test_labels[idx]
    *_, ya = forward_propagation(sample)
    return ya.argmax() == target
def accuracy():
    """Fraction of n_tests random test images the network classifies correctly."""
    hits = sum(1 for _ in range(n_tests) if test())
    return hits / n_tests
| [
"mnist.train_images",
"numpy.full",
"mnist.train_labels",
"numpy.random.uniform",
"numpy.outer",
"numpy.log",
"numpy.zeros",
"mnist.test_labels",
"numpy.exp",
"mnist.test_images",
"numpy.dot",
"numpy.sqrt"
] | [((2136, 2156), 'mnist.train_images', 'mnist.train_images', ([], {}), '()\n', (2154, 2156), False, 'import mnist\n'), ((2172, 2192), 'mnist.train_labels', 'mnist.train_labels', ([], {}), '()\n', (2190, 2192), False, 'import mnist\n'), ((2208, 2227), 'mnist.test_images', 'mnist.test_images', ([], {}), '()\n', (2225, 2227), False, 'import mnist\n'), ((2242, 2261), 'mnist.test_labels', 'mnist.test_labels', ([], {}), '()\n', (2259, 2261), False, 'import mnist\n'), ((2748, 2767), 'numpy.full', 'np.full', (['len_h', '(0.0)'], {}), '(len_h, 0.0)\n', (2755, 2767), True, 'import numpy as np\n'), ((2772, 2791), 'numpy.full', 'np.full', (['len_y', '(0.0)'], {}), '(len_y, 0.0)\n', (2779, 2791), True, 'import numpy as np\n'), ((2577, 2611), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'shape_w1'], {}), '(-1, 1, shape_w1)\n', (2594, 2611), True, 'import numpy as np\n'), ((2614, 2628), 'numpy.sqrt', 'np.sqrt', (['len_x'], {}), '(len_x)\n', (2621, 2628), True, 'import numpy as np\n'), ((2634, 2668), 'numpy.random.uniform', 'np.random.uniform', (['(-1)', '(1)', 'shape_w2'], {}), '(-1, 1, shape_w2)\n', (2651, 2668), True, 'import numpy as np\n'), ((2671, 2685), 'numpy.sqrt', 'np.sqrt', (['len_h'], {}), '(len_h)\n', (2678, 2685), True, 'import numpy as np\n'), ((3295, 3304), 'numpy.exp', 'np.exp', (['y'], {}), '(y)\n', (3301, 3304), True, 'import numpy as np\n'), ((3782, 3800), 'numpy.outer', 'np.outer', (['ha', 'd_b2'], {}), '(ha, d_b2)\n', (3790, 3800), True, 'import numpy as np\n'), ((3856, 3873), 'numpy.outer', 'np.outer', (['x', 'd_b1'], {}), '(x, d_b1)\n', (3864, 3873), True, 'import numpy as np\n'), ((3147, 3160), 'numpy.dot', 'np.dot', (['x', 'w1'], {}), '(x, w1)\n', (3153, 3160), True, 'import numpy as np\n'), ((3263, 3277), 'numpy.dot', 'np.dot', (['ha', 'w2'], {}), '(ha, w2)\n', (3269, 3277), True, 'import numpy as np\n'), ((3619, 3632), 'numpy.log', 'np.log', (['ya[t]'], {}), '(ya[t])\n', (3625, 3632), True, 'import numpy as np\n'), ((4123, 4141), 
'numpy.zeros', 'np.zeros', (['shape_w1'], {}), '(shape_w1)\n', (4131, 4141), True, 'import numpy as np\n'), ((4161, 4179), 'numpy.zeros', 'np.zeros', (['shape_w2'], {}), '(shape_w2)\n', (4169, 4179), True, 'import numpy as np\n'), ((4199, 4214), 'numpy.zeros', 'np.zeros', (['len_h'], {}), '(len_h)\n', (4207, 4214), True, 'import numpy as np\n'), ((4234, 4249), 'numpy.zeros', 'np.zeros', (['len_y'], {}), '(len_y)\n', (4242, 4249), True, 'import numpy as np\n'), ((3184, 3194), 'numpy.exp', 'np.exp', (['(-h)'], {}), '(-h)\n', (3190, 3194), True, 'import numpy as np\n'), ((3812, 3828), 'numpy.dot', 'np.dot', (['w2', 'd_b2'], {}), '(w2, d_b2)\n', (3818, 3828), True, 'import numpy as np\n')] |
#from https://github.com/sklam/numba-example-wavephysics
#setup: N=4000
#run: wave(N)
import numpy as np
from math import ceil
def physics(masspoints, dt, plunk, which):
    """Advance the mass-spring chain one damped-Verlet step, in place.

    masspoints : float array of shape (2, N, 2); row 0 holds the current
        positions, row 1 the previous positions.  Both rows are updated.
    dt : integration time step.
    plunk : extra vertical force applied to one point (the "pluck").
    which : index of the point receiving the plunk force.
    """
    pos_prev = masspoints[1]
    pos_cur = masspoints[0]
    n_points = pos_cur.shape[0]
    # spring constant, velocity damping and point mass (benchmark constants)
    HOOKE_K = 2100000.
    DAMPING = 0.0001
    MASS = .01
    # accumulate Hooke's-law forces between neighbouring points
    force = np.zeros((n_points, 2))
    for i in range(1, n_points):
        dx, dy = pos_cur[i] - pos_cur[i - 1]
        dist = np.sqrt(dx**2 + dy**2)
        assert dist != 0
        fmag = -HOOKE_K * dist
        cosine = dx / dist
        sine = dy / dist
        fvec = np.array([fmag * cosine, fmag * sine])
        force[i - 1] -= fvec
        force[i] += fvec
    # the endpoints are pinned; apply the pluck to the chosen point
    force[0] = force[-1] = 0, 0
    force[which][1] += plunk
    accel = force / MASS
    # damped Verlet integration
    pos_new = (2 - DAMPING) * pos_cur - (1 - DAMPING) * pos_prev + accel * (dt**2)
    masspoints[1] = pos_cur
    masspoints[0] = pos_new
#pythran export wave(int)
def wave(PARTICLE_COUNT):
    """Build the string state and apply one integration step of the pluck.

    Returns the (2, count, 2) masspoints array after a single call to
    ``physics`` with the pluck applied at the middle point.
    """
    SUBDIVISION = 300
    FRAMERATE = 60
    count = PARTICLE_COUNT
    width, height = 1200, 400
    masspoints = np.empty((2, count, 2), np.float64)
    # evenly spaced x positions built by sequential accumulation
    # (cumsum performs the same additions as the original running-sum loop)
    spacing = np.zeros(count, np.float64)
    spacing[1:] = float(width) / count
    initpos = np.cumsum(spacing)
    masspoints[:, :, 0] = initpos
    masspoints[:, :, 1] = height / 2
    f = 15
    plunk_pos = count // 2
    physics(masspoints, 1. / (SUBDIVISION * FRAMERATE), f, plunk_pos)
    return masspoints
| [
"numpy.array",
"numpy.empty",
"numpy.zeros",
"numpy.sqrt"
] | [((323, 339), 'numpy.zeros', 'np.zeros', (['(N, 2)'], {}), '((N, 2))\n', (331, 339), True, 'import numpy as np\n'), ((1016, 1051), 'numpy.empty', 'np.empty', (['(2, count, 2)', 'np.float64'], {}), '((2, count, 2), np.float64)\n', (1024, 1051), True, 'import numpy as np\n'), ((1066, 1093), 'numpy.zeros', 'np.zeros', (['count', 'np.float64'], {}), '(count, np.float64)\n', (1074, 1093), True, 'import numpy as np\n'), ((410, 436), 'numpy.sqrt', 'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (417, 436), True, 'import numpy as np\n'), ((536, 574), 'numpy.array', 'np.array', (['[fmag * cosine, fmag * sine]'], {}), '([fmag * cosine, fmag * sine])\n', (544, 574), True, 'import numpy as np\n')] |
from torch_geometric.data import Data
import numpy as np
import torch
from tqdm import tqdm
from torch_geometric.data import InMemoryDataset
class EmotionDataset(InMemoryDataset):
    """In-memory PyTorch-Geometric dataset wrapping EEG emotion samples.

    Each sample becomes a ``Data`` object with node features ``x`` (one row
    per EEG node), label ``y`` and node positions ``pos``.
    NOTE(review): the instance attributes are assigned *before*
    ``super().__init__`` because ``InMemoryDataset`` may invoke ``process()``
    during construction, and ``process()`` reads them — confirm against the
    installed torch_geometric version.
    """
    def __init__(self, config, stage, root, sub_idx, pos=None, X=None, Y=None, edge_index=None,
                 transform=None, pre_transform=None):
        self.stage = stage # Train or test
        # train setting
        self.subjects = config['subjects']
        self.feature = config["feature"]
        self.dataset = config['dataset']
        self.sub_idx = sub_idx
        # train data
        self.X = X
        self.Y = Y
        self.edge_index = edge_index
        self.pos = pos # position of EEG nodes
        super().__init__(root, transform, pre_transform)
        # load the collated tensors written by process()
        self.data, self.slices = torch.load(self.processed_paths[0])
    @ property
    def raw_file_names(self):
        # no raw files: the data arrives pre-loaded through the constructor
        return []
    @ property
    def processed_file_names(self):
        # one cache file per (dataset, feature, stage, CV fold) combination
        return ['./V_{:s}_{:s}_{:s}_CV{:.0f}_{:.0f}.dataset'.format(
            self.dataset, self.feature, self.stage, self.subjects, self.sub_idx)]
    def download(self):
        # nothing to download; see raw_file_names
        pass
    def process(self):
        """Convert the per-sample arrays in self.X/self.Y into Data objects."""
        data_list = []
        # process by samples
        num_samples = np.shape(self.Y)[0]
        for sample_id in tqdm(range(num_samples)):
            x = self.X[sample_id, :, :]
            x = torch.FloatTensor(x)
            y = torch.FloatTensor(self.Y[sample_id, :])
            data = Data(x=x, y=y, pos=self.pos)
            data_list.append(data)
        if self.pre_filter is not None:
            data_list = [data for data in data_list if self.pre_filter(data)]
        if self.pre_transform is not None:
            data_list = [self.pre_transform(data) for data in data_list]
        data, slices = self.collate(data_list)
        torch.save((data, slices), self.processed_paths[0])
| [
"torch.load",
"torch.FloatTensor",
"torch.save",
"numpy.shape",
"torch_geometric.data.Data"
] | [((792, 827), 'torch.load', 'torch.load', (['self.processed_paths[0]'], {}), '(self.processed_paths[0])\n', (802, 827), False, 'import torch\n'), ((1813, 1864), 'torch.save', 'torch.save', (['(data, slices)', 'self.processed_paths[0]'], {}), '((data, slices), self.processed_paths[0])\n', (1823, 1864), False, 'import torch\n'), ((1235, 1251), 'numpy.shape', 'np.shape', (['self.Y'], {}), '(self.Y)\n', (1243, 1251), True, 'import numpy as np\n'), ((1362, 1382), 'torch.FloatTensor', 'torch.FloatTensor', (['x'], {}), '(x)\n', (1379, 1382), False, 'import torch\n'), ((1399, 1438), 'torch.FloatTensor', 'torch.FloatTensor', (['self.Y[sample_id, :]'], {}), '(self.Y[sample_id, :])\n', (1416, 1438), False, 'import torch\n'), ((1458, 1486), 'torch_geometric.data.Data', 'Data', ([], {'x': 'x', 'y': 'y', 'pos': 'self.pos'}), '(x=x, y=y, pos=self.pos)\n', (1462, 1486), False, 'from torch_geometric.data import Data\n')] |
# Hodographic shaping in SI units
# <NAME>, 2019
# Based on
# Paper: [Gondelach 2015]
# Thesis: [Gondelach 2012]
import time
import numpy as np
import matplotlib as mlt
import matplotlib.pyplot as plt
import pykep as pk
import scipy as sci
from conversions import *
from utils import *
from shapingFunctions import shapeFunctions
from shapingFunctions import shapeFunctionsFree
from integration import integrate
from patchedTrajectoryUtils import ephemeris
# main computation
class hodographicShaping(object):
    """
    Class implementing the hodographic shaping method
    Creating an object creates a trajectory, whose parameters can be computed
    by the methods
    Call methods in the order they are defined, transverseShaping needs the
    result from radialShaping
    Call status last to show results of computation
    Example:    transfer = hodographicShaping()
                transfer.shapingRadial()
                transfer.shapingVertical()
                transfer.shapingTransverse()
                transfer.assembleThrust()
                transfer.checkBoundaryConditions()
                transfer.evaluate()
                transfer.status()
    """

    def __init__(self,
                 departureState,
                 arrivalState,
                 departureDate = 7400,      # only used in status and plotting
                 tof = 500,
                 N = 0,
                 departureBody = 'earth',   # only used in status and plotting
                 arrivalBody = 'mars',      # only used in status and plotting
                 rShape =           'CPowPow2_scaled',
                 thetaShape =       'CPowPow2_scaled',
                 zShape =           'CosR5P3CosR5P3SinR5_scaled',
                 rShapeFree =       'PSin05PCos05_scaled',
                 thetaShapeFree =   'PSin05PCos05_scaled',
                 zShapeFree =       'P4CosR5P4SinR5_scaled',
                 rFreeC =       [0, 0],
                 thetaFreeC =   [0, 0],
                 zFreeC =       [0, 0]
                 ):
        '''
        Initializes the trajectory computation object

        departureState/arrivalState: cylindrical state vectors
            [r, theta, z, rDot, vTheta, zDot] in SI units
        tof: time of flight in days
        N: number of full orbital revolutions (must be an integer)
        *Shape / *ShapeFree: shorthand names of the base/free velocity
            functions, resolved by shapeFunctions/shapeFunctionsFree
        *FreeC: coefficients of the free shape terms (set by an optimizer)
        '''
        # store time at start of computation (used by status)
        self.timeStart = time.process_time()
        # number of orbital revolutions
        self.N = N
        # catch instances where N is not an integer
        if (N%1 != 0):
            print('ERROR: N has to be an integer. N =', N)
        # time of flight in days
        self.tof = tof
        # pick departure date and ToF/arrival date
        # dates in mjd2000
        self.jdDep = departureDate
        self.jdArr = self.jdDep + self.tof
        # chosen shaping functions
        self.rShape = rShape
        self.thetaShape = thetaShape
        self.zShape = zShape
        self.rShapeFree = rShapeFree
        self.thetaShapeFree = thetaShapeFree
        self.zShapeFree = zShapeFree
        # free coefficients, set externally (by optimizer)
        self.rFreeC = rFreeC
        self.thetaFreeC = thetaFreeC
        self.zFreeC = zFreeC
        # get state vectors (for trajectory computation)
        self.xDep = departureState
        self.xArr = arrivalState
        self.rDepCyl = self.xDep[0:3]
        self.rArrCyl = self.xArr[0:3]
        self.vDepCyl = self.xDep[3:6]
        self.vArrCyl = self.xArr[3:6]
        # planets (for plotting and convenience)
        self.departureBody = departureBody
        self.arrivalBody = arrivalBody
        # polar angle at departure, transfer angle and arrival
        self.psiTransfer = self.rArrCyl[1] - self.rDepCyl[1]
        if self.psiTransfer < 0:
            # wrap the transfer angle into [0, 2 pi)
            self.psiTransfer = self.psiTransfer + 2*np.pi
        self.thetaArr = self.psiTransfer + N*2*np.pi
        # time of flight in seconds
        self.tofSec = self.tof * 24 * 60 * 60

    def status(self, printBC=False, precision=2):
        '''
        Retrieve and print the status of the computation (settings, results,...)
        The boundary conditions can be toggled on and off
        Call in the end of the computation when all results are available

        NOTE: mutates numpy's global print options via set_printoptions.
        '''
        np.set_printoptions(precision=precision)
        print('###############################################################')
        print(f'Hodographic Shaping Problem: {self.departureBody} to {self.arrivalBody}')
        print('\nSettings')
        print('Departure state: ', np.array(self.xDep))
        print('Arrival state:\t ', np.array(self.xArr))
        print('Departure date:\t', pk.epoch(self.jdDep, 'mjd2000'))
        print('Departure date:\t', np.array(self.jdDep), 'mjd2000')
        print('Arrival date:\t', pk.epoch(self.jdArr, 'mjd2000'))
        print('Time of Flight:\t', np.array(self.tof), ' days')
        print('Revolutions:\t', self.N)
        print('Transfer angle: ', round(self.psiTransfer*180/np.pi, 2), ' deg')
        print('Radial velocity:\t', self.rShape)
        print('Traverse velocity:\t', self.thetaShape)
        print('Axial velocity:\t\t', self.zShape)
        print('\nFree part of shape (input)')
        print('Radial velocity free:\t' + self.rShapeFree)
        print('Traverse velocity free:\t' + self.thetaShapeFree)
        print('Axial velocity free:\t' + self.zShapeFree)
        print('Radial coefficients free:\t', np.array(self.rFreeC))
        print('Transverse coefficients free:\t', np.array(self.thetaFreeC))
        print('Vertical coefficients free:\t', np.array(self.zFreeC))
        print('\nVelocity functions')
        print('Radial coefficients:\t\t', self.cRadial)
        print('Transverse coefficients:\t', self.cTheta)
        print('Vertical coefficients:\t\t', self.cVertical)
        print('Position offsets (r0, theta0, z0): ', np.array([self.rDepCyl[0],
                self.rDepCyl[1], self.rDepCyl[2]]))
        # the boundary-check attributes only exist after
        # checkBoundaryConditions() has been called
        try:
            if self.velCompare:
                print('\nBoundary condition check:')
                print('Velocity boundary conditions are satisfied!',
                        ' Difference < ', self.velTolAbs, ' m/s')
            else:
                print('\nBoundary condition check:')
                print('ERROR: Velocity boundary conditions are not satisfied!',
                        ' Difference > ', self.velTolAbs, ' m/s')
            if self.posCompare:
                print('Position boundary conditions are satisfied!',
                        ' Difference < ', self.posTolAbs, 'm and rad')
            else:
                print('ERROR: Position boundary conditions are not satisfied!',
                        ' Difference > ', self.posTolAbs, ' m and rad')
        except AttributeError:
            print('\nFullfilment of boundary conditions was not',
                    'explicitly checked.')
        if printBC:
            print('\nBoundary conditions:')
            print('Pos planet dep (r, theta, z):\t',
                np.array( [self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2]] ))
            print('Pos shape dep (r, theta, z):\t',
                np.array( [self.r(0), self.t(0), self.z(0)] ))
            print('Pos planet arr (r, theta, z):\t',
                np.array( [self.rArrCyl[0], self.rArrCyl[1], self.rArrCyl[2]] ))
            print('Pos shape arr (r, theta, z):\t',
                np.array( [self.r(self.tofSec),
                           self.t(self.tofSec), self.z(self.tofSec)] ))
            print('Vel planet dep (rDot, vTheta, zDot):\t',
                np.array( [self.vDepCyl[0], self.vDepCyl[1], self.vDepCyl[2]] ))
            print('Vel shape dep (rDot, vTheta, zDot):\t',
                np.array( [self.rDot(0), self.tDot(0), self.zDot(0)] ))
            print('Vel planet arr (rDot, vTheta, zDot):\t',
                np.array( [self.vArrCyl[0], self.vArrCyl[1], self.vArrCyl[2]] ))
            print('Vel shape arr (rDot, vTheta, zDot):\t',
                np.array( [self.rDot(self.tofSec),
                           self.tDot(self.tofSec), self.zDot(self.tofSec)] ))
        # computation time
        print('\nComputation time')
        timeEnd = time.process_time()
        print('Computing this trajectory took {:.3f} ms'\
            .format((timeEnd - self.timeStart)*1000.0))
        # print results
        print('\nResults')
        print('DeltaV:\t\t', round(self.deltaV/1e3, 5), ' km/s')
        print('Max thrust:\t', round(self.maxThrust, 7), ' m/s^2')
        print('###############################################################')

    def shapingRadial(self):
        '''
        Compute coefficients for the radial shape
        Solves the 3x3 linear system fixing the radial velocity at departure
        and arrival and the net radial displacement, then assembles
        r(t), rDot(t) and rDDot(t) as callables.
        '''
        rFunc = shapeFunctions(self.N, shorthand=self.rShape, tMax=self.tofSec)
        rFuncFree = shapeFunctionsFree(self.N, self.rFreeC,
                            shorthand=self.rShapeFree, tMax=self.tofSec)
        # compute parameters
        A = np.array([[rFunc.v1(0), rFunc.v2(0), rFunc.v3(0)],
                      [rFunc.v1(self.tofSec), rFunc.v2(self.tofSec),
                       rFunc.v3(self.tofSec)],
                      [rFunc.Iv1(self.tofSec) - rFunc.Iv1(0),
                       rFunc.Iv2(self.tofSec) - rFunc.Iv2(0),
                       rFunc.Iv3(self.tofSec) - rFunc.Iv3(0)]])
        b = np.array([self.vDepCyl[0] - rFuncFree.v(0),
                      self.vArrCyl[0] - rFuncFree.v(self.tofSec),
                      self.rArrCyl[0] - self.rDepCyl[0]
                      - (rFuncFree.Iv(self.tofSec) - rFuncFree.Iv(0))])
        self.cRadial = np.linalg.solve(A, b)
        # assemble shape
        self.r = lambda t: (self.rDepCyl[0] + self.cRadial[0] * rFunc.Iv1(t)
                            + self.cRadial[1] * rFunc.Iv2(t)
                            + self.cRadial[2] * rFunc.Iv3(t)
                            + rFuncFree.Iv(t))
        self.rDot = lambda t: (self.cRadial[0] * rFunc.v1(t)
                               + self.cRadial[1] * rFunc.v2(t)
                               + self.cRadial[2] * rFunc.v3(t)
                               + rFuncFree.v(t))
        self.rDDot = lambda t: (self.cRadial[0] * rFunc.Dv1(t)
                                + self.cRadial[1] * rFunc.Dv2(t)
                                + self.cRadial[2] * rFunc.Dv3(t)
                                + rFuncFree.Dv(t))

    def shapingVertical(self):
        '''
        Compute coefficients for the vertical (z) shape
        Same structure as shapingRadial, applied to the out-of-plane motion.
        '''
        zFunc = shapeFunctions(self.N, shorthand=self.zShape, tMax=self.tofSec)
        zFuncFree = shapeFunctionsFree(self.N, self.zFreeC,
                            shorthand=self.zShapeFree, tMax=self.tofSec)
        A = np.array([[zFunc.v1(0), zFunc.v2(0), zFunc.v3(0)],
                      [zFunc.v1(self.tofSec), zFunc.v2(self.tofSec),
                       zFunc.v3(self.tofSec)],
                      [zFunc.Iv1(self.tofSec) - zFunc.Iv1(0),
                       zFunc.Iv2(self.tofSec) - zFunc.Iv2(0),
                       zFunc.Iv3(self.tofSec) - zFunc.Iv3(0)]])
        b = np.array([self.vDepCyl[2] - zFuncFree.v(0),
                      self.vArrCyl[2] - zFuncFree.v(self.tofSec),
                      self.rArrCyl[2] - self.rDepCyl[2]
                      - (zFuncFree.Iv(self.tofSec) - zFuncFree.Iv(0))])
        self.cVertical = np.linalg.solve(A, b)
        # assemble shape
        self.z = lambda t: (self.rDepCyl[2] + self.cVertical[0] * zFunc.Iv1(t)
                            + self.cVertical[1] * zFunc.Iv2(t)
                            + self.cVertical[2] * zFunc.Iv3(t)
                            + zFuncFree.Iv(t))
        self.zDot = lambda t: (self.cVertical[0] * zFunc.v1(t)
                               + self.cVertical[1] * zFunc.v2(t)
                               + self.cVertical[2] * zFunc.v3(t)
                               + zFuncFree.v(t))
        self.zDDot = lambda t: (self.cVertical[0] * zFunc.Dv1(t)
                                + self.cVertical[1] * zFunc.Dv2(t)
                                + self.cVertical[2] * zFunc.Dv3(t)
                                + zFuncFree.Dv(t))

    def shapingTransverse(self):
        '''
        Compute coefficients for the transverse (theta) shape
        Requires shapingRadial to have been called (uses self.r).
        The third coefficient is fixed by matching the total polar angle,
        which requires two numerical quadratures.
        '''
        thetaFunc = shapeFunctions(self.N, shorthand=self.thetaShape,
                                   tMax=self.tofSec)
        thetaFuncFree = shapeFunctionsFree(self.N, self.thetaFreeC,
                                   shorthand=self.thetaShapeFree, tMax=self.tofSec)
        # intermediate values; the 2x2 boundary matrix is inverted only once
        vMatInv = np.linalg.inv([[thetaFunc.v1(0), thetaFunc.v2(0)],
                    [thetaFunc.v1(self.tofSec), thetaFunc.v2(self.tofSec)]])
        [K1, K2] = np.dot(vMatInv,
                    [-thetaFunc.v3(0), - thetaFunc.v3(self.tofSec)])
        [L1, L2] = np.dot(vMatInv,
                    [self.vDepCyl[1] - thetaFuncFree.v(0),
                     self.vArrCyl[1] - thetaFuncFree.v(self.tofSec)])
        # cTheta3: fixed by the required transfer angle
        integrand1 = lambda t: (L1*thetaFunc.v1(t) + L2*thetaFunc.v2(t)
                                + thetaFuncFree.v(t))/self.r(t)
        integrand2 = lambda t: (K1*thetaFunc.v1(t) + K2*thetaFunc.v2(t)
                                + thetaFunc.v3(t))/self.r(t)
        int1 = integrate(integrand1, 0, self.tofSec, method='trapz', nSteps=25)
        int2 = integrate(integrand2, 0, self.tofSec, method='trapz', nSteps=25)
        cTheta3 = (self.thetaArr - int1)/(int2)
        # cTheta1 and cTheta2
        cTheta12 = cTheta3 * np.array([K1, K2]) + np.array([L1, L2])
        self.cTheta = np.array([cTheta12[0], cTheta12[1], cTheta3])
        # assemble shape
        self.tDot = lambda t: (self.cTheta[0] * thetaFunc.v1(t)
                               + self.cTheta[1] * thetaFunc.v2(t)
                               + self.cTheta[2] * thetaFunc.v3(t)
                               + thetaFuncFree.v(t))
        self.thetaDot = lambda t: self.tDot(t)/self.r(t)
        self.tDDot = lambda t: (self.cTheta[0] * thetaFunc.Dv1(t)
                                + self.cTheta[1] * thetaFunc.Dv2(t)
                                + self.cTheta[2] * thetaFunc.Dv3(t)
                                + thetaFuncFree.Dv(t))

    def t(self, time):
        '''
        Convenience function to call the polar angle as a function of time
        Computationally inefficient due to numerical integration
        (the parameter shadows the `time` module inside this method only)
        '''
        # compute theta value by integration of thetaDot
        thetaChange = integrate(self.thetaDot, 0, time, method='trapz',
                                nSteps=25)
        thetaFinal = thetaChange + self.rDepCyl[1]
        return thetaFinal

    def assembleThrust(self):
        '''
        Compute the thrust profile from the equations of motion
        See Equation 5.13-16 in [Gondelach, 2012]
        Defines self.fr, self.ft, self.fz and the magnitude self.fTotal
        as callables of time (seconds since departure).
        '''
        # distance from the sun (cylindrical radius and z combined)
        s = lambda t: np.sqrt(self.r(t)**2 + self.z(t)**2)
        self.fr = lambda t: (self.rDDot(t) - self.tDot(t)**2/self.r(t)
                             + pk.MU_SUN/(s(t)**3) * self.r(t))
        self.ft = lambda t: self.tDDot(t) + self.tDot(t)*self.rDot(t)/self.r(t)
        self.fz = lambda t: self.zDDot(t) + pk.MU_SUN/(s(t)**3) * self.z(t)
        self.fTotal = lambda t: (np.sqrt(self.fr(t)**2
                                         + self.ft(t)**2
                                         + self.fz(t)**2))

    def checkBoundaryConditions(self, velTolRel=0.001, velTolAbs=0.1,
                                posTolRel=0.001, posTolAbs=0.1):
        '''
        Check if the boundary conditions are satisfied
        Compare initial and final velocities and positions to the given
        relative/absolute tolerances; results land in self.velCompare and
        self.posCompare (read by status()).
        '''
        self.velTolRel = velTolRel
        self.velTolAbs = velTolAbs
        self.velCompare = np.allclose(
            [self.vDepCyl[0], self.vDepCyl[1], self.vDepCyl[2],
             self.vArrCyl[0], self.vArrCyl[1], self.vArrCyl[2]],
            [self.rDot(0), self.tDot(0), self.zDot(0),
             self.rDot(self.tofSec), self.tDot(self.tofSec),
             self.zDot(self.tofSec)],
            self.velTolRel, self.velTolAbs)
        # compare initial and final positions
        self.posTolRel = posTolRel
        self.posTolAbs = posTolAbs
        # theta may do several revolutions -> subtract these; the arrival
        # angle may fall in either of two adjacent revolutions, so both
        # candidates are tested
        arrivalTheta = self.t(self.tofSec) - self.N * 2 * np.pi
        posCompare1 = np.allclose(
            [self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2],
             self.rArrCyl[0], self.rArrCyl[1], self.rArrCyl[2]],
            [self.r(0), self.t(0), self.z(0),
             self.r(self.tofSec), arrivalTheta, self.z(self.tofSec)],
            self.posTolRel, self.posTolAbs)
        arrivalTheta2 = self.t(self.tofSec) - (self.N+1) * 2 * np.pi
        posCompare2 = np.allclose(
            [self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2],
             self.rArrCyl[0], self.rArrCyl[1], self.rArrCyl[2]],
            [self.r(0), self.t(0), self.z(0),
             self.r(self.tofSec), arrivalTheta2, self.z(self.tofSec)],
            self.posTolRel, self.posTolAbs)
        self.posCompare = posCompare1 or posCompare2

    def evaluate(self, evalThrust=False, nEvalPoints=100, printTime=False):
        '''
        Compute DeltaV and maximum thrust
        By numerically integrating and sampling the thrust profile
        Number of sampling points has a serious impact on performance
        -> Activate thrust evaluation only when needed
        '''
        deltaVtemp = integrate(self.fTotal, 0, self.tofSec,
                            method='trapz', nSteps=25)
        self.deltaV = deltaVtemp
        if printTime==True:
            time1 = time.time()
        # perform grid search at equally spaced sample points
        if evalThrust=='Grid':
            self.maxThrust = np.max(self.fTotal(np.linspace(0, self.tofSec,
                                nEvalPoints)))
        # call local optimizer from scipy (NOT RECOMMENDED as not robust)
        elif evalThrust=='Optimize':
            maxThrustTime = sci.optimize.minimize_scalar(
                                lambda t: -self.fTotal(t),
                                bounds=[0,self.tofSec],
                                method='bounded')
            # BUGFIX: evaluate the thrust at the maximizing *time*
            # (OptimizeResult.x); the original code passed .fun (the
            # negated thrust value) as if it were a time argument.
            self.maxThrust = self.fTotal(maxThrustTime.x)
        # don't look for maximum thrust value
        else:
            self.maxThrust = -1
        # print the measured time spent in this method
        if printTime==True:
            time2 = time.time()
            print(f'Finding maximum of thrust profile took '
                  f'{(time2-time1)*1e3:.3f} ms')
| [
"numpy.set_printoptions",
"shapingFunctions.shapeFunctions",
"time.process_time",
"time.time",
"numpy.array",
"numpy.linspace",
"shapingFunctions.shapeFunctionsFree",
"pykep.epoch",
"integration.integrate",
"numpy.linalg.solve"
] | [((2135, 2154), 'time.process_time', 'time.process_time', ([], {}), '()\n', (2152, 2154), False, 'import time\n'), ((4026, 4066), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': 'precision'}), '(precision=precision)\n', (4045, 4066), True, 'import numpy as np\n'), ((7905, 7924), 'time.process_time', 'time.process_time', ([], {}), '()\n', (7922, 7924), False, 'import time\n'), ((8424, 8487), 'shapingFunctions.shapeFunctions', 'shapeFunctions', (['self.N'], {'shorthand': 'self.rShape', 'tMax': 'self.tofSec'}), '(self.N, shorthand=self.rShape, tMax=self.tofSec)\n', (8438, 8487), False, 'from shapingFunctions import shapeFunctions\n'), ((8508, 8597), 'shapingFunctions.shapeFunctionsFree', 'shapeFunctionsFree', (['self.N', 'self.rFreeC'], {'shorthand': 'self.rShapeFree', 'tMax': 'self.tofSec'}), '(self.N, self.rFreeC, shorthand=self.rShapeFree, tMax=\n self.tofSec)\n', (8526, 8597), False, 'from shapingFunctions import shapeFunctionsFree\n'), ((9297, 9318), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (9312, 9318), True, 'import numpy as np\n'), ((10179, 10242), 'shapingFunctions.shapeFunctions', 'shapeFunctions', (['self.N'], {'shorthand': 'self.zShape', 'tMax': 'self.tofSec'}), '(self.N, shorthand=self.zShape, tMax=self.tofSec)\n', (10193, 10242), False, 'from shapingFunctions import shapeFunctions\n'), ((10263, 10352), 'shapingFunctions.shapeFunctionsFree', 'shapeFunctionsFree', (['self.N', 'self.zFreeC'], {'shorthand': 'self.zShapeFree', 'tMax': 'self.tofSec'}), '(self.N, self.zFreeC, shorthand=self.zShapeFree, tMax=\n self.tofSec)\n', (10281, 10352), False, 'from shapingFunctions import shapeFunctionsFree\n'), ((11021, 11042), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (11036, 11042), True, 'import numpy as np\n'), ((11921, 11988), 'shapingFunctions.shapeFunctions', 'shapeFunctions', (['self.N'], {'shorthand': 'self.thetaShape', 'tMax': 'self.tofSec'}), '(self.N, 
shorthand=self.thetaShape, tMax=self.tofSec)\n', (11935, 11988), False, 'from shapingFunctions import shapeFunctions\n'), ((12041, 12137), 'shapingFunctions.shapeFunctionsFree', 'shapeFunctionsFree', (['self.N', 'self.thetaFreeC'], {'shorthand': 'self.thetaShapeFree', 'tMax': 'self.tofSec'}), '(self.N, self.thetaFreeC, shorthand=self.thetaShapeFree,\n tMax=self.tofSec)\n', (12059, 12137), False, 'from shapingFunctions import shapeFunctionsFree\n'), ((13004, 13068), 'integration.integrate', 'integrate', (['integrand1', '(0)', 'self.tofSec'], {'method': '"""trapz"""', 'nSteps': '(25)'}), "(integrand1, 0, self.tofSec, method='trapz', nSteps=25)\n", (13013, 13068), False, 'from integration import integrate\n'), ((13084, 13148), 'integration.integrate', 'integrate', (['integrand2', '(0)', 'self.tofSec'], {'method': '"""trapz"""', 'nSteps': '(25)'}), "(integrand2, 0, self.tofSec, method='trapz', nSteps=25)\n", (13093, 13148), False, 'from integration import integrate\n'), ((13319, 13364), 'numpy.array', 'np.array', (['[cTheta12[0], cTheta12[1], cTheta3]'], {}), '([cTheta12[0], cTheta12[1], cTheta3])\n', (13327, 13364), True, 'import numpy as np\n'), ((14201, 14261), 'integration.integrate', 'integrate', (['self.thetaDot', '(0)', 'time'], {'method': '"""trapz"""', 'nSteps': '(25)'}), "(self.thetaDot, 0, time, method='trapz', nSteps=25)\n", (14210, 14261), False, 'from integration import integrate\n'), ((17131, 17196), 'integration.integrate', 'integrate', (['self.fTotal', '(0)', 'self.tofSec'], {'method': '"""trapz"""', 'nSteps': '(25)'}), "(self.fTotal, 0, self.tofSec, method='trapz', nSteps=25)\n", (17140, 17196), False, 'from integration import integrate\n'), ((4302, 4321), 'numpy.array', 'np.array', (['self.xDep'], {}), '(self.xDep)\n', (4310, 4321), True, 'import numpy as np\n'), ((4358, 4377), 'numpy.array', 'np.array', (['self.xArr'], {}), '(self.xArr)\n', (4366, 4377), True, 'import numpy as np\n'), ((4414, 4445), 'pykep.epoch', 'pk.epoch', (['self.jdDep', 
'"""mjd2000"""'], {}), "(self.jdDep, 'mjd2000')\n", (4422, 4445), True, 'import pykep as pk\n'), ((4482, 4502), 'numpy.array', 'np.array', (['self.jdDep'], {}), '(self.jdDep)\n', (4490, 4502), True, 'import numpy as np\n'), ((4548, 4579), 'pykep.epoch', 'pk.epoch', (['self.jdArr', '"""mjd2000"""'], {}), "(self.jdArr, 'mjd2000')\n", (4556, 4579), True, 'import pykep as pk\n'), ((4616, 4634), 'numpy.array', 'np.array', (['self.tof'], {}), '(self.tof)\n', (4624, 4634), True, 'import numpy as np\n'), ((5192, 5213), 'numpy.array', 'np.array', (['self.rFreeC'], {}), '(self.rFreeC)\n', (5200, 5213), True, 'import numpy as np\n'), ((5264, 5289), 'numpy.array', 'np.array', (['self.thetaFreeC'], {}), '(self.thetaFreeC)\n', (5272, 5289), True, 'import numpy as np\n'), ((5338, 5359), 'numpy.array', 'np.array', (['self.zFreeC'], {}), '(self.zFreeC)\n', (5346, 5359), True, 'import numpy as np\n'), ((5625, 5686), 'numpy.array', 'np.array', (['[self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2]]'], {}), '([self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2]])\n', (5633, 5686), True, 'import numpy as np\n'), ((13278, 13296), 'numpy.array', 'np.array', (['[L1, L2]'], {}), '([L1, L2])\n', (13286, 13296), True, 'import numpy as np\n'), ((17307, 17318), 'time.time', 'time.time', ([], {}), '()\n', (17316, 17318), False, 'import time\n'), ((18167, 18178), 'time.time', 'time.time', ([], {}), '()\n', (18176, 18178), False, 'import time\n'), ((6759, 6820), 'numpy.array', 'np.array', (['[self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2]]'], {}), '([self.rDepCyl[0], self.rDepCyl[1], self.rDepCyl[2]])\n', (6767, 6820), True, 'import numpy as np\n'), ((7008, 7069), 'numpy.array', 'np.array', (['[self.rArrCyl[0], self.rArrCyl[1], self.rArrCyl[2]]'], {}), '([self.rArrCyl[0], self.rArrCyl[1], self.rArrCyl[2]])\n', (7016, 7069), True, 'import numpy as np\n'), ((7310, 7371), 'numpy.array', 'np.array', (['[self.vDepCyl[0], self.vDepCyl[1], self.vDepCyl[2]]'], {}), '([self.vDepCyl[0], self.vDepCyl[1], 
self.vDepCyl[2]])\n', (7318, 7371), True, 'import numpy as np\n'), ((7582, 7643), 'numpy.array', 'np.array', (['[self.vArrCyl[0], self.vArrCyl[1], self.vArrCyl[2]]'], {}), '([self.vArrCyl[0], self.vArrCyl[1], self.vArrCyl[2]])\n', (7590, 7643), True, 'import numpy as np\n'), ((13257, 13275), 'numpy.array', 'np.array', (['[K1, K2]'], {}), '([K1, K2])\n', (13265, 13275), True, 'import numpy as np\n'), ((17461, 17501), 'numpy.linspace', 'np.linspace', (['(0)', 'self.tofSec', 'nEvalPoints'], {}), '(0, self.tofSec, nEvalPoints)\n', (17472, 17501), True, 'import numpy as np\n')] |
from typing import Any, List, Set
import numpy as np
from src.featurizer.client_profile import ClientProfile
from src.featurizer.product_info import ProductInfoMapType
from src.utils import ProductEncoder
class CandidatSelector:
    """
    Ranks candidate products for a client with a trained scoring model.

    Candidates are the union of a global top-products set, the client's own
    seen products and the product ids appearing in the precalculated pairs;
    a (n_candidates, 6) feature matrix is built for them and scored.
    """

    def __init__(
        self, model: Any, global_top: Set[str], product_info_map: "ProductInfoMapType",
    ):
        """
        model: any object exposing ``predict(features) -> scores``.
        global_top: product ids always considered as candidates.
        product_info_map: per-product info; entries must expose
            ``seen_cnt`` and ``last_seen_day`` (used as features f4/f5).
        """
        self._model = model
        self._global_top = global_top
        self._pim = product_info_map

    def get_features(self, profile: "ClientProfile", precalc) -> "Tuple[List[str], np.ndarray]":
        """
        Build the sorted candidate list and its feature matrix.

        Returns ``(candidates_list, features)`` where ``features`` is a
        float32 array of shape ``(len(candidates_list), 6)``.
        NOTE: the original annotation claimed ``List[str]`` but the method
        has always returned this tuple.
        """
        candidates_set = set(self._global_top)
        candidates_set.update(profile.seen_products)
        for pairs in precalc["pairs"].values():
            candidates_set.update([pid for (pid, _) in pairs])
        candidates_list = sorted(list(candidates_set))
        candidates_map = {pid: idx for idx, pid in enumerate(candidates_list)}
        # -10 acts as the "feature missing" sentinel value
        features = np.full((len(candidates_list), 6), -10, dtype=np.float32)
        # f0: tf1
        for (pid, score) in precalc["pairs"]["Mtf1"]:
            if pid not in candidates_map:
                continue
            features[candidates_map[pid], 0] = score
        # f1: tf10
        for (pid, score) in precalc["pairs"]["Mtf10"]:
            if pid not in candidates_map:
                continue
            features[candidates_map[pid], 1] = score
        # f2: nn22
        curr_map = precalc["map"]["nn22"]
        for idx, pid in enumerate(candidates_list):
            if pid not in curr_map:
                continue
            features[idx, 2] = curr_map[pid]
        # f3: seen in history
        for pid in profile.seen_products:
            features[candidates_map[pid], 3] = 1
        # f4: estimated popularity
        # f5: last_seen_date
        for idx, pid in enumerate(candidates_list):
            features[idx, 4] = self._pim[pid].seen_cnt
            features[idx, 5] = self._pim[pid].last_seen_day
        return (candidates_list, features)

    def get_candidates(self, profile: "ClientProfile", precalc) -> List[str]:
        """Return up to 200 product ids ranked by descending model score."""
        candidates_list, features = self.get_features(profile, precalc)
        scores = self._model.predict(features)
        idx = np.argsort(-scores)
        return [str(x) for x in np.array(candidates_list)[idx[:200]]]
| [
"numpy.argsort",
"numpy.array"
] | [((2163, 2182), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (2173, 2182), True, 'import numpy as np\n'), ((2215, 2240), 'numpy.array', 'np.array', (['candidates_list'], {}), '(candidates_list)\n', (2223, 2240), True, 'import numpy as np\n')] |
"""
Definition of the controller class for Q learning with
lazy action model.
"""
import pickle
import time
from os import path, mkdir
import tqdm
import numpy as np
import matplotlib.pyplot as plt
from tensorflow import keras
from cvxopt.solvers import qp
from cvxopt import matrix as cvxopt_matrix
class DnnRegressor2DPlus1D():
    """
    DNN regression model using 2D state input from
    [-floor_size,floor_size]x[-floor_size,floor_size] and a 2-component goal
    input (direction vector).

    Keeps two networks: ``model`` is trained on every ``fit`` call, and
    ``target_model`` (used by ``predict``) is synchronized to ``model``
    every ``target_update_interval`` fits.
    """

    def __init__(
            self,
            floor_size,
            target_update_interval,
            n_epochs,
            keras_model=None
    ):
        """
        floor_size: half-extent of the floor; states are normalized by it.
        target_update_interval: number of fit() calls between target-network
            weight synchronizations.
        n_epochs: epochs per fit() call.
        keras_model: optional pre-built Keras model to use instead of the
            default architecture.
        """
        self.floor_size = floor_size
        self.target_update_interval = target_update_interval
        self.n_epochs = n_epochs
        self.norm_factors = [
            1/floor_size,  # state: x-position
            1/floor_size,  # state: y-position
            1,  # goal: intended direction x
            1   # goal: intended direction y
        ]
        if keras_model is None:
            self.model = self.build_model()
            self.target_model = self.build_model()
        else:
            self.model = keras_model
            # BUG FIX: Keras models have no `.copy()` method; clone the
            # architecture instead (clone_model re-initializes weights)
            self.target_model = keras.models.clone_model(keras_model)
        # start with target == online model so predictions are consistent
        # before the first target update
        self.target_model.set_weights(self.model.get_weights())
        self.fit_counter = 0

    def build_model(self):
        """
        Build the underlying Keras model: a dense ReLU stack with batch
        normalization, taking the 4D (state, goal) input and producing a
        scalar value estimate. Compiled with Adam on MSE loss.
        """
        x_in = keras.layers.Input((4,))
        # Normalize inputs into the unit range
        f_x = keras.layers.Lambda(
            lambda x: x * self.norm_factors
        )(x_in)
        f_x = keras.layers.Dense(20, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(30, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(50, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(40, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(50, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(40, activation='relu')(f_x)
        f_x = keras.layers.BatchNormalization()(f_x)
        f_x = keras.layers.Dense(30, activation='relu')(f_x)
        f_x = keras.layers.Dense(20, activation='relu')(f_x)
        y_out = keras.layers.Dense(1)(f_x)
        new_model = keras.models.Model(
            inputs=x_in,
            outputs=y_out
        )
        new_model.compile(
            optimizer='Adam',
            loss='mse'
        )
        return new_model

    def fit(self, x_data, y_data, verbose=0):
        """
        Fit the value function to data.

        Returns True when the target network weights were refreshed on this
        call (i.e. predictions have changed), otherwise False.
        """
        # fit the online model
        self.fit_counter += 1
        self.model.fit(x_data, y_data, verbose=verbose, epochs=self.n_epochs)
        # and possibly update the target_model
        if self.fit_counter % self.target_update_interval == 0:
            self.target_model.set_weights(
                self.model.get_weights()
            )
            # if target network has been updated, set flag so that the
            # controller updates the data
            return True
        return False

    def predict(self, x_data):
        """
        Predict instances of the value function using the target model.
        Returns an ndarray of shape (n_samples, 1).
        """
        return np.array(self.target_model.predict(x_data))

    def visualize(self, goal, state_grid):
        """utility function to plot the V function under goal 'goal'
        (state_grid is assumed to be a square grid flattened row-wise)"""
        plt.imshow(
            self.predict(
                np.concatenate(
                    (
                        state_grid,
                        # broadcast the single goal to every grid point
                        np.repeat(
                            goal.reshape((1, 2)),
                            len(state_grid),
                            axis=0
                        ),
                    ),
                    axis=-1
                )
            ).reshape(int(np.sqrt(len(state_grid))), int(np.sqrt(len(state_grid))))[:, ::-1].T
        )
        plt.colorbar()
        plt.xticks(
            np.linspace(
                0,
                int(np.sqrt(len(state_grid))),
                3
            ).astype(int),
            np.linspace(
                min(state_grid[:, 0]),
                max(state_grid[:, 0]),
                3
            )
        )
        plt.yticks(
            np.linspace(
                0,
                int(np.sqrt(len(state_grid))),
                3
            ).astype(int)[::-1],
            np.linspace(
                min(state_grid[:, 1]),
                max(state_grid[:, 1]),
                3
            )
        )
        plt.xlabel('x1')
        plt.ylabel('x2')
        plt.show()
class Controller():
    """
    The controller maintains the data set and the value function estimator,
    and contains methods for performing training iterations as well as
    predicting optimal actions using the value function and the lazy
    action model
    """

    def __init__(
            self,
            v_functions,
            local_model_regularization,
            discount,
            k_nearest,
            boundaries,
            n_v_function_update_iterations,
            environment,
            scaleup_factor,
            # how many of the elements with too many neighbours should actually be thrown out
            prune_ratio=0.,
            init_length=3,
            scratch_dir=None,
            record_plots=False,
            scale_logger=False
    ):
        """
        v_functions: list of value-function estimators (ensemble members).
        local_model_regularization: ridge term of the local linear model QP.
        discount: discount factor gamma.
        k_nearest: number of neighbours used by the lazy model.
        boundaries: (lower, upper) clipping bounds for the targets.
        n_v_function_update_iterations: self-consistent fit iterations.
        environment: provides find_near_neighbours, get_augmented_targets
            and action_length.
        scaleup_factor: growth factor of the neighbourhood search radius.
        prune_ratio: fraction of over-dense points to drop after an update.
        init_length: size multiplier for the random placeholder data set.
        scratch_dir / record_plots / scale_logger: diagnostics options.
        """
        self.v_functions = v_functions
        self.local_model_regularization = local_model_regularization
        self.discount = discount
        self.k_nearest = k_nearest
        self.boundaries = boundaries
        self.n_v_function_update_iterations = n_v_function_update_iterations
        self.environment = environment
        self.scaleup_factor = scaleup_factor
        self.prune_ratio = prune_ratio
        self.init_length = init_length
        self.scratch_dir = scratch_dir
        self.record_plots = record_plots
        self.scale_logger = scale_logger
        self.mean_kept = []
        if self.scale_logger:
            self.scales = []
        # Initialize raw data with random placeholders (pruned later once
        # real data has been collected)
        angles = 2*np.pi*np.random.rand(self.init_length*self.k_nearest+1)
        self.goals_all = [
            np.stack((
                np.cos(angles),
                np.sin(angles)
            ), axis=-1)
            for _ in range(len(self.v_functions))
        ]
        self.states_all = [
            np.random.rand(self.init_length*self.k_nearest+1, 2)
            for _ in range(len(self.v_functions))
        ]
        self.actions_all = [
            np.random.rand(self.init_length*self.k_nearest+1, 2)
            for _ in range(len(self.v_functions))
        ]
        self.rewards_all = [
            np.zeros(self.init_length*self.k_nearest+1)
            for _ in range(len(self.v_functions))
        ]
        self.next_states_all = [
            np.random.rand(self.init_length*self.k_nearest+1, 2)
            for _ in range(len(self.v_functions))
        ]
        # initialize processed data
        # This only depends on the current data set
        self.k_smallest_inds_all = [
            None
            for _ in range(len(self.v_functions))
        ]
        # This only depends on my current estimate of the V function
        self.r_plus_gamma_v_all = [
            None
            for _ in range(len(self.v_functions))
        ]
        # This depends both on the current data set
        # and on the current estimate of the V function
        self.targets_all = [
            None
            for _ in range(len(self.v_functions))
        ]
        # Flags that show if the initial data has been kicked out already
        self.already_pruned = [
            False
            for _ in range(len(self.v_functions))
        ]
        for train_signal in range(len(self.v_functions)):
            self.update(train_signal)

    def _solve_local_qp(self, r_plus_gamma_v_nexts, actions):
        """
        Fit the local (lazy) linear action model V(a) ~ alpha + beta.a by
        solving the regularized least-squares problem as a QP.
        Returns the coefficient vector [alpha, beta_1, beta_2].
        Raises RuntimeError if the QP solver does not report an optimal
        solution.
        """
        # augment actions with a constant column for the intercept alpha
        actions_augmented = np.concatenate((
            np.ones((len(actions), 1)),
            actions
        ), axis=-1)
        sol = qp(
            cvxopt_matrix(
                np.sum(
                    actions_augmented[
                        :, :, None
                    ] * actions_augmented[
                        :, None, :
                    ],
                    axis=0
                ) + self.local_model_regularization * np.eye(3)
            ),
            cvxopt_matrix(np.sum(
                -(r_plus_gamma_v_nexts[:, None] * actions_augmented[:, :]),
                axis=0
            ))
        )
        if 'optimal' not in sol['status']:
            raise RuntimeError('Optimal solution not found')
        return np.array(sol['x']).reshape(-1)

    def get_lazy_policy_action(
            self,
            r_plus_gamma_v_nexts,
            actions,
    ):
        """
        Get optimal action using the lazy action model: the gradient
        direction beta of the local linear model, scaled to the
        environment's action length.
        """
        alphabeta = self._solve_local_qp(r_plus_gamma_v_nexts, actions)
        return self.environment.action_length/np.linalg.norm(alphabeta[1:]) * alphabeta[1:]

    def get_lazy_model_value(
            self,
            r_plus_gamma_v_nexts,
            actions,
    ):
        """
        Predict value of V-function, performing the maximum over the
        available actions using the lazy action model.
        """
        alphabeta = self._solve_local_qp(r_plus_gamma_v_nexts, actions)
        # Instability countermeasure: The target cant be higher than the maximum
        # r_plus_gamma_V_next in the vicinity
        return min([
            alphabeta[0] + self.environment.action_length *
            np.linalg.norm(alphabeta[1:]),
            np.max(r_plus_gamma_v_nexts)
        ])

    def append_new_data_point(
            self,
            rollout_policy_inds,
            goal,
            state,
            action,
            reward,
            next_state
    ):
        """
        Add new data point (transition) to the data sets of the listed
        policies; drops the random placeholder data once enough real data
        has been collected.
        """
        for train_signal in rollout_policy_inds:
            # Append raw data ONLY
            self.goals_all[train_signal] = np.append(
                self.goals_all[train_signal],
                goal.reshape((1, len(goal))),
                axis=0
            )
            self.states_all[train_signal] = np.append(
                self.states_all[train_signal],
                state.reshape((1, len(state))),
                axis=0
            )
            self.actions_all[train_signal] = np.append(
                self.actions_all[train_signal],
                action.reshape((1, len(action))),
                axis=0
            )
            self.rewards_all[train_signal] = np.append(
                self.rewards_all[train_signal],
                np.array([reward]),
                axis=0
            )
            self.next_states_all[train_signal] = np.append(
                self.next_states_all[train_signal],
                next_state.reshape((1, len(next_state))),
                axis=0
            )
            if not self.already_pruned[train_signal]:
                if len(self.states_all[train_signal]) > self.init_length*self.k_nearest + 3:
                    # throw our placeholders from raw data
                    print(
                        'throw out initial placeholder raw data for no.', train_signal)
                    self.goals_all[train_signal] = self.goals_all[train_signal][
                        self.init_length*self.k_nearest + 2:
                    ]
                    self.states_all[train_signal] = self.states_all[train_signal][
                        self.init_length*self.k_nearest + 2:
                    ]
                    self.actions_all[train_signal] = self.actions_all[train_signal][
                        self.init_length*self.k_nearest + 2:
                    ]
                    self.rewards_all[train_signal] = self.rewards_all[train_signal][
                        self.init_length*self.k_nearest + 2:
                    ]
                    self.next_states_all[train_signal] = self.next_states_all[train_signal][
                        self.init_length*self.k_nearest + 2:
                    ]
                    self.already_pruned[train_signal] = True

    def rapid_near_neighbours_scale_up(
            self,
            rollout_policy_ind,
            state,
            goal
    ):
        """
        Find k_nearest neighbours in a neighbourhood of gradually increased
        size. Returns [indices, scale] where scale is the neighbourhood
        size at which enough neighbours were found.
        Raises RuntimeError if 20 scale-ups were not enough.
        """
        scale = 1
        for _ in range(20):
            neighbours = self.environment.find_near_neighbours(
                self.states_all[rollout_policy_ind],
                self.goals_all[rollout_policy_ind],
                state,
                goal,
                scale
            )
            if len(neighbours) > self.k_nearest:
                # If there is a sufficient number
                # of points that has been found,
                # choose randomly without replace
                # from them.
                if self.scale_logger:
                    self.scales.append(scale)
                return [
                    np.random.choice(
                        neighbours,
                        size=self.k_nearest,
                        replace=False
                    ),
                    scale
                ]
            # else: scale up and repeat
            scale = scale*self.scaleup_factor
        # Raise exception if not enough found
        raise RuntimeError(
            'Not enough near neighbours found after scaling to ' + str(scale))

    def get_individual_action(self, rollout_policy_ind, state, goal):
        """get action from policy no. rollout_policy_ind"""
        k_neighbours_inds = self.rapid_near_neighbours_scale_up(
            rollout_policy_ind,
            state,
            goal
        )[0]
        return self.get_lazy_policy_action(
            self.r_plus_gamma_v_all[rollout_policy_ind][k_neighbours_inds].reshape(
                -1),
            self.actions_all[rollout_policy_ind][k_neighbours_inds]
        )

    def get_action(self, state, goal):
        """Get averaged action from all policies, rescaled to the
        environment's action length"""
        average = np.mean(
            np.stack(
                [
                    self.get_individual_action(pol_ind, state, goal)
                    for pol_ind in range(len(self.v_functions))
                ],
                axis=0
            ),
            axis=0
        )
        return self.environment.action_length*average/np.linalg.norm(average)

    def update_targets_only(
            self,
            train_signal
    ):
        """
        Update all targets. This function assumes
        up-to-date nearest neighbours and up-to-date V function values"""
        self.targets_all[train_signal] = np.array([
            self.get_lazy_model_value(
                # Here I assume that the V function values are up-to-date
                self.r_plus_gamma_v_all[train_signal][k_smallest_inds],
                self.actions_all[train_signal][k_smallest_inds]
            )
            # Here I assume that the indices of all neighbours are up-to-date
            for k_smallest_inds in self.k_smallest_inds_all[train_signal]
        ])
        # data augmentation: set targets to 0 outside of the interesting domain
        self.environment.get_augmented_targets(
            self.states_all[train_signal],
            self.targets_all[train_signal]
        )
        # The rollout ends once a reward is given. Thus, the value of the value function is
        # the reward itself whereever rewards are given
        reward_was_given_mask = (
            np.abs(self.rewards_all[train_signal]) > 1e-10)
        self.targets_all[train_signal][
            reward_was_given_mask
        ] = self.rewards_all[train_signal][
            reward_was_given_mask
        ]
        # There are user-defined lower and upper bounds on the value function as well
        self.targets_all[train_signal][
            self.targets_all[train_signal] < self.boundaries[0]
        ] = self.boundaries[0]
        self.targets_all[train_signal][
            self.targets_all[train_signal] > self.boundaries[1]
        ] = self.boundaries[1]
        # FIX: only draw the diagnostic histogram when it is actually saved,
        # and close the figure afterwards so figures don't accumulate
        if self.record_plots:
            _ = plt.hist(self.targets_all[train_signal], bins=100, log=True)
            if not path.exists(self.scratch_dir + '/plots/'):
                mkdir(self.scratch_dir + '/plots/')
            plt.savefig(self.scratch_dir + '/plots/' + str(
                time.time()
            ) + '.png')
            plt.close()

    def update_r_plus_gamma_v(self, train_signal):
        """Update the V values of all data points. These are only
        dependent on the weights of the target model"""
        self.r_plus_gamma_v_all[train_signal] = self.rewards_all[
            train_signal
        ] + self.discount * self.v_functions[
            train_signal
        ].predict(
            np.concatenate((
                self.next_states_all[train_signal],
                self.goals_all[train_signal]
            ), axis=-1)
        ).reshape(-1)

    def update_k_smallest_inds_and_calculate_pruning(self, train_signal):
        """Update the indices of the nearest neighbours
        this depends on the data set only (in the sense that
        for every point in the set, its nearest neighbours
        depend on the entire data set). After that delete
        some points in areas with high overlap"""
        self.k_smallest_inds_all[train_signal] = []
        keep_in = []
        for state, goal in zip(
                self.states_all[train_signal],
                self.goals_all[train_signal]
        ):
            inds, scale = self.rapid_near_neighbours_scale_up(
                train_signal,
                state,
                goal
            )
            self.k_smallest_inds_all[train_signal].append(
                inds
            )
            # points whose neighbourhood had to be scaled up are sparse and
            # are always kept
            keep_in.append(
                scale != 1
            )
        keep_in = np.array(keep_in)
        # only throw out self.prune_ratio of all elements with too many neighbours
        keep_in = np.logical_or(
            keep_in,
            np.random.rand(len(keep_in)) < 1-self.prune_ratio
        )
        self.mean_kept.append(np.mean(keep_in))
        return keep_in

    def prune(
            self,
            train_signal,
            keep_in
    ):
        """
        Thin out dataset in areas of high density
        (keep_in is a boolean mask over the data points)
        """
        self.goals_all[train_signal] = self.goals_all[train_signal][keep_in]
        self.states_all[train_signal] = self.states_all[train_signal][keep_in]
        self.actions_all[train_signal] = self.actions_all[train_signal][keep_in]
        self.rewards_all[train_signal] = self.rewards_all[train_signal][keep_in]
        self.next_states_all[train_signal] = self.next_states_all[train_signal][keep_in]
        print(
            len(self.goals_all[train_signal]),
            len(self.states_all[train_signal]),
            len(self.actions_all[train_signal]),
            len(self.rewards_all[train_signal]),
            len(self.next_states_all[train_signal])
        )

    def update(self, train_signal, intermediate_results_base=None):
        """This function is supposed to be a full from-scratch
        extraction of a value function from a static data set"""
        print('TRAIN_SIGNAL NO.', train_signal)
        # Before the self-consistent V function iteration starts,
        # all the stuff that only depends on the static raw data set is pre-calcluated
        # 1. The X values
        print('Update X values')
        x_values = np.concatenate(
            (
                self.states_all[train_signal],
                self.goals_all[train_signal]
            ),
            axis=-1
        )
        # 2. k_smallest_inds_all also only depends on static raw data
        print('Update k_smallest_inds_all and calculate pruning mask for later...')
        keep_in = self.update_k_smallest_inds_and_calculate_pruning(
            train_signal)
        # Once the static data is up-to-date, we learn the value function
        # self-consistently on the static data set.
        print('Self-consistent V-function iterations...')
        # Start loop with initializing
        target_weights_updated = True
        for iteration in tqdm.tqdm(range(self.n_v_function_update_iterations)):
            if target_weights_updated:
                # If the weights of the target network have been updated,
                # self.r_plus_gamma_v_all has to be updated as well.
                print('Update r_plus_gamma_V_all')
                self.update_r_plus_gamma_v(train_signal)
                # Since self.targets_all depend on self.r_plus_gamma_v_all,
                # self.targets_all has to be updated as well
                print('Update targets_all')
                self.update_targets_only(
                    train_signal
                )
            # Fit network on X and Y
            print('Fit network...')
            target_weights_updated = self.v_functions[train_signal].fit(
                x_values,
                self.targets_all[train_signal]
            )
            if intermediate_results_base is not None:
                self.save(intermediate_results_base + '_' + str(iteration))
        # Apply pruning mask
        print('Prune data set')
        self.prune(train_signal, keep_in)
        print('Kept', self.mean_kept[-1], 'of original data')

    def save(self, folder_name):
        """
        Save the controller object (V-function weights + collected data)
        to a new folder. Raises FileExistsError if the folder exists.
        """
        # make sure not to overwrite anything (explicit raise instead of
        # assert, which is stripped under `python -O`)
        if path.exists(folder_name):
            raise FileExistsError(folder_name)
        mkdir(folder_name)
        # save v functions
        for ind, v_function in enumerate(self.v_functions):
            v_function.target_model.save_weights(
                folder_name + '/v_function_' + str(ind) + '.hd5')
        # save the controller's collected data
        data = {
            'goals_all': self.goals_all,
            'states_all': self.states_all,
            'actions_all': self.actions_all,
            'rewards_all': self.rewards_all,
            'next_states_all': self.next_states_all
        }
        with open(folder_name + '/data.pickle', 'wb') as file:
            pickle.dump(data, file)

    def load(self, folder_name, light=False, only_values=False):
        """
        Load the controller object. folder_name is either a single folder
        (one data set per V function) or a list of folders (the first data
        set of each). With light=True only weights and raw data are loaded;
        only_values=True additionally skips the neighbour/target rebuild.
        Raises FileNotFoundError if a folder is missing.
        """
        # make sure data exists (explicit raise instead of assert)
        if isinstance(folder_name, str):
            if not path.exists(folder_name):
                raise FileNotFoundError(folder_name)
            v_func_paths = [folder_name + '/v_function_' + str(
                ind
            ) + '.hd5' for ind in range(len(self.v_functions))]
            data_dirs = folder_name + '/data.pickle'
            data_inds = range(len(self.v_functions))
        else:
            for name in folder_name:
                if not path.exists(name):
                    raise FileNotFoundError(name)
            v_func_paths = [name + '/v_function_' + str(
                0
            ) + '.hd5' for name in folder_name]
            data_dirs = [name + '/data.pickle' for name in folder_name]
            data_inds = [0 for name in folder_name]
        # load V functions
        for v_function, v_func_path in zip(self.v_functions, v_func_paths):
            v_function.target_model.load_weights(v_func_path)
        # load the controller's collected data. Not efficient, but this is
        # not significant usually
        for ind, [data_dir, data_ind] in enumerate(zip(data_dirs, data_inds)):
            with open(data_dir, 'rb') as file:
                data = pickle.load(file)
            self.goals_all[ind] = data['goals_all'][data_ind]
            self.states_all[ind] = data['states_all'][data_ind]
            self.actions_all[ind] = data['actions_all'][data_ind]
            self.rewards_all[ind] = data['rewards_all'][data_ind]
            self.next_states_all[ind] = data['next_states_all'][data_ind]
        if not light:
            for train_signal in tqdm.tqdm(range(len(self.v_functions))):
                self.update_r_plus_gamma_v(train_signal)
                if not only_values:
                    self.update_k_smallest_inds_and_calculate_pruning(
                        train_signal)
                    self.update_targets_only(train_signal)
| [
"os.mkdir",
"pickle.dump",
"numpy.abs",
"numpy.sum",
"tensorflow.keras.layers.Dense",
"numpy.mean",
"numpy.linalg.norm",
"pickle.load",
"numpy.sin",
"tensorflow.keras.layers.BatchNormalization",
"os.path.exists",
"matplotlib.pyplot.colorbar",
"numpy.max",
"tensorflow.keras.layers.Input",
... | [((1384, 1408), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['(4,)'], {}), '((4,))\n', (1402, 1408), False, 'from tensorflow import keras\n'), ((2606, 2652), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'x_in', 'outputs': 'y_out'}), '(inputs=x_in, outputs=y_out)\n', (2624, 2652), False, 'from tensorflow import keras\n'), ((4243, 4257), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4255, 4257), True, 'import matplotlib.pyplot as plt\n'), ((4874, 4890), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x1"""'], {}), "('x1')\n", (4884, 4890), True, 'import matplotlib.pyplot as plt\n'), ((4899, 4915), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""x2"""'], {}), "('x2')\n", (4909, 4915), True, 'import matplotlib.pyplot as plt\n'), ((4925, 4935), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4933, 4935), True, 'import matplotlib.pyplot as plt\n'), ((17143, 17203), 'matplotlib.pyplot.hist', 'plt.hist', (['self.targets_all[train_signal]'], {'bins': '(100)', 'log': '(True)'}), '(self.targets_all[train_signal], bins=100, log=True)\n', (17151, 17203), True, 'import matplotlib.pyplot as plt\n'), ((18890, 18907), 'numpy.array', 'np.array', (['keep_in'], {}), '(keep_in)\n', (18898, 18907), True, 'import numpy as np\n'), ((20507, 20598), 'numpy.concatenate', 'np.concatenate', (['(self.states_all[train_signal], self.goals_all[train_signal])'], {'axis': '(-1)'}), '((self.states_all[train_signal], self.goals_all[train_signal]\n ), axis=-1)\n', (20521, 20598), True, 'import numpy as np\n'), ((22553, 22571), 'os.mkdir', 'mkdir', (['folder_name'], {}), '(folder_name)\n', (22558, 22571), False, 'from os import path, mkdir\n'), ((1450, 1502), 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x * self.norm_factors)'], {}), '(lambda x: x * self.norm_factors)\n', (1469, 1502), False, 'from tensorflow import keras\n'), ((1546, 1587), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(20)'], 
{'activation': '"""relu"""'}), "(20, activation='relu')\n", (1564, 1587), False, 'from tensorflow import keras\n'), ((1654, 1687), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1685, 1687), False, 'from tensorflow import keras\n'), ((1707, 1748), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (1725, 1748), False, 'from tensorflow import keras\n'), ((1815, 1848), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1846, 1848), False, 'from tensorflow import keras\n'), ((1868, 1909), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (1886, 1909), False, 'from tensorflow import keras\n'), ((1929, 1962), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (1960, 1962), False, 'from tensorflow import keras\n'), ((1982, 2023), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (2000, 2023), False, 'from tensorflow import keras\n'), ((2043, 2076), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2074, 2076), False, 'from tensorflow import keras\n'), ((2096, 2137), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(50)'], {'activation': '"""relu"""'}), "(50, activation='relu')\n", (2114, 2137), False, 'from tensorflow import keras\n'), ((2157, 2190), 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2188, 2190), False, 'from tensorflow import keras\n'), ((2210, 2251), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(40)'], {'activation': '"""relu"""'}), "(40, activation='relu')\n", (2228, 2251), False, 'from tensorflow import keras\n'), ((2271, 2304), 
'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), '()\n', (2302, 2304), False, 'from tensorflow import keras\n'), ((2324, 2365), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(30)'], {'activation': '"""relu"""'}), "(30, activation='relu')\n", (2342, 2365), False, 'from tensorflow import keras\n'), ((2440, 2481), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(20)'], {'activation': '"""relu"""'}), "(20, activation='relu')\n", (2458, 2481), False, 'from tensorflow import keras\n'), ((2558, 2579), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), '(1)\n', (2576, 2579), False, 'from tensorflow import keras\n'), ((6441, 6494), 'numpy.random.rand', 'np.random.rand', (['(self.init_length * self.k_nearest + 1)'], {}), '(self.init_length * self.k_nearest + 1)\n', (6455, 6494), True, 'import numpy as np\n'), ((6728, 6784), 'numpy.random.rand', 'np.random.rand', (['(self.init_length * self.k_nearest + 1)', '(2)'], {}), '(self.init_length * self.k_nearest + 1, 2)\n', (6742, 6784), True, 'import numpy as np\n'), ((6891, 6947), 'numpy.random.rand', 'np.random.rand', (['(self.init_length * self.k_nearest + 1)', '(2)'], {}), '(self.init_length * self.k_nearest + 1, 2)\n', (6905, 6947), True, 'import numpy as np\n'), ((7045, 7092), 'numpy.zeros', 'np.zeros', (['(self.init_length * self.k_nearest + 1)'], {}), '(self.init_length * self.k_nearest + 1)\n', (7053, 7092), True, 'import numpy as np\n'), ((7194, 7250), 'numpy.random.rand', 'np.random.rand', (['(self.init_length * self.k_nearest + 1)', '(2)'], {}), '(self.init_length * self.k_nearest + 1, 2)\n', (7208, 7250), True, 'import numpy as np\n'), ((15425, 15448), 'numpy.linalg.norm', 'np.linalg.norm', (['average'], {}), '(average)\n', (15439, 15448), True, 'import numpy as np\n'), ((16564, 16602), 'numpy.abs', 'np.abs', (['self.rewards_all[train_signal]'], {}), '(self.rewards_all[train_signal])\n', (16570, 16602), True, 'import numpy as np\n'), 
((19149, 19165), 'numpy.mean', 'np.mean', (['keep_in'], {}), '(keep_in)\n', (19156, 19165), True, 'import numpy as np\n'), ((22520, 22544), 'os.path.exists', 'path.exists', (['folder_name'], {}), '(folder_name)\n', (22531, 22544), False, 'from os import path, mkdir\n'), ((23151, 23174), 'pickle.dump', 'pickle.dump', (['data', 'file'], {}), '(data, file)\n', (23162, 23174), False, 'import pickle\n'), ((23393, 23417), 'os.path.exists', 'path.exists', (['folder_name'], {}), '(folder_name)\n', (23404, 23417), False, 'from os import path, mkdir\n'), ((8882, 8956), 'numpy.sum', 'np.sum', (['(-(r_plus_gamma_v_nexts[:, None] * actions_augmented[:, :]))'], {'axis': '(0)'}), '(-(r_plus_gamma_v_nexts[:, None] * actions_augmented[:, :]), axis=0)\n', (8888, 8956), True, 'import numpy as np\n'), ((9137, 9155), 'numpy.array', 'np.array', (["sol['x']"], {}), "(sol['x'])\n", (9145, 9155), True, 'import numpy as np\n'), ((9215, 9244), 'numpy.linalg.norm', 'np.linalg.norm', (['alphabeta[1:]'], {}), '(alphabeta[1:])\n', (9229, 9244), True, 'import numpy as np\n'), ((10021, 10095), 'numpy.sum', 'np.sum', (['(-(r_plus_gamma_v_nexts[:, None] * actions_augmented[:, :]))'], {'axis': '(0)'}), '(-(r_plus_gamma_v_nexts[:, None] * actions_augmented[:, :]), axis=0)\n', (10027, 10095), True, 'import numpy as np\n'), ((10276, 10294), 'numpy.array', 'np.array', (["sol['x']"], {}), "(sol['x'])\n", (10284, 10294), True, 'import numpy as np\n'), ((10571, 10599), 'numpy.max', 'np.max', (['r_plus_gamma_v_nexts'], {}), '(r_plus_gamma_v_nexts)\n', (10577, 10599), True, 'import numpy as np\n'), ((11640, 11658), 'numpy.array', 'np.array', (['[reward]'], {}), '([reward])\n', (11648, 11658), True, 'import numpy as np\n'), ((17254, 17295), 'os.path.exists', 'path.exists', (["(self.scratch_dir + '/plots/')"], {}), "(self.scratch_dir + '/plots/')\n", (17265, 17295), False, 'from os import path, mkdir\n'), ((17313, 17348), 'os.mkdir', 'mkdir', (["(self.scratch_dir + '/plots/')"], {}), "(self.scratch_dir + 
'/plots/')\n", (17318, 17348), False, 'from os import path, mkdir\n'), ((23746, 23763), 'os.path.exists', 'path.exists', (['name'], {}), '(name)\n', (23757, 23763), False, 'from os import path, mkdir\n'), ((24436, 24453), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (24447, 24453), False, 'import pickle\n'), ((6557, 6571), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (6563, 6571), True, 'import numpy as np\n'), ((6589, 6603), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (6595, 6603), True, 'import numpy as np\n'), ((8567, 8644), 'numpy.sum', 'np.sum', (['(actions_augmented[:, :, None] * actions_augmented[:, None, :])'], {'axis': '(0)'}), '(actions_augmented[:, :, None] * actions_augmented[:, None, :], axis=0)\n', (8573, 8644), True, 'import numpy as np\n'), ((9706, 9783), 'numpy.sum', 'np.sum', (['(actions_augmented[:, :, None] * actions_augmented[:, None, :])'], {'axis': '(0)'}), '(actions_augmented[:, :, None] * actions_augmented[:, None, :], axis=0)\n', (9712, 9783), True, 'import numpy as np\n'), ((14046, 14110), 'numpy.random.choice', 'np.random.choice', (['neighbours'], {'size': 'self.k_nearest', 'replace': '(False)'}), '(neighbours, size=self.k_nearest, replace=False)\n', (14062, 14110), True, 'import numpy as np\n'), ((8831, 8840), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (8837, 8840), True, 'import numpy as np\n'), ((9970, 9979), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (9976, 9979), True, 'import numpy as np\n'), ((10528, 10557), 'numpy.linalg.norm', 'np.linalg.norm', (['alphabeta[1:]'], {}), '(alphabeta[1:])\n', (10542, 10557), True, 'import numpy as np\n'), ((17425, 17436), 'time.time', 'time.time', ([], {}), '()\n', (17434, 17436), False, 'import time\n'), ((17828, 17924), 'numpy.concatenate', 'np.concatenate', (['(self.next_states_all[train_signal], self.goals_all[train_signal])'], {'axis': '(-1)'}), '((self.next_states_all[train_signal], self.goals_all[\n train_signal]), axis=-1)\n', (17842, 17924), True, 
'import numpy as np\n')] |
# Copyright 2018 <NAME> (nikmedoed)
# Licensed under the Apache License, Version 2.0 (the «License»)
import random
from src.localisation import localisation
import os
import matplotlib.pyplot as plt
import numpy as np
import time
def save(name, fold = '', fmt='png'):
    """Save the current matplotlib figure as ``<name>.<fmt>``.

    If `fold` is non-empty, the file is written inside that folder (created
    on demand); the working directory is always restored afterwards.
    """
    pwd = os.getcwd()
    try:
        if fold != "":
            if not os.path.exists(fold):
                os.mkdir(fold)
            os.chdir(fold)
        # Bug fix: the format keyword was hardcoded to 'png' (and savefig's
        # keyword is `format`, not `fmt`), so the `fmt` parameter was ignored.
        plt.savefig('{}.{}'.format(name, fmt), format=fmt)
    finally:
        # Restore the original working directory even if savefig raises.
        if os.getcwd() != pwd:
            os.chdir(pwd)
def plot(c, d, name = "", fold = '', show=False):
    """Draw a grouped bar chart comparing series `d` (Bar1) and `c` (Bar2).

    Labels and the title come from the localisation table for this module.
    The figure is shown only when `show` is True.
    """
    plt.clf()
    loc = localisation.loc(__file__)
    positions = np.arange(13)
    width = 0.35
    alpha = 0.8
    plt.grid(True, linestyle=':', fillstyle='bottom', axis='y')
    plt.bar(positions - width / 2, d, width,
            alpha=alpha,
            color='tab:purple',
            label=loc['Bar1'])
    plt.bar(positions + width / 2, c, width,
            alpha=alpha,
            color='orange',
            label=loc['Bar2'])
    plt.xlabel(loc['XLabel'])
    plt.ylabel(loc['YLabel'])
    plt.title(loc['Name'])
    plt.xticks(positions, range(len(d)))
    plt.legend()
    plt.tight_layout()
    if show:
        plt.show()
if __name__ == "__main__":
    # Build 13 baseline values plus a slightly jittered companion series,
    # then render them side by side.
    d, c = [], []
    for _ in range(13):
        base = random.random()
        d.append(base)
        c.append(base + random.random() * 0.05 - 0.02)
    plot(c, d, fold="rfolder")
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.tight_layout",
"src.localisation.localisation.loc",
"os.mkdir",
"matplotlib.pyplot.show",
"matplotlib.pyplot.clf",
"os.getcwd",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.legend",
"os.path.exists",
"random.random",
"numpy.arange",
"matplotlib.py... | [((282, 293), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (291, 293), False, 'import os\n'), ((558, 567), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (565, 567), True, 'import matplotlib.pyplot as plt\n'), ((578, 604), 'src.localisation.localisation.loc', 'localisation.loc', (['__file__'], {}), '(__file__)\n', (594, 604), False, 'from src.localisation import localisation\n'), ((617, 630), 'numpy.arange', 'np.arange', (['(13)'], {}), '(13)\n', (626, 630), True, 'import numpy as np\n'), ((675, 734), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'linestyle': '""":"""', 'fillstyle': '"""bottom"""', 'axis': '"""y"""'}), "(True, linestyle=':', fillstyle='bottom', axis='y')\n", (683, 734), True, 'import matplotlib.pyplot as plt\n'), ((748, 851), 'matplotlib.pyplot.bar', 'plt.bar', (['(index - bar_width / 2)', 'd', 'bar_width'], {'alpha': 'opacity', 'color': '"""tab:purple"""', 'label': "loc['Bar1']"}), "(index - bar_width / 2, d, bar_width, alpha=opacity, color=\n 'tab:purple', label=loc['Bar1'])\n", (755, 851), True, 'import matplotlib.pyplot as plt\n'), ((921, 1019), 'matplotlib.pyplot.bar', 'plt.bar', (['(index + bar_width / 2)', 'c', 'bar_width'], {'alpha': 'opacity', 'color': '"""orange"""', 'label': "loc['Bar2']"}), "(index + bar_width / 2, c, bar_width, alpha=opacity, color='orange',\n label=loc['Bar2'])\n", (928, 1019), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1107), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["loc['XLabel']"], {}), "(loc['XLabel'])\n", (1092, 1107), True, 'import matplotlib.pyplot as plt\n'), ((1112, 1137), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (["loc['YLabel']"], {}), "(loc['YLabel'])\n", (1122, 1137), True, 'import matplotlib.pyplot as plt\n'), ((1142, 1164), 'matplotlib.pyplot.title', 'plt.title', (["loc['Name']"], {}), "(loc['Name'])\n", (1151, 1164), True, 'import matplotlib.pyplot as plt\n'), ((1206, 1218), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1216, 1218), True, 
'import matplotlib.pyplot as plt\n'), ((1224, 1242), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1240, 1242), True, 'import matplotlib.pyplot as plt\n'), ((385, 399), 'os.chdir', 'os.chdir', (['fold'], {}), '(fold)\n', (393, 399), False, 'import os\n'), ((461, 472), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (470, 472), False, 'import os\n'), ((489, 502), 'os.chdir', 'os.chdir', (['pwd'], {}), '(pwd)\n', (497, 502), False, 'import os\n'), ((1396, 1406), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1404, 1406), True, 'import matplotlib.pyplot as plt\n'), ((1494, 1509), 'random.random', 'random.random', ([], {}), '()\n', (1507, 1509), False, 'import random\n'), ((328, 348), 'os.path.exists', 'os.path.exists', (['fold'], {}), '(fold)\n', (342, 348), False, 'import os\n'), ((362, 376), 'os.mkdir', 'os.mkdir', (['fold'], {}), '(fold)\n', (370, 376), False, 'import os\n'), ((1551, 1566), 'random.random', 'random.random', ([], {}), '()\n', (1564, 1566), False, 'import random\n')] |
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.ensemble import IsolationForest
from scipy.stats import multivariate_normal
class Build_Anomaly_Model():
    """Common fit/predict wrapper over three anomaly detectors:
    a per-class multivariate Gaussian model, DBSCAN, and Isolation Forest."""
    def __init__(self, X, model_name, para, Y = None):
        # X: feature matrix (n_samples, n_features).
        # model_name: 'multivariate' | 'dbscan' | 'isolation'.
        # para: parameters for the chosen model; for 'multivariate' it must
        #       contain a 'threshold' entry, otherwise it is passed as kwargs
        #       to the sklearn estimator.
        # Y: class labels, used only by the 'multivariate' model.
        self.X = X
        self.Y = Y
        self.model_name = model_name
        self.para = para
        if model_name == 'multivariate':
            self.classes, self.counts = np.unique(self.Y, return_counts = True)
            self.k = self.classes.size
            self.d = self.X.shape[1]
            # Per-class Gaussian parameters (mean, covariance, prior),
            # filled in by fit().
            self.mu = np.zeros((self.k, self.d))
            self.sigma = np.zeros((self.k, self.d, self.d))
            self.pi = np.zeros(self.k)
            self.threshold = self.para['threshold']
        if model_name == 'dbscan':
            self.model = DBSCAN(**self.para)
        if model_name == 'isolation':
            self.model = IsolationForest(**self.para)
        self.anomaly_model = None
    def fit(self):
        # Gaussian path: estimate mean/covariance/prior per class;
        # otherwise delegate to the underlying sklearn estimator.
        if self.model_name == 'multivariate':
            for label in range(0, self.k):
                indices = (self.Y == label)
                self.mu[label] = np.mean(self.X[indices, :], axis = 0)
                self.sigma[label] = np.cov(self.X[indices, :], rowvar = 0, bias = 0)
                self.pi[label] = self.counts[label] / (len(self.Y))
        else:
            self.anomaly_model = self.model.fit(self.X)
    def predict(self, X):
        # Gaussian path: score every sample under each class Gaussian and
        # mark as anomalous (-1) the samples whose negated best
        # log-likelihood falls below the threshold.
        if self.model_name == 'multivariate':
            all_prob = np.zeros((len(X), self.k))
            for label in range(0, self.k):
                rv = multivariate_normal(mean = self.mu[label], cov = self.sigma[label],
                                        allow_singular = True)
                # NOTE(review): this inner loop repeats the same vectorized
                # assignment len(X) times; a single assignment would suffice.
                for i in range(0, len(X)):
                    all_prob[:, label] = rv.logpdf(X[:, :]) + np.log(self.pi[label])
            pred_label = np.max(all_prob, axis = 1) * -1
            # NOTE(review): pred_class is computed but never returned.
            pred_class = np.argmax(all_prob, axis = 1)
            pred_label[pred_label < self.threshold] = -1
            return np.squeeze(pred_label).tolist()
        else:
            return self.anomaly_model.predict(X) | [
"sklearn.ensemble.IsolationForest",
"numpy.log",
"numpy.argmax",
"numpy.unique",
"numpy.zeros",
"scipy.stats.multivariate_normal",
"numpy.max",
"numpy.mean",
"numpy.squeeze",
"numpy.cov",
"sklearn.cluster.DBSCAN"
] | [((370, 407), 'numpy.unique', 'np.unique', (['self.Y'], {'return_counts': '(True)'}), '(self.Y, return_counts=True)\n', (379, 407), True, 'import numpy as np\n'), ((481, 507), 'numpy.zeros', 'np.zeros', (['(self.k, self.d)'], {}), '((self.k, self.d))\n', (489, 507), True, 'import numpy as np\n'), ((524, 558), 'numpy.zeros', 'np.zeros', (['(self.k, self.d, self.d)'], {}), '((self.k, self.d, self.d))\n', (532, 558), True, 'import numpy as np\n'), ((572, 588), 'numpy.zeros', 'np.zeros', (['self.k'], {}), '(self.k)\n', (580, 588), True, 'import numpy as np\n'), ((678, 697), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {}), '(**self.para)\n', (684, 697), False, 'from sklearn.cluster import DBSCAN\n'), ((747, 775), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {}), '(**self.para)\n', (762, 775), False, 'from sklearn.ensemble import IsolationForest\n'), ((1591, 1618), 'numpy.argmax', 'np.argmax', (['all_prob'], {'axis': '(1)'}), '(all_prob, axis=1)\n', (1600, 1618), True, 'import numpy as np\n'), ((950, 985), 'numpy.mean', 'np.mean', (['self.X[indices, :]'], {'axis': '(0)'}), '(self.X[indices, :], axis=0)\n', (957, 985), True, 'import numpy as np\n'), ((1012, 1056), 'numpy.cov', 'np.cov', (['self.X[indices, :]'], {'rowvar': '(0)', 'bias': '(0)'}), '(self.X[indices, :], rowvar=0, bias=0)\n', (1018, 1056), True, 'import numpy as np\n'), ((1321, 1409), 'scipy.stats.multivariate_normal', 'multivariate_normal', ([], {'mean': 'self.mu[label]', 'cov': 'self.sigma[label]', 'allow_singular': '(True)'}), '(mean=self.mu[label], cov=self.sigma[label],\n allow_singular=True)\n', (1340, 1409), False, 'from scipy.stats import multivariate_normal\n'), ((1543, 1567), 'numpy.max', 'np.max', (['all_prob'], {'axis': '(1)'}), '(all_prob, axis=1)\n', (1549, 1567), True, 'import numpy as np\n'), ((1679, 1701), 'numpy.squeeze', 'np.squeeze', (['pred_label'], {}), '(pred_label)\n', (1689, 1701), True, 'import numpy as np\n'), ((1504, 1526), 'numpy.log', 'np.log', 
(['self.pi[label]'], {}), '(self.pi[label])\n', (1510, 1526), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import sklearn.metrics as sm
from .context import grouplasso
from grouplasso.util import sigmoid, binary_log_loss, mean_squared_error
class BasicTestSuite(unittest.TestCase):
    """Checks that grouplasso's loss helpers match sklearn's reference values."""

    def test_log_loss(self):
        np.random.seed(0)
        n = 200
        targets = np.random.randint(0, 2, size=n)
        probs = np.random.uniform(0, 1, size=n)
        reference = sm.log_loss(targets, probs)
        ours = binary_log_loss(targets, probs)
        assert ours > 0
        assert abs(reference - ours) < 1e-12

    def test_mean_squared_error(self):
        np.random.seed(0)
        n = 200
        targets = np.random.randn(n)
        preds = np.random.randn(n)
        reference = sm.mean_squared_error(targets, preds)
        ours = mean_squared_error(targets, preds)
        assert ours > 0
        assert abs(reference - ours) < 1e-12
assert abs(loss1 - loss2) < 1e-12
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| [
"unittest.main",
"numpy.random.uniform",
"numpy.random.seed",
"numpy.random.randn",
"sklearn.metrics.log_loss",
"grouplasso.util.binary_log_loss",
"numpy.random.randint",
"grouplasso.util.mean_squared_error",
"sklearn.metrics.mean_squared_error"
] | [((1001, 1016), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1014, 1016), False, 'import unittest\n'), ((303, 320), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (317, 320), True, 'import numpy as np\n'), ((362, 401), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'n_samples'}), '(0, 2, size=n_samples)\n', (379, 401), True, 'import numpy as np\n'), ((419, 458), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {'size': 'n_samples'}), '(0, 1, size=n_samples)\n', (436, 458), True, 'import numpy as np\n'), ((475, 502), 'sklearn.metrics.log_loss', 'sm.log_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (486, 502), True, 'import sklearn.metrics as sm\n'), ((519, 550), 'grouplasso.util.binary_log_loss', 'binary_log_loss', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (534, 550), False, 'from grouplasso.util import sigmoid, binary_log_loss, mean_squared_error\n'), ((666, 683), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (680, 683), True, 'import numpy as np\n'), ((725, 751), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (740, 751), True, 'import numpy as np\n'), ((769, 795), 'numpy.random.randn', 'np.random.randn', (['n_samples'], {}), '(n_samples)\n', (784, 795), True, 'import numpy as np\n'), ((812, 849), 'sklearn.metrics.mean_squared_error', 'sm.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (833, 849), True, 'import sklearn.metrics as sm\n'), ((866, 900), 'grouplasso.util.mean_squared_error', 'mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (884, 900), False, 'from grouplasso.util import sigmoid, binary_log_loss, mean_squared_error\n')] |
from __future__ import print_function, division
import os
import cv2
import csv
import torch
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms, utils
import bateauxCsv
from torchstat import stat
from torchvision.utils import make_grid
# Project root and MASATI-v2 dataset locations (ship images vs. plain water).
ABSOLUTE = 'D:/Documents/Prepa/TIPE'
pathBateaux = ABSOLUTE + "/data/MASATI-v2/ship"
pathMer = ABSOLUTE + "/data/MASATI-v2/water"
pathModels = ABSOLUTE + "/Models/"
listeBateaux = os.listdir(pathBateaux)
listeMer = os.listdir(pathMer)
# Number of samples taken from each class when generating the CSV index.
NUMBER = 500
bateauxCsv.generateCsv(NUMBER)
# Run on GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ImageData(Dataset):
    """Dataset of (image tensor, binary label) pairs loaded eagerly from a CSV.

    All images are read and transformed at construction time; any non-zero
    label in the CSV is collapsed to the positive class (1).
    """
    def __init__(self, csvtruc, transform = None):
        # csvtruc: path to the CSV produced by bateauxCsv.generateCsv.
        # transform: transform applied to each image loaded with cv2.
        self.transform = transform
        self.images = []
        self.resultats = []
        with open(csvtruc, 'r') as fichier:
            truc = csv.reader(fichier, delimiter = ',')
            for ligne in truc:
                if ligne != []:
                    # NOTE(review): csv.reader already splits on ',', yet
                    # ligne[0] is split again — this only works if each row
                    # is written as a single 'path,label' field; confirm
                    # against bateauxCsv.generateCsv.
                    image, resultat = ligne[0].split(',')
                    self.images.append(self.transform(cv2.imread(image)).float())
                    resultat = int(resultat)
                    if resultat == 0:
                        self.resultats.append(0)
                    else:
                        # Any non-zero class becomes the positive label.
                        self.resultats.append(1)
    def __getitem__(self, index):
        # Return the (image tensor, label) pair for sample `index`.
        image = self.images[index]
        resultat = self.resultats[index]
        return image, resultat
    def __len__(self):
        return len(self.resultats)
# Eagerly load the dataset and build the training dataloader (batch size 8).
set_images = ImageData("D:/Documents/Prepa/TIPE/bateaux.csv", transforms.Compose([transforms.ToTensor(),]))
imagesLoader = torch.utils.data.DataLoader(set_images, batch_size = 8, shuffle = True, pin_memory=True, num_workers=0)
print("Images chargées")
def load():
    """Rebuild the global dataset and dataloader from the generated CSV.

    Unlike the module-level loader (batch size 8), this one uses batch size 32.
    """
    global set_images
    global imagesLoader
    pipeline = transforms.Compose([transforms.ToTensor()])
    set_images = ImageData("D:/Documents/Prepa/TIPE/bateaux.csv", pipeline)
    imagesLoader = torch.utils.data.DataLoader(set_images, batch_size=32, shuffle=True, pin_memory=True, num_workers=0)
    print('Images chargées.')
class Net(nn.Module):
    """Convolutional binary classifier (two output logits).

    The convolutional trunk contains nine 3x3 conv blocks (Conv + ReLU +
    BatchNorm), seven of them followed by a 2x2 max-pool, so the spatial
    resolution is divided by 128 overall. The flattened 2048-d features
    (assumes 128 channels over a 4x4 map, i.e. 512x512 input — TODO confirm)
    feed a small fully connected head.
    """

    def __init__(self):
        super(Net, self).__init__()
        # Number of training epochs completed so far (updated by train()).
        self.epochs = 0
        # (in_channels, out_channels, add a 2x2 max-pool after the block?)
        conv_specs = [
            (3, 16, False),
            (16, 64, True),
            (64, 128, False),
            (128, 128, True),
            (128, 128, True),
            (128, 128, True),
            (128, 128, True),
            (128, 128, True),
            (128, 128, True),
        ]
        layers = []
        for cin, cout, pooled in conv_specs:
            layers.append(nn.Conv2d(cin, cout, 3, 1, 1))
            layers.append(nn.ReLU())
            layers.append(nn.BatchNorm2d(cout))
            if pooled:
                layers.append(nn.MaxPool2d(2, 2))
        self.conv = nn.Sequential(*layers)
        self.classifier = nn.Sequential(
            nn.Linear(2048, 512),
            nn.ReLU(),
            nn.Linear(512, 10),
            nn.ReLU(),
            nn.Linear(10, 2),
        )

    def forward(self, x):
        features = self.conv(x)
        flat = self.review(features)
        return self.classifier(flat)

    def review(self, x):
        # Flatten the feature maps to 2048-d vectors for the classifier head.
        return x.view(-1, 2048)
# Instantiate the network on the selected device; train with cross-entropy
# over the two logits and plain SGD with momentum.
net = Net()
net.to(device, non_blocking=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(list(net.parameters()), lr = 0.001, momentum = 0.9)
def train(number):
    """Run `number` epochs of SGD over the global dataloader.

    Prints the summed loss after each epoch and increments net.epochs.
    """
    for epoch in range(number):
        epoch_loss = 0.0
        for batch in imagesLoader:
            states = batch[0].to(device, non_blocking=True)
            targets = batch[1].to(device, non_blocking=True)
            optimizer.zero_grad()
            predictions = net(states)
            batch_loss = criterion(predictions, targets)
            batch_loss.backward()
            optimizer.step()
            epoch_loss += batch_loss.item()
        print('Epoch : ' + str(epoch) + ' loss : ' + str(epoch_loss))
        net.epochs += 1
def test(altered, numero):
    """Run the network on a single dataset sample.

    When `altered` is True the index is offset by NUMBER (second half of the
    dataset), otherwise sample `numero` of the first half is used. Returns the
    raw network output (two logits).
    """
    offset = NUMBER if altered else 0
    image = set_images[offset + numero][0].unsqueeze(0).to(device)
    return net(image)
def testSome(Number):
    """Estimate accuracy on `Number` random samples from each half of the set.

    Counts how often the 'altered' half is classified as class 1 and the
    plain half as class 0, then prints both ratios.
    """
    global NUMBER
    bateau = 0
    for idx in np.random.randint(0, NUMBER, size=(Number,)):
        scores = test(True, idx)
        if scores[0][1] > scores[0][0]:
            bateau += 1
    pas_bateau = 0
    for idx in np.random.randint(0, NUMBER, size=(Number,)):
        scores = test(False, idx)
        if scores[0][1] < scores[0][0]:
            pas_bateau += 1
    print("Pour normal : " + str(pas_bateau / Number) + " et altéré : " + str(bateau / Number))
def saveModel(nom):
    # Persist the current network weights under pathModels/<nom>.
    torch.save(net.state_dict(), pathModels + nom)
def loadModel(nom):
    # Restore network weights previously written by saveModel.
    net.load_state_dict(torch.load(pathModels + nom))
def show(layer, number, imageN):
    """Visualize `number` activation maps of the `layer`-th convolution
    (1-based) for dataset image `imageN`.

    Bug fix: the previous version registered hooks on attributes
    net.conv1..net.conv9, which do not exist — Net only defines the
    `conv` Sequential and `classifier`. Hooks are now attached to the
    Conv2d modules found inside net.conv.
    """
    activation = {}

    def get_activation(name):
        def hook(model, input, output):
            activation[name] = output.detach()
        return hook

    # Hook every convolution inside the Sequential, named conv1..convN
    # in layer order.
    handles = []
    conv_index = 0
    for module in net.conv:
        if isinstance(module, nn.Conv2d):
            conv_index += 1
            handles.append(module.register_forward_hook(
                get_activation('conv' + str(conv_index))))
    data = set_images[imageN][0].to(device, non_blocking=True)
    net(data.unsqueeze(0))
    # Remove the hooks so repeated calls do not accumulate them.
    for handle in handles:
        handle.remove()
    act = activation['conv' + str(layer)].squeeze().cpu()
    fig, axarr = plt.subplots(number)
    for idx in range(number):
        axarr[idx].imshow(act[idx])
    plt.show()
def show2(number):
    """Display `number` kernels of the first convolution layer.

    Bug fix: net.conv1 does not exist on Net — the first Conv2d is the
    first module of the net.conv Sequential (index 0).
    """
    kernels = net.conv[0].weight.detach().cpu()
    fig, axarr = plt.subplots(number)
    for idx in range(number):
        axarr[idx].imshow(kernels[idx].squeeze())
    plt.show()
def show3():
    """Display the kernels of the second convolution layer as one
    min-max-normalized image grid.

    Bug fix: net.conv2 does not exist on Net — the second Conv2d sits at
    index 3 of the net.conv Sequential (after the Conv/ReLU/BatchNorm of
    the first block).
    """
    kernels = net.conv[3].weight.detach().cpu().clone()
    # Min-max normalize the weights into [0, 1] for display.
    kernels = kernels - kernels.min()
    kernels = kernels / kernels.max()
    print(kernels.shape)
    img = make_grid(kernels)
    plt.imshow(img.permute(1, 2, 0))
    plt.show()
| [
"matplotlib.pyplot.show",
"csv.reader",
"torch.utils.data.DataLoader",
"torch.nn.ReLU",
"torch.load",
"torch.nn.Conv2d",
"torch.nn.CrossEntropyLoss",
"torchvision.utils.make_grid",
"cv2.imread",
"torch.nn.BatchNorm2d",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.nn.Linear",
"... | [((621, 644), 'os.listdir', 'os.listdir', (['pathBateaux'], {}), '(pathBateaux)\n', (631, 644), False, 'import os\n'), ((656, 675), 'os.listdir', 'os.listdir', (['pathMer'], {}), '(pathMer)\n', (666, 675), False, 'import os\n'), ((691, 721), 'bateauxCsv.generateCsv', 'bateauxCsv.generateCsv', (['NUMBER'], {}), '(NUMBER)\n', (713, 721), False, 'import bateauxCsv\n'), ((1796, 1899), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['set_images'], {'batch_size': '(8)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(set_images, batch_size=8, shuffle=True,\n pin_memory=True, num_workers=0)\n', (1823, 1899), False, 'import torch\n'), ((3791, 3812), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3810, 3812), True, 'import torch.nn as nn\n'), ((2115, 2219), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['set_images'], {'batch_size': '(32)', 'shuffle': '(True)', 'pin_memory': '(True)', 'num_workers': '(0)'}), '(set_images, batch_size=32, shuffle=True,\n pin_memory=True, num_workers=0)\n', (2142, 2219), False, 'import torch\n'), ((4737, 4781), 'numpy.random.randint', 'np.random.randint', (['(0)', 'NUMBER'], {'size': '(Number,)'}), '(0, NUMBER, size=(Number,))\n', (4754, 4781), True, 'import numpy as np\n'), ((4903, 4947), 'numpy.random.randint', 'np.random.randint', (['(0)', 'NUMBER'], {'size': '(Number,)'}), '(0, NUMBER, size=(Number,))\n', (4920, 4947), True, 'import numpy as np\n'), ((6210, 6230), 'matplotlib.pyplot.subplots', 'plt.subplots', (['number'], {}), '(number)\n', (6222, 6230), True, 'import matplotlib.pyplot as plt\n'), ((6301, 6311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6309, 6311), True, 'import matplotlib.pyplot as plt\n'), ((6395, 6415), 'matplotlib.pyplot.subplots', 'plt.subplots', (['number'], {}), '(number)\n', (6407, 6415), True, 'import matplotlib.pyplot as plt\n'), ((6500, 6510), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6508, 
6510), True, 'import matplotlib.pyplot as plt\n'), ((6690, 6708), 'torchvision.utils.make_grid', 'make_grid', (['kernels'], {}), '(kernels)\n', (6699, 6708), False, 'from torchvision.utils import make_grid\n'), ((6750, 6760), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6758, 6760), True, 'import matplotlib.pyplot as plt\n'), ((757, 782), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (780, 782), False, 'import torch\n'), ((5265, 5293), 'torch.load', 'torch.load', (['(pathModels + nom)'], {}), '(pathModels + nom)\n', (5275, 5293), False, 'import torch\n'), ((1024, 1058), 'csv.reader', 'csv.reader', (['fichier'], {'delimiter': '""","""'}), "(fichier, delimiter=',')\n", (1034, 1058), False, 'import csv\n'), ((1755, 1776), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1774, 1776), False, 'from torchvision import transforms, utils\n'), ((2401, 2426), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)', '(1)', '(1)'], {}), '(3, 16, 3, 1, 1)\n', (2410, 2426), True, 'import torch.nn as nn\n'), ((2436, 2445), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2443, 2445), True, 'import torch.nn as nn\n'), ((2455, 2473), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16)'], {}), '(16)\n', (2469, 2473), True, 'import torch.nn as nn\n'), ((2483, 2509), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(64)', '(3)', '(1)', '(1)'], {}), '(16, 64, 3, 1, 1)\n', (2492, 2509), True, 'import torch.nn as nn\n'), ((2519, 2528), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2526, 2528), True, 'import torch.nn as nn\n'), ((2538, 2556), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (2552, 2556), True, 'import torch.nn as nn\n'), ((2566, 2584), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2578, 2584), True, 'import torch.nn as nn\n'), ((2595, 2622), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)', '(3)', '(1)', '(1)'], {}), '(64, 128, 3, 1, 1)\n', (2604, 2622), True, 'import 
torch.nn as nn\n'), ((2632, 2641), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2639, 2641), True, 'import torch.nn as nn\n'), ((2651, 2670), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2665, 2670), True, 'import torch.nn as nn\n'), ((2680, 2708), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (2689, 2708), True, 'import torch.nn as nn\n'), ((2718, 2727), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2725, 2727), True, 'import torch.nn as nn\n'), ((2737, 2756), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2751, 2756), True, 'import torch.nn as nn\n'), ((2766, 2784), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2778, 2784), True, 'import torch.nn as nn\n'), ((2795, 2823), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (2804, 2823), True, 'import torch.nn as nn\n'), ((2833, 2842), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2840, 2842), True, 'import torch.nn as nn\n'), ((2852, 2871), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2866, 2871), True, 'import torch.nn as nn\n'), ((2881, 2899), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (2893, 2899), True, 'import torch.nn as nn\n'), ((2910, 2938), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (2919, 2938), True, 'import torch.nn as nn\n'), ((2948, 2957), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2955, 2957), True, 'import torch.nn as nn\n'), ((2967, 2986), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (2981, 2986), True, 'import torch.nn as nn\n'), ((2996, 3014), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3008, 3014), True, 'import torch.nn as nn\n'), ((3025, 3053), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 
3, 1, 1)\n', (3034, 3053), True, 'import torch.nn as nn\n'), ((3063, 3072), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3070, 3072), True, 'import torch.nn as nn\n'), ((3082, 3101), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3096, 3101), True, 'import torch.nn as nn\n'), ((3111, 3129), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3123, 3129), True, 'import torch.nn as nn\n'), ((3140, 3168), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (3149, 3168), True, 'import torch.nn as nn\n'), ((3178, 3187), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3185, 3187), True, 'import torch.nn as nn\n'), ((3197, 3216), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3211, 3216), True, 'import torch.nn as nn\n'), ((3226, 3244), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3238, 3244), True, 'import torch.nn as nn\n'), ((3255, 3283), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)', '(3)', '(1)', '(1)'], {}), '(128, 128, 3, 1, 1)\n', (3264, 3283), True, 'import torch.nn as nn\n'), ((3293, 3302), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3300, 3302), True, 'import torch.nn as nn\n'), ((3312, 3331), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3326, 3331), True, 'import torch.nn as nn\n'), ((3341, 3359), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (3353, 3359), True, 'import torch.nn as nn\n'), ((3421, 3441), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(512)'], {}), '(2048, 512)\n', (3430, 3441), True, 'import torch.nn as nn\n'), ((3451, 3460), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3458, 3460), True, 'import torch.nn as nn\n'), ((3470, 3488), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(10)'], {}), '(512, 10)\n', (3479, 3488), True, 'import torch.nn as nn\n'), ((3498, 3507), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3505, 3507), True, 'import 
torch.nn as nn\n'), ((3517, 3533), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(2)'], {}), '(10, 2)\n', (3526, 3533), True, 'import torch.nn as nn\n'), ((2071, 2092), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2090, 2092), False, 'from torchvision import transforms, utils\n'), ((1236, 1253), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (1246, 1253), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
from tensorflow.feature_column import numeric_column as num
from tensorflow.estimator import RunConfig
from tensorflow.contrib.distribute import MirroredStrategy
def make_tfr_input_fn(filename_pattern, batch_size, board_size, options):
    """Build an Estimator input_fn that reads batched TFRecord examples.

    Each example carries a flattened padded board ('state') and per-cell
    'advantage' values, which serve as the label.

    Args:
        filename_pattern: glob pattern of TFRecord files to read.
        batch_size: number of examples per batch.
        board_size: unpadded board width; tensors use (board_size + 2)^2 cells.
        options: dict with pipeline tuning keys ('shuffle_buffer_size',
            'prefetch_buffer_size', 'reader_num_threads',
            'parser_num_threads') and the 'distribute' flag.

    Returns:
        A zero-argument input_fn suitable for tf.estimator.
    """
    padded = board_size + 2
    feature_spec = {
        'state': tf.FixedLenFeature([padded * padded * 2], tf.float32),
        'advantage': tf.FixedLenFeature([padded * padded], tf.float32)
    }

    def _input_fn():
        ds = tf.data.experimental.make_batched_features_dataset(
            file_pattern=filename_pattern,
            batch_size=batch_size,
            features=feature_spec,
            shuffle_buffer_size=options['shuffle_buffer_size'],
            prefetch_buffer_size=options['prefetch_buffer_size'],
            reader_num_threads=options['reader_num_threads'],
            parser_num_threads=options['parser_num_threads'],
            label_key='advantage')
        # Under a distribution strategy the Estimator consumes the dataset
        # directly; otherwise it expects the (features, label) tensors.
        if not options['distribute']:
            return ds.make_one_shot_iterator().get_next()
        return ds
    return _input_fn
def make_model_fn(board_size, options):
    """Build an Estimator model_fn predicting per-cell advantage values.

    The board is padded by one border cell on each side, so all spatial
    tensors are (board_size + 2) x (board_size + 2). The border cells are
    masked out of the loss.

    Args:
        board_size: unpadded board width.
        options: dict with 'learning_rate', 'optimizer' ("sgd" | "adam" |
            "adagrad"), and 'hypothesis' (network architecture key).

    Returns:
        A model_fn(features, labels, mode) for tf.estimator.Estimator.
    """
    N = board_size
    N_p = N + 2  # padded board width (one border cell on each side)
    feature_columns = [num('state', shape=(N_p * N_p * 2))]
    optimizers = {
        "sgd": tf.train.GradientDescentOptimizer(learning_rate=options['learning_rate']),
        "adam": tf.train.AdamOptimizer(learning_rate=options['learning_rate']),
        "adagrad": tf.train.AdagradOptimizer(learning_rate=options['learning_rate'])
    }

    def _model_fn(features, labels, mode):
        # Mask that zeroes the padded border so it contributes nothing to
        # the loss. NOTE: previously hard-coded to 22 (i.e. board_size 20);
        # now derived from board_size so other board sizes work too.
        mask = np.ones([N_p, N_p], dtype=int)
        mask[0] = 0
        mask[N_p - 1] = 0
        mask[:, 0] = 0
        mask[:, N_p - 1] = 0
        mask = tf.constant(mask, dtype=tf.float32)
        mask = tf.expand_dims(mask, -1)
        # Local import keeps the heavy hypothesis definitions off the
        # module import path until the graph is actually built.
        from train.hypotheses import conv_2x1024_5, conv_1024_4, conv_512_3, conv_gomoku
        hypotheses_dict = {
            'conv_2x1024_5': conv_2x1024_5,
            'conv_1024_4': conv_1024_4,
            'conv_512_3': conv_512_3,
            'conv_gomoku': conv_gomoku
        }
        hypothesis = hypotheses_dict[options['hypothesis']]
        out = hypothesis(N, features, feature_columns, options)
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode, predictions=out)
        # Labels arrive flat; reshape to the padded board layout.
        labels = tf.expand_dims(labels, -1)
        labels = tf.reshape(labels, [-1, N_p, N_p, 1], name='model_reshape')
        loss = tf.losses.mean_squared_error(labels, out * mask)
        mean_error = tf.metrics.mean(tf.abs(labels - out))
        if mode == tf.estimator.ModeKeys.EVAL:
            return tf.estimator.EstimatorSpec(
                mode=mode,
                loss=loss,
                eval_metric_ops={'mean_error': mean_error}
            )
        optimizer = optimizers[options['optimizer']]
        train_op = optimizer.minimize(loss, global_step=tf.train.get_or_create_global_step())
        # Gradient histograms for TensorBoard; ":" is illegal in summary names.
        grads = optimizer.compute_gradients(loss)
        for g in grads:
            name = ("%s-grad" % g[1].name).replace(":", "_")
            tf.summary.histogram(name, g[0])
        return tf.estimator.EstimatorSpec(
            mode,
            loss=loss,
            train_op=train_op)
    return _model_fn
def make_serving_input_fn(board_size):
    """Return a serving_input_fn exposing a flat 'state' placeholder.

    The placeholder has shape [(board_size + 2)^2 * 2, None] to match the
    flattened padded-board feature used at training time.
    """
    dim = (board_size + 2) * (board_size + 2) * 2

    def _serving_input_fn():
        state = tf.placeholder(name='state', shape=[dim, None], dtype=tf.float32)
        receiver_tensors = {'state': state}
        # The raw placeholder dict doubles as the parsed-feature dict.
        return tf.estimator.export.ServingInputReceiver(receiver_tensors, receiver_tensors)
    return _serving_input_fn
def train_and_evaluate(board_size, options):
    """Configure and run the full Estimator train/evaluate/export loop.

    Wires the TFRecord input pipelines, model_fn, serving exporter, and
    (optionally) a MirroredStrategy for multi-GPU training, then runs
    tf.estimator.train_and_evaluate.

    Args:
        board_size: unpadded board width, forwarded to the input and model fns.
        options: dict of data patterns, batch sizes, step counts, checkpoint
            cadence, and the 'distribute' flag.

    Returns:
        The (metrics, export_results) tuple from
        tf.estimator.train_and_evaluate (previously computed but discarded).
    """
    train_input_fn = make_tfr_input_fn(options['train_data_pattern'],
                                       options['train_batch_size'],
                                       board_size, options)
    eval_input_fn = make_tfr_input_fn(options['eval_data_pattern'],
                                      options['eval_batch_size'],
                                      board_size, options)
    model_fn = make_model_fn(board_size, options)
    serving_input_fn = make_serving_input_fn(board_size)
    exporter = tf.estimator.LatestExporter('exporter', serving_input_fn)
    train_spec = tf.estimator.TrainSpec(
        input_fn=train_input_fn,
        max_steps=options['max_train_steps'])
    eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn, exporters=exporter,
        steps=options['eval_steps'],
        throttle_secs=options['throttle_secs'],
        start_delay_secs=0)
    # Only attach a distribution strategy when explicitly requested.
    strategy = MirroredStrategy() if options['distribute'] else None
    config = RunConfig(model_dir=options['model_dir'],
                       save_summary_steps=options['save_summary_steps'],
                       train_distribute=strategy,
                       save_checkpoints_steps=options['save_checkpoints_steps'],
                       log_step_count_steps=options['log_step_count_steps'])
    estimator = tf.estimator.Estimator(
        config=config,
        model_fn=model_fn)
    ##################################################################
    # Finally, train and evaluate the model
    ##################################################################
    return tf.estimator.train_and_evaluate(
        estimator,
        train_spec=train_spec,
        eval_spec=eval_spec)
"tensorflow.estimator.export.ServingInputReceiver",
"tensorflow.reshape",
"numpy.ones",
"tensorflow.estimator.TrainSpec",
"tensorflow.estimator.Estimator",
"tensorflow.abs",
"tensorflow.data.experimental.make_batched_features_dataset",
"tensorflow.train.get_or_create_global_step",
"tensorflow.placeh... | [((4304, 4361), 'tensorflow.estimator.LatestExporter', 'tf.estimator.LatestExporter', (['"""exporter"""', 'serving_input_fn'], {}), "('exporter', serving_input_fn)\n", (4331, 4361), True, 'import tensorflow as tf\n'), ((4380, 4470), 'tensorflow.estimator.TrainSpec', 'tf.estimator.TrainSpec', ([], {'input_fn': 'train_input_fn', 'max_steps': "options['max_train_steps']"}), "(input_fn=train_input_fn, max_steps=options[\n 'max_train_steps'])\n", (4402, 4470), True, 'import tensorflow as tf\n'), ((4501, 4664), 'tensorflow.estimator.EvalSpec', 'tf.estimator.EvalSpec', ([], {'input_fn': 'eval_input_fn', 'exporters': 'exporter', 'steps': "options['eval_steps']", 'throttle_secs': "options['throttle_secs']", 'start_delay_secs': '(0)'}), "(input_fn=eval_input_fn, exporters=exporter, steps=\n options['eval_steps'], throttle_secs=options['throttle_secs'],\n start_delay_secs=0)\n", (4522, 4664), True, 'import tensorflow as tf\n'), ((4774, 5017), 'tensorflow.estimator.RunConfig', 'RunConfig', ([], {'model_dir': "options['model_dir']", 'save_summary_steps': "options['save_summary_steps']", 'train_distribute': 'strategy', 'save_checkpoints_steps': "options['save_checkpoints_steps']", 'log_step_count_steps': "options['log_step_count_steps']"}), "(model_dir=options['model_dir'], save_summary_steps=options[\n 'save_summary_steps'], train_distribute=strategy,\n save_checkpoints_steps=options['save_checkpoints_steps'],\n log_step_count_steps=options['log_step_count_steps'])\n", (4783, 5017), False, 'from tensorflow.estimator import RunConfig\n'), ((5115, 5171), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'config': 'config', 'model_fn': 'model_fn'}), '(config=config, model_fn=model_fn)\n', (5137, 5171), True, 'import tensorflow as tf\n'), ((5408, 5499), 'tensorflow.estimator.train_and_evaluate', 'tf.estimator.train_and_evaluate', (['estimator'], {'train_spec': 'train_spec', 'eval_spec': 'eval_spec'}), '(estimator, train_spec=train_spec, 
eval_spec\n =eval_spec)\n', (5439, 5499), True, 'import tensorflow as tf\n'), ((351, 398), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[N_p * N_p * 2]', 'tf.float32'], {}), '([N_p * N_p * 2], tf.float32)\n', (369, 398), True, 'import tensorflow as tf\n'), ((421, 464), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['[N_p * N_p]', 'tf.float32'], {}), '([N_p * N_p], tf.float32)\n', (439, 464), True, 'import tensorflow as tf\n'), ((511, 889), 'tensorflow.data.experimental.make_batched_features_dataset', 'tf.data.experimental.make_batched_features_dataset', ([], {'file_pattern': 'filename_pattern', 'batch_size': 'batch_size', 'features': 'feature_spec', 'shuffle_buffer_size': "options['shuffle_buffer_size']", 'prefetch_buffer_size': "options['prefetch_buffer_size']", 'reader_num_threads': "options['reader_num_threads']", 'parser_num_threads': "options['parser_num_threads']", 'label_key': '"""advantage"""'}), "(file_pattern=\n filename_pattern, batch_size=batch_size, features=feature_spec,\n shuffle_buffer_size=options['shuffle_buffer_size'],\n prefetch_buffer_size=options['prefetch_buffer_size'],\n reader_num_threads=options['reader_num_threads'], parser_num_threads=\n options['parser_num_threads'], label_key='advantage')\n", (561, 889), True, 'import tensorflow as tf\n'), ((1217, 1258), 'tensorflow.feature_column.numeric_column', 'num', (['"""state"""'], {'shape': '((N + 2) * (N + 2) * 2)'}), "('state', shape=(N + 2) * (N + 2) * 2)\n", (1220, 1258), True, 'from tensorflow.feature_column import numeric_column as num\n'), ((1287, 1360), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': "options['learning_rate']"}), "(learning_rate=options['learning_rate'])\n", (1320, 1360), True, 'import tensorflow as tf\n'), ((1378, 1440), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': "options['learning_rate']"}), "(learning_rate=options['learning_rate'])\n", (1400, 1440), True, 
'import tensorflow as tf\n'), ((1461, 1526), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', ([], {'learning_rate': "options['learning_rate']"}), "(learning_rate=options['learning_rate'])\n", (1486, 1526), True, 'import tensorflow as tf\n'), ((1597, 1625), 'numpy.ones', 'np.ones', (['[22, 22]'], {'dtype': 'int'}), '([22, 22], dtype=int)\n', (1604, 1625), True, 'import numpy as np\n'), ((1723, 1758), 'tensorflow.constant', 'tf.constant', (['mask'], {'dtype': 'tf.float32'}), '(mask, dtype=tf.float32)\n', (1734, 1758), True, 'import tensorflow as tf\n'), ((1774, 1798), 'tensorflow.expand_dims', 'tf.expand_dims', (['mask', '(-1)'], {}), '(mask, -1)\n', (1788, 1798), True, 'import tensorflow as tf\n'), ((2394, 2420), 'tensorflow.expand_dims', 'tf.expand_dims', (['labels', '(-1)'], {}), '(labels, -1)\n', (2408, 2420), True, 'import tensorflow as tf\n'), ((2438, 2495), 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1, 22, 22, 1]'], {'name': '"""model_reshape"""'}), "(labels, [-1, 22, 22, 1], name='model_reshape')\n", (2448, 2495), True, 'import tensorflow as tf\n'), ((2511, 2559), 'tensorflow.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['labels', '(out * mask)'], {}), '(labels, out * mask)\n', (2539, 2559), True, 'import tensorflow as tf\n'), ((3650, 3718), 'tensorflow.estimator.export.ServingInputReceiver', 'tf.estimator.export.ServingInputReceiver', (['placeholders', 'placeholders'], {}), '(placeholders, placeholders)\n', (3690, 3718), True, 'import tensorflow as tf\n'), ((4707, 4725), 'tensorflow.contrib.distribute.MirroredStrategy', 'MirroredStrategy', ([], {}), '()\n', (4723, 4725), False, 'from tensorflow.contrib.distribute import MirroredStrategy\n'), ((2326, 2375), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'predictions': 'out'}), '(mode, predictions=out)\n', (2352, 2375), True, 'import tensorflow as tf\n'), ((2593, 2613), 'tensorflow.abs', 'tf.abs', (['(labels - out)'], {}), '(labels - 
out)\n', (2599, 2613), True, 'import tensorflow as tf\n'), ((2684, 2781), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss', 'eval_metric_ops': "{'mean_error': mean_error}"}), "(mode=mode, loss=loss, eval_metric_ops={\n 'mean_error': mean_error})\n", (2710, 2781), True, 'import tensorflow as tf\n'), ((3266, 3328), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'loss': 'loss', 'train_op': 'train_op'}), '(mode, loss=loss, train_op=train_op)\n', (3292, 3328), True, 'import tensorflow as tf\n'), ((3549, 3637), 'tensorflow.placeholder', 'tf.placeholder', ([], {'name': '"""state"""', 'shape': '[(N + 2) * (N + 2) * 2, None]', 'dtype': 'tf.float32'}), "(name='state', shape=[(N + 2) * (N + 2) * 2, None], dtype=tf.\n float32)\n", (3563, 3637), True, 'import tensorflow as tf\n'), ((3201, 3233), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['name', 'g[0]'], {}), '(name, g[0])\n', (3221, 3233), True, 'import tensorflow as tf\n'), ((2973, 3009), 'tensorflow.train.get_or_create_global_step', 'tf.train.get_or_create_global_step', ([], {}), '()\n', (3007, 3009), True, 'import tensorflow as tf\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.