code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: a-poor
Sample Genetic Algorithm
"""
import numpy as np
from GenAlgLib import GeneticAlgorithm
# Defining custom fitness function
def fitness(genome):
    """Fitness of a genome: 1 plus the sum of sqrt(e**gene) over all genes."""
    # sum() with start=1 reproduces the original left-to-right accumulation
    # that began at total = 1
    return sum((np.sqrt(np.e ** gene) for gene in genome), 1)
# Create instance of GA class.
# Positional args presumably (population size, genome length, gene range)
# — TODO confirm against GenAlgLib's GeneticAlgorithm signature.
# x_rate is the crossover rate, mutation_rate the per-gene mutation chance;
# random_seed fixes the run for reproducibility.
ga = GeneticAlgorithm(10, 30, 500, x_rate=0.9, mutation_rate=0.005, multithread=True, random_seed=0)
# Replace the object's default fitness function with the custom one defined above
ga.fitness = fitness
# Run the GA for 500 generations, printing progress every 5 generations and
# logging to logtest.csv; stop_value/stop_measure presumably trigger early
# stopping once the max fitness reaches 2500 — verify against GenAlgLib docs.
ga.run(500, print_step=5, logfile='logtest.csv', stop_value=2500, stop_measure='max')
| [
"GenAlgLib.GeneticAlgorithm",
"numpy.sqrt"
] | [((336, 436), 'GenAlgLib.GeneticAlgorithm', 'GeneticAlgorithm', (['(10)', '(30)', '(500)'], {'x_rate': '(0.9)', 'mutation_rate': '(0.005)', 'multithread': '(True)', 'random_seed': '(0)'}), '(10, 30, 500, x_rate=0.9, mutation_rate=0.005, multithread=\n True, random_seed=0)\n', (352, 436), False, 'from GenAlgLib import GeneticAlgorithm\n'), ((266, 284), 'numpy.sqrt', 'np.sqrt', (['(np.e ** n)'], {}), '(np.e ** n)\n', (273, 284), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from arpym_template.estimation.flexible_probabilities import FlexibleProbabilities
from arpym_template.toolbox.min_rel_entropy import min_rel_entropy
class TestFP(unittest.TestCase):
    """Tests for FlexibleProbabilities statistics and min_rel_entropy posteriors."""

    def setUp(self):
        pass

    def test_mean_cov(self):
        """Equal-weight FP mean/cov should match the sample statistics."""
        sample = np.random.randn(100, 2)
        flex_prob = FlexibleProbabilities(sample)
        expected_mean = np.mean(sample, axis=0)
        expected_cov = np.cov(sample.T) * 0.99
        self.assertAlmostEqual(np.linalg.norm(flex_prob.mean() - expected_mean), 0)
        self.assertAlmostEqual(np.linalg.norm(flex_prob.cov() - expected_cov), 0)

    def test_mre(self):
        """Min-rel-entropy posterior should match the known reference solution."""
        prior = FlexibleProbabilities(np.ones(4))
        constraint_mat = np.array([[1., 1., 1., 1.], [0., 1., 1., 1.]])
        constraint_vec = np.array([[1], [0.6]])
        posterior = min_rel_entropy(prior, None, None, constraint_mat, constraint_vec)[0]
        reference = np.array([[0.4000006, 0.2000002,
                               0.2000002, 0.2000002]])
        self.assertAlmostEqual(np.linalg.norm(posterior.p - reference), 0)
if __name__ == '__main__':
    # Run the test suite when the file is executed directly
    unittest.main()
| [
"unittest.main",
"numpy.random.randn",
"numpy.ones",
"arpym_template.toolbox.min_rel_entropy.min_rel_entropy",
"numpy.mean",
"arpym_template.estimation.flexible_probabilities.FlexibleProbabilities",
"numpy.array",
"numpy.cov"
] | [((1107, 1122), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1120, 1122), False, 'import unittest\n'), ((325, 348), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (340, 348), True, 'import numpy as np\n'), ((362, 389), 'arpym_template.estimation.flexible_probabilities.FlexibleProbabilities', 'FlexibleProbabilities', (['data'], {}), '(data)\n', (383, 389), False, 'from arpym_template.estimation.flexible_probabilities import FlexibleProbabilities\n'), ((706, 760), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0]])\n', (714, 760), True, 'import numpy as np\n'), ((774, 796), 'numpy.array', 'np.array', (['[[1], [0.6]]'], {}), '([[1], [0.6]])\n', (782, 796), True, 'import numpy as np\n'), ((678, 688), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (685, 688), True, 'import numpy as np\n'), ((815, 862), 'arpym_template.toolbox.min_rel_entropy.min_rel_entropy', 'min_rel_entropy', (['fp_pri', 'None', 'None', 'a_eq', 'b_eq'], {}), '(fp_pri, None, None, a_eq, b_eq)\n', (830, 862), False, 'from arpym_template.toolbox.min_rel_entropy import min_rel_entropy\n'), ((437, 458), 'numpy.mean', 'np.mean', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (444, 458), True, 'import numpy as np\n'), ((936, 992), 'numpy.array', 'np.array', (['[[0.4000006, 0.2000002, 0.2000002, 0.2000002]]'], {}), '([[0.4000006, 0.2000002, 0.2000002, 0.2000002]])\n', (944, 992), True, 'import numpy as np\n'), ((504, 518), 'numpy.cov', 'np.cov', (['data.T'], {}), '(data.T)\n', (510, 518), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, sampler
import matplotlib.pyplot as plt
from torchvision import transforms as T
import argparse
from tqdm import tqdm
import cv2
from self_sup_data.mvtec import SelfSupMVTecDataset, CLASS_NAMES, TEXTURES, OBJECTS
from model.resnet import resnet18_enc_dec
from experiments.training_utils import train_and_save_model
SETTINGS = {
### ------------------------------------------------ NSA ------------------------------------------------ ###
'Shift' : {
'fname': 'shift.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'binary'}
},
'Shift-923874273' : {
'fname': 'shift_923874273.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'binary'}
},
'Shift-2388222932' : {
'fname': 'shift_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'binary'}
},
'Shift-676346783' : {
'fname': 'shift_676346783.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'binary'}
},
'Shift-123425' : {
'fname': 'shift_123425.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'binary'}
},
'Shift-Intensity' : {
'fname': 'shift_intensity.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-923874273' : {
'fname': 'shift_intensity_923874273.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-2388222932' : {
'fname': 'shift_intensity_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-676346783' : {
'fname': 'shift_intensity_676346783.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-123425' : {
'fname': 'shift_intensity_123425.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Raw-Intensity' : {
'fname': 'shift_raw_intensity.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'relu',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-923874273' : {
'fname': 'shift_raw_intensity_923874273.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-2388222932' : {
'fname': 'shift_raw_intensity_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-676346783' : {
'fname': 'shift_raw_intensity_676346783.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-123425' : {
'fname': 'shift_raw_intensity_123425.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.NORMAL_CLONE, 'label_mode':'intensity'}
},
'Shift-M' : {
'fname': 'shift_m.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'binary'}
},
'Shift-M-923874273' : {
'fname': 'shift_m_923874273.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'binary'}
},
'Shift-M-2388222932' : {
'fname': 'shift_m_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'binary'}
},
'Shift-M-676346783' : {
'fname': 'shift_m_676346783.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'binary'}
},
'Shift-M-123425' : {
'fname': 'shift_m_123425.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'binary'}
},
'Shift-Intensity-M' : {
'fname': 'shift_intensity_m.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-M-923874273' : {
'fname': 'shift_intensity_m_923874273.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-M-2388222932' : {
'fname': 'shift_intensity_m_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-M-676346783' : {
'fname': 'shift_intensity_m_676346783.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Intensity-M-123425' : {
'fname': 'shift_intensity_m_123425.pt',
'out_dir': 'shift/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'logistic-intensity'}
},
'Shift-Raw-Intensity-M' : {
'fname': 'shift_raw_intensity_m.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'relu',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-M-923874273' : {
'fname': 'shift_raw_intensity_m_923874273.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 923874273,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-M-2388222932' : {
'fname': 'shift_raw_intensity_m_2388222932.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 2388222932,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-M-676346783' : {
'fname': 'shift_raw_intensity_m_676346783.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 676346783,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'intensity'}
},
'Shift-Raw-Intensity-M-123425' : {
'fname': 'shift_raw_intensity_m_123425.pt',
'out_dir': 'shift/',
'loss': nn.MSELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'seed': 123425,
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':True,
'shift':True, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'intensity'}
},
### ------------------------------------ Foreign patch poisson blending / interpolation ------------------------------------ ###
'FPI-Poisson' : {
'fname': 'fpi_poisson.pt',
'out_dir': 'fpi/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':False,
'shift':False, 'same':False, 'mode':cv2.MIXED_CLONE, 'label_mode':'continuous'}
},
'FPI' : {
'fname': 'fpi.pt',
'out_dir': 'fpi/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':False,
'shift':False, 'same':False, 'mode':'uniform', 'label_mode':'continuous'}
},
### ------------------------------------ Shifted patch pasting ------------------------------------ ###
'CutPaste' : {
'fname': 'cut_paste.pt',
'out_dir': 'cut_paste/',
'loss': nn.BCELoss,
'skip_background': True,
'final_activation': 'sigmoid',
'self_sup_args' : {'gamma_params':(2, 0.05, 0.03), 'resize':False,
'shift':True, 'same':True, 'mode':'swap', 'label_mode':'binary'}
},
}
# Per-class bounds on synthetic-anomaly patch size, expressed as half-widths
# in [0, 0.5] of the image side: ((h_min, h_max), (w_min, w_max)).
WIDTH_BOUNDS_PCT = {'bottle':((0.03, 0.4), (0.03, 0.4)), 'cable':((0.05, 0.4), (0.05, 0.4)), 'capsule':((0.03, 0.15), (0.03, 0.4)),
                    'hazelnut':((0.03, 0.35), (0.03, 0.35)), 'metal_nut':((0.03, 0.4), (0.03, 0.4)), 'pill':((0.03, 0.2), (0.03, 0.4)),
                    'screw':((0.03, 0.12), (0.03, 0.12)), 'toothbrush':((0.03, 0.4), (0.03, 0.2)), 'transistor':((0.03, 0.4), (0.03, 0.4)),
                    'zipper':((0.03, 0.4), (0.03, 0.2)),
                    'carpet':((0.03, 0.4), (0.03, 0.4)), 'grid':((0.03, 0.4), (0.03, 0.4)),
                    'leather':((0.03, 0.4), (0.03, 0.4)), 'tile':((0.03, 0.4), (0.03, 0.4)), 'wood':((0.03, 0.4), (0.03, 0.4))}
# Minimum patch/object overlap fraction; classes not listed here get no
# overlap constraint (the dict is read with .get()).
MIN_OVERLAP_PCT = {'bottle': 0.25, 'capsule':0.25,
                   'hazelnut':0.25, 'metal_nut':0.25, 'pill':0.25,
                   'screw':0.25, 'toothbrush':0.25,
                   'zipper':0.25}
# Minimum fraction of the patch that must be foreground object (object classes only).
MIN_OBJECT_PCT = {'bottle': 0.7, 'capsule':0.7,
                  'hazelnut':0.7, 'metal_nut':0.5, 'pill':0.7,
                  'screw':.5, 'toothbrush':0.25,
                  'zipper':0.7}
# Maximum number of synthetic patches per image.
NUM_PATCHES = {'bottle':3, 'cable':3, 'capsule':3, 'hazelnut':3, 'metal_nut':3,
               'pill':3, 'screw':4, 'toothbrush':3, 'transistor':3, 'zipper':4,
               'carpet':4, 'grid':4, 'leather':4, 'tile':4, 'wood':4}
# Logistic intensity-labelling parameters as (k, x0) pairs per class.
INTENSITY_LOGISTIC_PARAMS = {'bottle':(1/12, 24), 'cable':(1/12, 24), 'capsule':(1/2, 4), 'hazelnut':(1/12, 24), 'metal_nut':(1/3, 7),
                             'pill':(1/3, 7), 'screw':(1, 3), 'toothbrush':(1/6, 15), 'transistor':(1/6, 15), 'zipper':(1/6, 15),
                             'carpet':(1/3, 7), 'grid':(1/3, 7), 'leather':(1/3, 7), 'tile':(1/3, 7), 'wood':(1/6, 15)}
# Classes without a canonical orientation; bottle is aligned but it's
# symmetric under rotation, so it is treated as unaligned too.
UNALIGNED_OBJECTS = ['bottle', 'hazelnut', 'metal_nut', 'screw']
# Training epochs per class; non-aligned objects get extra time (560 vs 320).
EPOCHS = {'bottle':320, 'cable':320, 'capsule':320, 'hazelnut':560, 'metal_nut':560,
          'pill':320, 'screw':560, 'toothbrush':320, 'transistor':320, 'zipper':320,
          'carpet':320, 'grid':320, 'leather':320, 'tile':320, 'wood':320}
# Background-detection parameters as (brightness, threshold) pairs; only
# classes listed here support background skipping.
BACKGROUND = {'bottle':(200, 60), 'screw':(200, 60), 'capsule':(200, 60), 'zipper':(200, 60),
              'hazelnut':(20, 20), 'pill':(20, 20), 'toothbrush':(20, 20), 'metal_nut':(20, 20)}
def set_seed(seed_value):
    """Seed NumPy, torch CPU, and all torch CUDA RNGs for reproducibility."""
    # torch.cuda.manual_seed_all is a safe no-op on CPU-only machines
    for seeder in (np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed_value)
def train(class_name, data_dir, out_dir, setting, device, pool, preact,
          min_lr = 1e-6, max_lr = 1e-3, batch_size = 64, seed = 1982342):
    """Train one self-supervised anomaly-detection model for a single MVTec class.

    :param class_name: MVTec category name (key into EPOCHS, WIDTH_BOUNDS_PCT, ...).
    :param data_dir: root directory of the MVTec dataset.
    :param out_dir: output root; the setting's own out_dir and the class name
        are appended below before saving.
    :param setting: one entry of SETTINGS (loss class, final activation,
        self_sup_args, optional 'seed' and 'batch_size' overrides).
    :param device: torch device to train on.
    :param pool: forwarded to resnet18_enc_dec.
    :param preact: forwarded to resnet18_enc_dec.
    :param min_lr: final learning rate of the cosine annealing schedule.
    :param max_lr: initial learning rate (Adam).
    :param batch_size: default batch size, overridden by setting['batch_size'].
    :param seed: default RNG seed, overridden by setting['seed'].
    """
    set_seed(setting.get('seed', seed))
    num_epochs = EPOCHS.get(class_name)
    # a setting may override the default batch size
    if setting.get('batch_size'):
        batch_size = setting.get('batch_size')
    # load data; augmentation depends on whether the class is an unaligned
    # object, an aligned object, or a texture
    if class_name in UNALIGNED_OBJECTS:
        train_transform = T.Compose([
            T.RandomRotation(5),
            T.CenterCrop(230),
            T.RandomCrop(224)])
        res = 256
    elif class_name in OBJECTS:
        # no rotation for aligned objects
        train_transform = T.Compose([
            T.CenterCrop(230),
            T.RandomCrop(224)])
        res = 256
    else: # texture
        train_transform = T.Compose([
            T.RandomVerticalFlip(),
            T.RandomCrop(256)])
        res = 264
    train_dat = SelfSupMVTecDataset(root_path=data_dir, class_name=class_name, is_train=True,
                                    low_res=res, download=False, transform=train_transform)
    # configure the synthetic-anomaly generator from the chosen setting
    train_dat.configure_self_sup(self_sup_args=setting.get('self_sup_args'))
    if setting.get('skip_background', False):
        # avoid placing anomalies on the estimated image background
        train_dat.configure_self_sup(self_sup_args={'skip_background': BACKGROUND.get(class_name)})
    if class_name in TEXTURES:
        train_dat.configure_self_sup(self_sup_args={'resize_bounds': (.5, 2)})
    # per-class patch-size / intensity / count / overlap parameters
    train_dat.configure_self_sup(on=True, self_sup_args={'width_bounds_pct': WIDTH_BOUNDS_PCT.get(class_name),
                                                         'intensity_logistic_params': INTENSITY_LOGISTIC_PARAMS.get(class_name),
                                                         'num_patches': NUM_PATCHES.get(class_name),
                                                         'min_object_pct': MIN_OBJECT_PCT.get(class_name),
                                                         'min_overlap_pct': MIN_OVERLAP_PCT.get(class_name)})
    # each worker gets its own numpy seed derived from torch's worker seed,
    # so random augmentations differ across dataloader workers
    loader_train = DataLoader(train_dat, batch_size, shuffle=True, num_workers=os.cpu_count(),
                              worker_init_fn=lambda _: np.random.seed(torch.utils.data.get_worker_info().seed % 2**32))
    model = resnet18_enc_dec(num_classes=1, pool=pool, preact=preact,
                             final_activation=setting.get('final_activation')).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=max_lr)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, num_epochs, eta_min=min_lr)
    loss_func = setting.get('loss')()  # the setting stores the loss *class*; instantiate it
    out_dir = os.path.join(out_dir, setting.get('out_dir'), class_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    train_and_save_model(model, optimizer, loss_func, loader_train, class_name + '_'+ setting.get('fname'), out_dir,
                         num_epochs=num_epochs, save_freq=80, device=device, scheduler=scheduler, save_intermediate_model=False)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--out_dir", required=True, type=str)
parser.add_argument("-d", "--data_dir", required=True, type=str)
parser.add_argument("-s", "--setting", required=True, type=str)
parser.add_argument("-c", "--categories", required=False, type=str, default='all')
parser.add_argument("-n", "--class_name", required=False, type=str, default=None)
parser.add_argument("--no_pool", required=False, action='store_true')
parser.add_argument("--preact", required=False, action='store_true')
args = parser.parse_args()
out_dir = args.out_dir
if not os.path.exists(out_dir):
os.makedirs(out_dir)
data_dir = args.data_dir
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f'Using {device}')
setting = SETTINGS.get(args.setting)
if args.class_name is not None:
categories = [args.class_name]
elif args.categories == 'texture':
categories = TEXTURES
elif args.categories == 'object':
categories = OBJECTS
else:
categories = CLASS_NAMES
for class_name in tqdm(categories):
train(class_name, data_dir, out_dir, setting, device, not args.no_pool, args.preact)
| [
"tqdm.tqdm",
"torch.utils.data.get_worker_info",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"torch.manual_seed",
"torchvision.transforms.RandomRotation",
"torchvision.transforms.RandomVerticalFlip",
"os.path.exists",
"self_sup_data.mvtec.SelfSupMVTecDataset",
"torch.optim.lr_... | [((16895, 16921), 'numpy.random.seed', 'np.random.seed', (['seed_value'], {}), '(seed_value)\n', (16909, 16921), True, 'import numpy as np\n'), ((16926, 16955), 'torch.manual_seed', 'torch.manual_seed', (['seed_value'], {}), '(seed_value)\n', (16943, 16955), False, 'import torch\n'), ((16960, 16998), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed_value'], {}), '(seed_value)\n', (16986, 16998), False, 'import torch\n'), ((17900, 18038), 'self_sup_data.mvtec.SelfSupMVTecDataset', 'SelfSupMVTecDataset', ([], {'root_path': 'data_dir', 'class_name': 'class_name', 'is_train': '(True)', 'low_res': 'res', 'download': '(False)', 'transform': 'train_transform'}), '(root_path=data_dir, class_name=class_name, is_train=\n True, low_res=res, download=False, transform=train_transform)\n', (17919, 18038), False, 'from self_sup_data.mvtec import SelfSupMVTecDataset, CLASS_NAMES, TEXTURES, OBJECTS\n'), ((19426, 19512), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer', 'num_epochs'], {'eta_min': 'min_lr'}), '(optimizer, num_epochs, eta_min=\n min_lr)\n', (19468, 19512), False, 'import torch\n'), ((19974, 19999), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (19997, 19999), False, 'import argparse\n'), ((21104, 21120), 'tqdm.tqdm', 'tqdm', (['categories'], {}), '(categories)\n', (21108, 21120), False, 'from tqdm import tqdm\n'), ((19630, 19653), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (19644, 19653), False, 'import os\n'), ((19663, 19683), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (19674, 19683), False, 'import os\n'), ((20595, 20618), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (20609, 20618), False, 'import os\n'), ((20628, 20648), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (20639, 20648), False, 'import os\n'), ((19047, 19061), 'os.cpu_count', 'os.cpu_count', 
([], {}), '()\n', (19059, 19061), False, 'import os\n'), ((20717, 20742), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (20740, 20742), False, 'import torch\n'), ((17417, 17436), 'torchvision.transforms.RandomRotation', 'T.RandomRotation', (['(5)'], {}), '(5)\n', (17433, 17436), True, 'from torchvision import transforms as T\n'), ((17454, 17471), 'torchvision.transforms.CenterCrop', 'T.CenterCrop', (['(230)'], {}), '(230)\n', (17466, 17471), True, 'from torchvision import transforms as T\n'), ((17490, 17507), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['(224)'], {}), '(224)\n', (17502, 17507), True, 'from torchvision import transforms as T\n'), ((17656, 17673), 'torchvision.transforms.CenterCrop', 'T.CenterCrop', (['(230)'], {}), '(230)\n', (17668, 17673), True, 'from torchvision import transforms as T\n'), ((17692, 17709), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['(224)'], {}), '(224)\n', (17704, 17709), True, 'from torchvision import transforms as T\n'), ((17805, 17827), 'torchvision.transforms.RandomVerticalFlip', 'T.RandomVerticalFlip', ([], {}), '()\n', (17825, 17827), True, 'from torchvision import transforms as T\n'), ((17846, 17863), 'torchvision.transforms.RandomCrop', 'T.RandomCrop', (['(256)'], {}), '(256)\n', (17858, 17863), True, 'from torchvision import transforms as T\n'), ((19133, 19167), 'torch.utils.data.get_worker_info', 'torch.utils.data.get_worker_info', ([], {}), '()\n', (19165, 19167), False, 'import torch\n')] |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import time
import numpy as np
import os
import defenses
import data_utils as data
import cvxpy as cvx
import tensorflow as tf
import random
def _load_gender_labels(sensitive_file, general_train_idx, num_points):
    """Build a per-point sensitive-group vector from the group-label file.

    Loads ``./data/<sensitive_file>`` (an npz with a 'group_label' array) and
    returns an array of length ``num_points`` where training points
    (index < general_train_idx) in group 0 get +1, points in group 1 get -1,
    and all remaining entries stay 0.
    """
    DATA_FOLDER = './data'
    f = np.load(os.path.join(DATA_FOLDER, sensitive_file))
    group_label = f['group_label']
    male_train_index = np.where(group_label[0:general_train_idx] == 0)[
        0].astype(np.int32)
    female_train_index = np.where(group_label[0:general_train_idx] == 1)[
        0].astype(np.int32)
    gender_labels = np.zeros(num_points)
    for k in range(general_train_idx):
        if k in male_train_index:
            gender_labels[k] = 1
        elif k in female_train_index:
            gender_labels[k] = -1
    return gender_labels


def _densest_point(x, candidate_idx, atol):
    """Return the row of ``x`` among ``candidate_idx`` with the most near-duplicates.

    Two rows count as "near" when np.allclose holds with rtol=0 and the given
    absolute tolerance (every point is near itself, so counts are >= 1).
    Ties keep the earliest candidate, matching the original strict > test.
    """
    best_count = 0
    best_pos = 0
    for i in range(candidate_idx.shape[0]):
        count = 0
        for j in range(candidate_idx.shape[0]):
            if np.allclose(x[candidate_idx[i], :], x[candidate_idx[j], :], rtol=0, atol=atol):
                count = count + 1
        if count > best_count:
            best_count = count
            best_pos = i
    return x[candidate_idx[best_pos], :]


def poison_with_influence_proj_gradient_step(model, general_train_idx,
        sensitive_file, attack_method, advantaged, test_idx, indices_to_poison,
        projection_fn,
        step_size=0.01,
        shrink_towards='cluster_center',
        loss_type='normal_loss',
        force_refresh=True,
        test_description=None,
        output_root=None):
    """
    Returns poisoned_X_train, a subset of model.train_dataset (marked by
    indices_to_poison) that has been modified by one attack iteration step.

    The two poisoned rows are first moved along the influence gradient, then
    (for the "RAA" / "NRAA" attacks) replaced by a random / most-replicated
    training point of the opposite label drawn from the appropriate sensitive
    group, and finally projected back onto the feasible set by projection_fn.

    :param model: model exposing train_dataset, sess/weights and
        get_grad_of_influence_wrt_input.
    :param general_train_idx: number of (clean) training points; rows past it
        carry no sensitive-group label.
    :param sensitive_file: npz file with the 'group_label' array under ./data.
    :param attack_method: "RAA", "NRAA", or anything else for a pure
        influence-gradient step.
    :param advantaged: advantaged group indicator (+1 or -1).
    :param test_idx: test indices the influence is computed against.
    :param indices_to_poison: indices of the two rows being poisoned.
    :param projection_fn: projection onto the feasible set; receives the model
        weights split into theta and bias.
    :param step_size: gradient-step size.
    :param shrink_towards: unused here; kept for interface compatibility.
    :param loss_type: loss used for the influence computation.
    :param force_refresh: if False, reuse a cached gradient file when present.
    :param test_description: tag for the gradient cache file (defaults to test_idx).
    :param output_root: directory of the gradient cache file.
    """
    train_dataset = model.train_dataset
    if test_description is None:
        test_description = test_idx
    grad_filename = os.path.join(output_root, 'grad_influence_wrt_input_val_%s_testidx_%s.npy' % (
        model.model_name, test_description))
    # reuse the cached gradient only when refresh is not forced
    if (force_refresh == False) and (os.path.exists(grad_filename)):
        grad_influence_wrt_input_val = np.load(grad_filename)
    else:
        grad_influence_wrt_input_val = model.get_grad_of_influence_wrt_input(
            indices_to_poison,
            test_idx,
            verbose=False,
            force_refresh=force_refresh,
            test_description=test_description,
            loss_type=loss_type)
    # gradient step on the poisoned rows
    poisoned_X_train = train_dataset.x[indices_to_poison, :]
    poisoned_X_train -= step_size * grad_influence_wrt_input_val
    poisoned_labels = train_dataset.labels[indices_to_poison]
    weights = model.sess.run(model.weights)
    if(attack_method == "RAA"):
        gender_labels = _load_gender_labels(
            sensitive_file, general_train_idx, train_dataset.labels.shape[0])
        # row 0: random label -1 point from the disadvantaged (resp. advantaged) group
        if(advantaged == -1):
            op_indx = np.where((train_dataset.labels == -1)
                               & (gender_labels == -1))[0]
        else:
            op_indx = np.where((train_dataset.labels == -1)
                               & (gender_labels == 1))[0]
        rand1 = random.randint(0, op_indx.shape[0] - 1)
        poisoned_X_train[0] = train_dataset.x[op_indx[rand1], :]
        # row 1: random label +1 point from the opposite group
        if(advantaged == -1):
            op_indx = np.where((train_dataset.labels == 1)
                               & (gender_labels == 1))[0]
        else:
            op_indx = np.where((train_dataset.labels == 1)
                               & (gender_labels == -1))[0]
        rand2 = random.randint(0, op_indx.shape[0] - 1)
        poisoned_X_train[1] = train_dataset.x[op_indx[rand2], :]
    elif(attack_method == "NRAA"):
        gender_labels = _load_gender_labels(
            sensitive_file, general_train_idx, train_dataset.labels.shape[0])
        # row 0: the most-replicated label -1 point of the relevant group (atol=1)
        if(advantaged == -1):
            op_indx = np.where((train_dataset.labels == -1)
                               & (gender_labels == -1))[0]
        else:
            op_indx = np.where((train_dataset.labels == -1)
                               & (gender_labels == 1))[0]
        poisoned_X_train[0] = _densest_point(train_dataset.x, op_indx, atol=1)
        # row 1: the most-replicated label +1 point of the opposite group (atol=3)
        if(advantaged == -1):
            op_indx = np.where((train_dataset.labels == 1)
                               & (gender_labels == 1))[0]
        else:
            op_indx = np.where((train_dataset.labels == 1)
                               & (gender_labels == -1))[0]
        poisoned_X_train[1] = _densest_point(train_dataset.x, op_indx, atol=3)
    # project the updated points back onto the feasible set
    poisoned_X_train = projection_fn(
        poisoned_X_train,
        poisoned_labels,
        theta=weights[:-1],
        bias=weights[-1])
    return poisoned_X_train
def iterative_attack(
        model,
        general_train_idx,
        sensitive_file,
        attack_method,
        advantaged,
        indices_to_poison,
        test_idx,
        test_description=None,
        step_size=0.01,
        num_iter=10,
        loss_type='normal_loss',
        projection_fn=None,
        output_root=None,
        num_copies=None,
        stop_after=3,
        start_time=None,
        display_iter_time=False,
        stopping_method='Accuracy'):
    """
    Performs the main specified adversarial attack
    :param model: Model to attack
    :general_train_idx: Index of last element in the training data
    :sensitive_file: File specifying the sensitive group labels (dataset_group_label)
    :attack_method: Used attack method
    :advantaged: Index of advantaged group (1 or -1)
    :indices_to_poison: Indices to be poisoned
    :test_idx: Deprecated
    :test_description: Deprecated
    :step_size: Step size for attacks with adversarial loss
    :num_iter: Maximum number of attack iterations
    :loss_type: Loss type for different attacks. (adversarial_loss when one is used else normal_loss)
    :projection_fn: Projection function to project updated poisoned points to feasible set
    :output_root: Output root directory
    :num_copies: Number of copies to make for poisoned points
    :stop_after: Patience for stopping training
    :start_time: Start time of training
    :display_iter_time: Print time of every iteration if true
    :stopping_method: Method to evaluate best model
    """
    # Sanity-check the expected dataset layout when poison points are duplicated:
    # the two poison originals sit at consecutive indices (label +1 then -1),
    # followed by num_copies[0] copies of the +1 point and num_copies[1] copies
    # of the -1 point at the very end of the training matrix.
    if num_copies is not None:
        assert len(num_copies) == 2
        assert np.min(num_copies) >= 1
        assert len(indices_to_poison) == 2
        assert indices_to_poison[1] == (indices_to_poison[0] + 1)
        assert indices_to_poison[1] + num_copies[0] + \
            num_copies[1] == (model.train_dataset.x.shape[0] - 1)
        assert model.train_dataset.labels[indices_to_poison[0]] == 1
        assert model.train_dataset.labels[indices_to_poison[1]] == -1
        copy_start = indices_to_poison[1] + 1
        assert np.all(
            model.train_dataset.labels[copy_start:copy_start + num_copies[0]] == 1)
        assert np.all(model.train_dataset.labels[copy_start + num_copies[0]:copy_start + num_copies[0] + num_copies[1]] == -1)
    # Best-so-far trackers for the two stopping criteria and the patience counter.
    largest_test_loss = 0
    largest_parity = 0
    stop_counter = 0
    print('Test idx: %s' % test_idx)
    # Timing/trace buffers are only kept when a start_time is supplied.
    if start_time is not None:
        assert num_copies is not None
        times_taken = np.zeros(num_iter)
        Xs_poison = np.zeros(
            (num_iter, len(indices_to_poison), model.train_dataset.x.shape[1]))
        Ys_poison = np.zeros((num_iter, len(indices_to_poison)))
        nums_copies = np.zeros((num_iter, len(indices_to_poison)))
    # Main attack loop: take one projected gradient step on the poison points,
    # write them (and their copies) back into the training set, retrain, and
    # checkpoint whenever the chosen stopping metric improves.
    for attack_iter in range(num_iter):
        since = time.time()
        print(num_iter)
        print('*** Iter: %s' % attack_iter)
        model.attack_iter = attack_iter
        # Create modified training dataset
        old_poisoned_X_train = np.copy(
            model.train_dataset.x[indices_to_poison, :])
        poisoned_X_train_subset = poison_with_influence_proj_gradient_step(
            model,
            general_train_idx,
            sensitive_file,
            attack_method,
            advantaged,
            test_idx,
            indices_to_poison,
            projection_fn,
            step_size=step_size,
            loss_type=loss_type,
            force_refresh=True,
            test_description=test_description,
            output_root=output_root)
        # With copies: mutate model.train_dataset.x in place and propagate the
        # two updated poison rows to all of their duplicates at the tail.
        # Without copies: work on a private copy of the training matrix.
        if num_copies is not None:
            poisoned_X_train = model.train_dataset.x
            poisoned_X_train[indices_to_poison, :] = poisoned_X_train_subset
            copy_start = indices_to_poison[1] + 1
            poisoned_X_train[copy_start:copy_start +
                             num_copies[0], :] = poisoned_X_train_subset[0, :]
            poisoned_X_train[copy_start + num_copies[0]:copy_start +
                             num_copies[0] + num_copies[1], :] = poisoned_X_train_subset[1, :]
        else:
            poisoned_X_train = np.copy(model.train_dataset.x)
            poisoned_X_train[indices_to_poison, :] = poisoned_X_train_subset
        # Measure some metrics on what the gradient step did
        labels = model.train_dataset.labels
        dists_sum = 0.0
        poisoned_dists_sum = 0.0
        poisoned_mask = np.array([False] * len(labels), dtype=bool)
        poisoned_mask[indices_to_poison] = True
        # NOTE(review): for RAA/NRAA the per-class distance statistics are
        # skipped, so the "Average distance" prints below report 0 in that case.
        if(attack_method != "RAA" and attack_method != "NRAA"):
            for y in set(labels):
                cluster_center = np.mean(
                    poisoned_X_train[labels == y, :], axis=0)
                dists = np.linalg.norm(
                    poisoned_X_train[labels == y, :] - cluster_center, axis=1)
                dists_sum += np.sum(dists)
                poisoned_dists = np.linalg.norm(
                    poisoned_X_train[(labels == y) & (poisoned_mask), :] - cluster_center, axis=1)
                poisoned_dists_sum += np.sum(poisoned_dists)
        dists_mean = dists_sum / len(labels)
        poisoned_dists_mean = poisoned_dists_sum / len(indices_to_poison)
        # How far each poison point moved during this gradient step.
        dists_moved = np.linalg.norm(
            old_poisoned_X_train - poisoned_X_train[indices_to_poison, :], axis=1)
        print('Average distance to cluster center (overall): %s' % dists_mean)
        print('Average distance to cluster center (poisoned): %s' %
              poisoned_dists_mean)
        print('Average diff in X_train among poisoned indices = %s' %
              np.mean(dists_moved))
        print('Fraction of 0 gradient points: %s' %
              np.mean(dists_moved == 0))
        print('Average distance moved by points that moved: %s' %
              np.mean(dists_moved[dists_moved > 0]))
        # Update training dataset
        model.update_train_x(poisoned_X_train)
        # Retrain model
        results = model.train()
        # Record per-iteration timing and a snapshot of the poison points.
        if start_time is not None:
            end_time = time.time()
            times_taken[attack_iter] = end_time - start_time
            Xs_poison[attack_iter, :, :] = np.copy(poisoned_X_train_subset)
            Ys_poison[attack_iter,
                      :] = model.train_dataset.labels[indices_to_poison]
            nums_copies[attack_iter, :] = num_copies
        print('attack_iter', attack_iter)
        print('num_iter - 1', num_iter - 1)
        # Evaluate the stopping metric every 10 iterations and on the last one;
        # save a checkpoint whenever it improves, otherwise burn patience.
        if ((attack_iter + 1) % 10 == 0) or (attack_iter == num_iter - 1):
            print('in')
            if(stopping_method == 'Accuracy'):
                # Calculate test loss
                test_loss = results['test_loss']
                if largest_test_loss < test_loss:
                    print('test loss match')
                    largest_test_loss = test_loss
                    np.savez(os.path.join(output_root, '%s_attack' % (model.model_name)),
                             poisoned_X_train=poisoned_X_train,
                             Y_train=model.train_dataset.labels,
                             attack_iter=attack_iter + 1)
                    stop_counter = 0
                else:
                    stop_counter += 1
                if start_time is not None:
                    np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
                             times_taken=times_taken,
                             nums_copies=nums_copies)
            if(stopping_method == 'Fairness'):
                # Fairness criterion: track the sum of the E0 and Parity metrics.
                E0, Parity = results['E0'], results['Parity']
                if largest_parity < E0 + Parity:
                    print('parity match')
                    largest_parity = E0 + Parity
                    np.savez(os.path.join(output_root, '%s_attack' % (model.model_name)),
                             poisoned_X_train=poisoned_X_train,
                             Y_train=model.train_dataset.labels,
                             attack_iter=attack_iter + 1)
                    stop_counter = 0
                else:
                    stop_counter += 1
                if start_time is not None:
                    np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
                             times_taken=times_taken,
                             nums_copies=nums_copies)
        # Printing time for every iter, if display_iter_time is set to True
        now = time.time()
        if (display_iter_time == True):
            total_time = now - since
            print('TOTAL ELAPSED TIME FOR ONE ITERATION \n', total_time)
        if stop_counter >= stop_after:
            print('STOPPING METHOD USED IS: ',
                  stopping_method, ' STOPPING NOW')
            break
    # Final timing dump, including the full poison-point trajectory.
    if start_time is not None:
        np.savez(os.path.join(output_root, '%s_timing' % (model.model_name)),
                 times_taken=times_taken,
                 Xs_poison=Xs_poison,
                 Ys_poison=Ys_poison,
                 nums_copies=nums_copies)
def get_feasible_flipped_mask(
        X_train, Y_train,
        centroids,
        centroid_vec,
        sphere_radii,
        slab_radii,
        class_map,
        use_slab=False):
    """Return a boolean mask over X_train marking points whose label-flipped
    version still falls inside the sphere (and, optionally, slab) defense
    region of the opposite class."""
    # Distance of every point to the centroid of its *flipped* class.
    flip_sphere_dists = defenses.compute_dists_under_Q(
        X_train, -Y_train,
        Q=None,
        subtract_from_l2=False,
        centroids=centroids,
        class_map=class_map,
        norm=2)
    flip_slab_dists = None
    if use_slab:
        # Same distances, but projected onto the inter-centroid direction.
        flip_slab_dists = defenses.compute_dists_under_Q(
            X_train, -Y_train,
            Q=centroid_vec,
            subtract_from_l2=False,
            centroids=centroids,
            class_map=class_map,
            norm=2)
    mask = np.zeros(X_train.shape[0], dtype=bool)
    for label in set(Y_train):
        in_class = Y_train == label
        flipped_class_idx = class_map[-label]
        # Feasible iff within the flipped class's sphere radius ...
        mask[in_class] = flip_sphere_dists[in_class] <= sphere_radii[flipped_class_idx]
        if use_slab:
            # ... and, when the slab defense is active, within its slab radius.
            mask[in_class] = (
                mask[in_class] &
                (flip_slab_dists[in_class] <= slab_radii[flipped_class_idx]))
    return mask
def init_gradient_attack_from_mask(
        X_train, Y_train,
        epsilon,
        feasible_flipped_mask,
        general_train_idx,
        sensitive_file,
        attack_method,
        use_copy=True):
    """
    Calculates the advantaged group and computes initial poisoned data points and adds them to the training data.
    :param X_train: training set features
    :param Y_train: training set labels
    :param epsilon: controlling parameter specifying number of poisoned points to be copied such that n_poisoned = eps len(X_train)
    :param feasible_flipped_mask: Mask of feasible set
    :param general_train_idx: Index of last element in X_train
    :param sensitive_file: File specifying labels of the sensitive feature
    :param attack_method: Method of attack
    :param use_copy: Make copies of poisoned points if true, otherwise only one point per label gets sampled
    :return:
        - X_modified: X_train with added poisoned points
        - Y_modified: Y_train with added poisoned points
        - indices_to_poison: Indices of poisoned datapoints
        - copy_array: Array specifying number of copies of poisoned datapoints [num_pos_copies, num_neg_copies]
        - advantaged: Label of advantaged group
        - test_gender_labels: Sensitive feature labels (1, -1) of test_set (needed for Solans)
    """
    DATA_FOLDER = './data'
    dataset_path = os.path.join(DATA_FOLDER)
    # group_label holds the sensitive attribute (0 = male, 1 = female) for
    # train rows [0:general_train_idx] followed by the test rows.
    f = np.load(os.path.join(dataset_path, sensitive_file))
    group_label = f['group_label']
    advantaged = 1
    male_train_index = np.where(group_label[0:general_train_idx] == 0)[
        0].astype(np.int32)
    female_train_index = np.where(group_label[0:general_train_idx] == 1)[
        0].astype(np.int32)
    male_test_index = np.where(group_label[general_train_idx:] == 0)[
        0].astype(np.int32)
    female_test_index = np.where(group_label[general_train_idx:] == 1)[
        0].astype(np.int32)
    index_male_true_train = np.where(np.logical_and(
        group_label[0:general_train_idx] == 0, Y_train == 1))[0].astype(np.int32)
    index_female_true_train = np.where(np.logical_and(
        group_label[0:general_train_idx] == 1, Y_train == 1))[0].astype(np.int32)
    # Per-group base rates of the positive label, used to decide which group
    # is advantaged.
    train_data_one_female_prob = group_label[0:general_train_idx][
        index_female_true_train].shape[0] / female_train_index.shape[0]
    train_data_one_male_prob = group_label[0:general_train_idx][
        index_male_true_train].shape[0] / male_train_index.shape[0]
    # Encode the sensitive attribute as +1 (male) / -1 (female). Vectorized
    # index assignment replaces the original per-element `k in <array>`
    # membership loops, which were accidentally O(n^2).
    gender_labels = np.zeros(general_train_idx)
    gender_labels[male_train_index] = 1
    gender_labels[female_train_index] = -1
    test_size = len(male_test_index) + len(female_test_index)
    test_gender_labels = np.zeros(test_size)
    test_gender_labels[male_test_index] = 1
    test_gender_labels[female_test_index] = -1
    if not use_copy:
        # Sample eps*n feasible points (with replacement) and append them with
        # flipped labels; every appended point is an independent poison point.
        num_copies = int(np.round(epsilon * X_train.shape[0]))
        idx_to_copy = np.random.choice(
            np.where(feasible_flipped_mask)[0],
            size=num_copies,
            replace=True)
        X_modified = data.vstack(X_train, X_train[idx_to_copy, :])
        Y_modified = np.append(Y_train, -Y_train[idx_to_copy])
        copy_array = None
        indices_to_poison = np.arange(X_train.shape[0], X_modified.shape[0])
    else:
        num_copies = int(np.round(epsilon * X_train.shape[0]))
        # Choose this in inverse class balance
        num_pos_copies = int(np.round(np.mean(Y_train == -1) * num_copies))
        num_neg_copies = num_copies - num_pos_copies
        np.random.seed(0)
        # Pick one seed point per label from the disadvantaged group; that
        # group is the one with the lower positive-label base rate.
        if train_data_one_female_prob > train_data_one_male_prob:
            advantaged = -1
            pos_idx_to_copy = np.random.choice(
                np.where(feasible_flipped_mask & (Y_train == 1) & (gender_labels == -1))[0])
            neg_idx_to_copy = np.random.choice(
                np.where(feasible_flipped_mask & (Y_train == -1) & (gender_labels == 1))[0])
        else:
            advantaged = 1
            pos_idx_to_copy = np.random.choice(
                np.where(feasible_flipped_mask & (Y_train == 1) & (gender_labels == 1))[0])
            neg_idx_to_copy = np.random.choice(
                np.where(feasible_flipped_mask & (Y_train == -1) & (gender_labels == -1))[0])
        if neg_idx_to_copy in female_train_index:
            print("female")
        else:
            print("male")
        if pos_idx_to_copy in female_train_index:
            print("female")
        else:
            print("male")
        print(neg_idx_to_copy)
        print(pos_idx_to_copy)
        # The two seed points are appended first (they become the poison
        # points proper), followed by their remaining copies.
        num_pos_copies -= 1
        num_neg_copies -= 1
        X_modified, Y_modified = data.add_points(
            X_train[pos_idx_to_copy, :],
            1,
            X_train,
            Y_train,
            num_copies=1)
        X_modified, Y_modified = data.add_points(
            X_train[neg_idx_to_copy, :],
            -1,
            X_modified,
            Y_modified,
            num_copies=1)
        X_modified, Y_modified = data.add_points(
            X_train[pos_idx_to_copy, :],
            1,
            X_modified,
            Y_modified,
            num_copies=num_pos_copies)
        X_modified, Y_modified = data.add_points(
            X_train[neg_idx_to_copy, :],
            -1,
            X_modified,
            Y_modified,
            num_copies=num_neg_copies)
        copy_array = [num_pos_copies, num_neg_copies]
        indices_to_poison = np.arange(X_train.shape[0], X_train.shape[0] + 2)
    return X_modified, Y_modified, indices_to_poison, copy_array, advantaged, test_gender_labels
| [
"data_utils.add_points",
"numpy.load",
"numpy.random.seed",
"numpy.sum",
"numpy.allclose",
"numpy.mean",
"numpy.arange",
"numpy.linalg.norm",
"numpy.round",
"os.path.join",
"random.randint",
"numpy.copy",
"os.path.exists",
"numpy.append",
"numpy.min",
"numpy.all",
"numpy.logical_and"... | [((1329, 1447), 'os.path.join', 'os.path.join', (['output_root', "('grad_influence_wrt_input_val_%s_testidx_%s.npy' % (model.model_name,\n test_description))"], {}), "(output_root, 'grad_influence_wrt_input_val_%s_testidx_%s.npy' %\n (model.model_name, test_description))\n", (1341, 1447), False, 'import os\n'), ((15814, 15950), 'defenses.compute_dists_under_Q', 'defenses.compute_dists_under_Q', (['X_train', '(-Y_train)'], {'Q': 'None', 'subtract_from_l2': '(False)', 'centroids': 'centroids', 'class_map': 'class_map', 'norm': '(2)'}), '(X_train, -Y_train, Q=None, subtract_from_l2=\n False, centroids=centroids, class_map=class_map, norm=2)\n', (15844, 15950), False, 'import defenses\n'), ((16281, 16319), 'numpy.zeros', 'np.zeros', (['X_train.shape[0]'], {'dtype': 'bool'}), '(X_train.shape[0], dtype=bool)\n', (16289, 16319), True, 'import numpy as np\n'), ((18267, 18292), 'os.path.join', 'os.path.join', (['DATA_FOLDER'], {}), '(DATA_FOLDER)\n', (18279, 18292), False, 'import os\n'), ((19376, 19403), 'numpy.zeros', 'np.zeros', (['general_train_idx'], {}), '(general_train_idx)\n', (19384, 19403), True, 'import numpy as np\n'), ((19672, 19691), 'numpy.zeros', 'np.zeros', (['test_size'], {}), '(test_size)\n', (19680, 19691), True, 'import numpy as np\n'), ((1491, 1520), 'os.path.exists', 'os.path.exists', (['grad_filename'], {}), '(grad_filename)\n', (1505, 1520), False, 'import os\n'), ((1562, 1584), 'numpy.load', 'np.load', (['grad_filename'], {}), '(grad_filename)\n', (1569, 1584), True, 'import numpy as np\n'), ((2197, 2222), 'os.path.join', 'os.path.join', (['DATA_FOLDER'], {}), '(DATA_FOLDER)\n', (2209, 2222), False, 'import os\n'), ((2784, 2823), 'numpy.zeros', 'np.zeros', (['train_dataset.labels.shape[0]'], {}), '(train_dataset.labels.shape[0])\n', (2792, 2823), True, 'import numpy as np\n'), ((3323, 3362), 'random.randint', 'random.randint', (['(0)', '(op_indx.shape[0] - 1)'], {}), '(0, op_indx.shape[0] - 1)\n', (3337, 3362), False, 
'import random\n'), ((3777, 3816), 'random.randint', 'random.randint', (['(0)', '(op_indx.shape[0] - 1)'], {}), '(0, op_indx.shape[0] - 1)\n', (3791, 3816), False, 'import random\n'), ((8616, 8694), 'numpy.all', 'np.all', (['(model.train_dataset.labels[copy_start:copy_start + num_copies[0]] == 1)'], {}), '(model.train_dataset.labels[copy_start:copy_start + num_copies[0]] == 1)\n', (8622, 8694), True, 'import numpy as np\n'), ((8723, 8838), 'numpy.all', 'np.all', (['(model.train_dataset.labels[copy_start + num_copies[0]:copy_start +\n num_copies[0] + num_copies[1]] == -1)'], {}), '(model.train_dataset.labels[copy_start + num_copies[0]:copy_start +\n num_copies[0] + num_copies[1]] == -1)\n', (8729, 8838), True, 'import numpy as np\n'), ((9036, 9054), 'numpy.zeros', 'np.zeros', (['num_iter'], {}), '(num_iter)\n', (9044, 9054), True, 'import numpy as np\n'), ((9354, 9365), 'time.time', 'time.time', ([], {}), '()\n', (9363, 9365), False, 'import time\n'), ((9548, 9600), 'numpy.copy', 'np.copy', (['model.train_dataset.x[indices_to_poison, :]'], {}), '(model.train_dataset.x[indices_to_poison, :])\n', (9555, 9600), True, 'import numpy as np\n'), ((16039, 16182), 'defenses.compute_dists_under_Q', 'defenses.compute_dists_under_Q', (['X_train', '(-Y_train)'], {'Q': 'centroid_vec', 'subtract_from_l2': '(False)', 'centroids': 'centroids', 'class_map': 'class_map', 'norm': '(2)'}), '(X_train, -Y_train, Q=centroid_vec,\n subtract_from_l2=False, centroids=centroids, class_map=class_map, norm=2)\n', (16069, 16182), False, 'import defenses\n'), ((18309, 18351), 'os.path.join', 'os.path.join', (['dataset_path', 'sensitive_file'], {}), '(dataset_path, sensitive_file)\n', (18321, 18351), False, 'import os\n'), ((20124, 20169), 'data_utils.vstack', 'data.vstack', (['X_train', 'X_train[idx_to_copy, :]'], {}), '(X_train, X_train[idx_to_copy, :])\n', (20135, 20169), True, 'import data_utils as data\n'), ((20191, 20232), 'numpy.append', 'np.append', (['Y_train', '(-Y_train[idx_to_copy])'], 
{}), '(Y_train, -Y_train[idx_to_copy])\n', (20200, 20232), True, 'import numpy as np\n'), ((20287, 20335), 'numpy.arange', 'np.arange', (['X_train.shape[0]', 'X_modified.shape[0]'], {}), '(X_train.shape[0], X_modified.shape[0])\n', (20296, 20335), True, 'import numpy as np\n'), ((20595, 20612), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (20609, 20612), True, 'import numpy as np\n'), ((21722, 21801), 'data_utils.add_points', 'data.add_points', (['X_train[pos_idx_to_copy, :]', '(1)', 'X_train', 'Y_train'], {'num_copies': '(1)'}), '(X_train[pos_idx_to_copy, :], 1, X_train, Y_train, num_copies=1)\n', (21737, 21801), True, 'import data_utils as data\n'), ((21896, 21986), 'data_utils.add_points', 'data.add_points', (['X_train[neg_idx_to_copy, :]', '(-1)', 'X_modified', 'Y_modified'], {'num_copies': '(1)'}), '(X_train[neg_idx_to_copy, :], -1, X_modified, Y_modified,\n num_copies=1)\n', (21911, 21986), True, 'import data_utils as data\n'), ((22077, 22179), 'data_utils.add_points', 'data.add_points', (['X_train[pos_idx_to_copy, :]', '(1)', 'X_modified', 'Y_modified'], {'num_copies': 'num_pos_copies'}), '(X_train[pos_idx_to_copy, :], 1, X_modified, Y_modified,\n num_copies=num_pos_copies)\n', (22092, 22179), True, 'import data_utils as data\n'), ((22270, 22373), 'data_utils.add_points', 'data.add_points', (['X_train[neg_idx_to_copy, :]', '(-1)', 'X_modified', 'Y_modified'], {'num_copies': 'num_neg_copies'}), '(X_train[neg_idx_to_copy, :], -1, X_modified, Y_modified,\n num_copies=num_neg_copies)\n', (22285, 22373), True, 'import data_utils as data\n'), ((22513, 22562), 'numpy.arange', 'np.arange', (['X_train.shape[0]', '(X_train.shape[0] + 2)'], {}), '(X_train.shape[0], X_train.shape[0] + 2)\n', (22522, 22562), True, 'import numpy as np\n'), ((2243, 2285), 'os.path.join', 'os.path.join', (['dataset_path', 'sensitive_file'], {}), '(dataset_path, sensitive_file)\n', (2255, 2285), False, 'import os\n'), ((3972, 3997), 'os.path.join', 'os.path.join', 
(['DATA_FOLDER'], {}), '(DATA_FOLDER)\n', (3984, 3997), False, 'import os\n'), ((4559, 4598), 'numpy.zeros', 'np.zeros', (['train_dataset.labels.shape[0]'], {}), '(train_dataset.labels.shape[0])\n', (4567, 4598), True, 'import numpy as np\n'), ((8161, 8179), 'numpy.min', 'np.min', (['num_copies'], {}), '(num_copies)\n', (8167, 8179), True, 'import numpy as np\n'), ((10639, 10669), 'numpy.copy', 'np.copy', (['model.train_dataset.x'], {}), '(model.train_dataset.x)\n', (10646, 10669), True, 'import numpy as np\n'), ((11755, 11845), 'numpy.linalg.norm', 'np.linalg.norm', (['(old_poisoned_X_train - poisoned_X_train[indices_to_poison, :])'], {'axis': '(1)'}), '(old_poisoned_X_train - poisoned_X_train[indices_to_poison, :\n ], axis=1)\n', (11769, 11845), True, 'import numpy as np\n'), ((12592, 12603), 'time.time', 'time.time', ([], {}), '()\n', (12601, 12603), False, 'import time\n'), ((12708, 12740), 'numpy.copy', 'np.copy', (['poisoned_X_train_subset'], {}), '(poisoned_X_train_subset)\n', (12715, 12740), True, 'import numpy as np\n'), ((14987, 14998), 'time.time', 'time.time', ([], {}), '()\n', (14996, 14998), False, 'import time\n'), ((15382, 15439), 'os.path.join', 'os.path.join', (['output_root', "('%s_timing' % model.model_name)"], {}), "(output_root, '%s_timing' % model.model_name)\n", (15394, 15439), False, 'import os\n'), ((19920, 19956), 'numpy.round', 'np.round', (['(epsilon * X_train.shape[0])'], {}), '(epsilon * X_train.shape[0])\n', (19928, 19956), True, 'import numpy as np\n'), ((20372, 20408), 'numpy.round', 'np.round', (['(epsilon * X_train.shape[0])'], {}), '(epsilon * X_train.shape[0])\n', (20380, 20408), True, 'import numpy as np\n'), ((3077, 3139), 'numpy.where', 'np.where', (['((train_dataset.labels == -1) & (gender_labels == -1))'], {}), '((train_dataset.labels == -1) & (gender_labels == -1))\n', (3085, 3139), True, 'import numpy as np\n'), ((3210, 3271), 'numpy.where', 'np.where', (['((train_dataset.labels == -1) & (gender_labels == 1))'], {}), 
'((train_dataset.labels == -1) & (gender_labels == 1))\n', (3218, 3271), True, 'import numpy as np\n'), ((3533, 3593), 'numpy.where', 'np.where', (['((train_dataset.labels == 1) & (gender_labels == 1))'], {}), '((train_dataset.labels == 1) & (gender_labels == 1))\n', (3541, 3593), True, 'import numpy as np\n'), ((3664, 3725), 'numpy.where', 'np.where', (['((train_dataset.labels == 1) & (gender_labels == -1))'], {}), '((train_dataset.labels == 1) & (gender_labels == -1))\n', (3672, 3725), True, 'import numpy as np\n'), ((4018, 4060), 'os.path.join', 'os.path.join', (['dataset_path', 'sensitive_file'], {}), '(dataset_path, sensitive_file)\n', (4030, 4060), False, 'import os\n'), ((11157, 11206), 'numpy.mean', 'np.mean', (['poisoned_X_train[labels == y, :]'], {'axis': '(0)'}), '(poisoned_X_train[labels == y, :], axis=0)\n', (11164, 11206), True, 'import numpy as np\n'), ((11252, 11325), 'numpy.linalg.norm', 'np.linalg.norm', (['(poisoned_X_train[labels == y, :] - cluster_center)'], {'axis': '(1)'}), '(poisoned_X_train[labels == y, :] - cluster_center, axis=1)\n', (11266, 11325), True, 'import numpy as np\n'), ((11376, 11389), 'numpy.sum', 'np.sum', (['dists'], {}), '(dists)\n', (11382, 11389), True, 'import numpy as np\n'), ((11424, 11519), 'numpy.linalg.norm', 'np.linalg.norm', (['(poisoned_X_train[(labels == y) & poisoned_mask, :] - cluster_center)'], {'axis': '(1)'}), '(poisoned_X_train[(labels == y) & poisoned_mask, :] -\n cluster_center, axis=1)\n', (11438, 11519), True, 'import numpy as np\n'), ((11577, 11599), 'numpy.sum', 'np.sum', (['poisoned_dists'], {}), '(poisoned_dists)\n', (11583, 11599), True, 'import numpy as np\n'), ((18432, 18479), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 0)'], {}), '(group_label[0:general_train_idx] == 0)\n', (18440, 18479), True, 'import numpy as np\n'), ((18534, 18581), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 1)'], {}), '(group_label[0:general_train_idx] == 1)\n', (18542, 
18581), True, 'import numpy as np\n'), ((18633, 18679), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 0)'], {}), '(group_label[general_train_idx:] == 0)\n', (18641, 18679), True, 'import numpy as np\n'), ((18733, 18779), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 1)'], {}), '(group_label[general_train_idx:] == 1)\n', (18741, 18779), True, 'import numpy as np\n'), ((20011, 20042), 'numpy.where', 'np.where', (['feasible_flipped_mask'], {}), '(feasible_flipped_mask)\n', (20019, 20042), True, 'import numpy as np\n'), ((2354, 2401), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 0)'], {}), '(group_label[0:general_train_idx] == 0)\n', (2362, 2401), True, 'import numpy as np\n'), ((2464, 2511), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 1)'], {}), '(group_label[0:general_train_idx] == 1)\n', (2472, 2511), True, 'import numpy as np\n'), ((2571, 2617), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 0)'], {}), '(group_label[general_train_idx:] == 0)\n', (2579, 2617), True, 'import numpy as np\n'), ((2679, 2725), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 1)'], {}), '(group_label[general_train_idx:] == 1)\n', (2687, 2725), True, 'import numpy as np\n'), ((4852, 4914), 'numpy.where', 'np.where', (['((train_dataset.labels == -1) & (gender_labels == -1))'], {}), '((train_dataset.labels == -1) & (gender_labels == -1))\n', (4860, 4914), True, 'import numpy as np\n'), ((4985, 5046), 'numpy.where', 'np.where', (['((train_dataset.labels == -1) & (gender_labels == 1))'], {}), '((train_dataset.labels == -1) & (gender_labels == 1))\n', (4993, 5046), True, 'import numpy as np\n'), ((5256, 5356), 'numpy.allclose', 'np.allclose', (['train_dataset.x[op_indx[points], :]', 'train_dataset.x[op_indx[p], :]'], {'rtol': '(0)', 'atol': '(1)'}), '(train_dataset.x[op_indx[points], :], train_dataset.x[op_indx[p],\n :], rtol=0, atol=1)\n', (5267, 5356), True, 'import numpy 
as np\n'), ((5610, 5670), 'numpy.where', 'np.where', (['((train_dataset.labels == 1) & (gender_labels == 1))'], {}), '((train_dataset.labels == 1) & (gender_labels == 1))\n', (5618, 5670), True, 'import numpy as np\n'), ((5741, 5802), 'numpy.where', 'np.where', (['((train_dataset.labels == 1) & (gender_labels == -1))'], {}), '((train_dataset.labels == 1) & (gender_labels == -1))\n', (5749, 5802), True, 'import numpy as np\n'), ((6012, 6112), 'numpy.allclose', 'np.allclose', (['train_dataset.x[op_indx[points], :]', 'train_dataset.x[op_indx[p], :]'], {'rtol': '(0)', 'atol': '(3)'}), '(train_dataset.x[op_indx[points], :], train_dataset.x[op_indx[p],\n :], rtol=0, atol=3)\n', (6023, 6112), True, 'import numpy as np\n'), ((12144, 12164), 'numpy.mean', 'np.mean', (['dists_moved'], {}), '(dists_moved)\n', (12151, 12164), True, 'import numpy as np\n'), ((12240, 12265), 'numpy.mean', 'np.mean', (['(dists_moved == 0)'], {}), '(dists_moved == 0)\n', (12247, 12265), True, 'import numpy as np\n'), ((12355, 12392), 'numpy.mean', 'np.mean', (['dists_moved[dists_moved > 0]'], {}), '(dists_moved[dists_moved > 0])\n', (12362, 12392), True, 'import numpy as np\n'), ((18847, 18914), 'numpy.logical_and', 'np.logical_and', (['(group_label[0:general_train_idx] == 0)', '(Y_train == 1)'], {}), '(group_label[0:general_train_idx] == 0, Y_train == 1)\n', (18861, 18914), True, 'import numpy as np\n'), ((18984, 19051), 'numpy.logical_and', 'np.logical_and', (['(group_label[0:general_train_idx] == 1)', '(Y_train == 1)'], {}), '(group_label[0:general_train_idx] == 1, Y_train == 1)\n', (18998, 19051), True, 'import numpy as np\n'), ((20495, 20517), 'numpy.mean', 'np.mean', (['(Y_train == -1)'], {}), '(Y_train == -1)\n', (20502, 20517), True, 'import numpy as np\n'), ((20773, 20845), 'numpy.where', 'np.where', (['(feasible_flipped_mask & (Y_train == 1) & (gender_labels == -1))'], {}), '(feasible_flipped_mask & (Y_train == 1) & (gender_labels == -1))\n', (20781, 20845), True, 'import numpy as 
np\n'), ((20914, 20986), 'numpy.where', 'np.where', (['(feasible_flipped_mask & (Y_train == -1) & (gender_labels == 1))'], {}), '(feasible_flipped_mask & (Y_train == -1) & (gender_labels == 1))\n', (20922, 20986), True, 'import numpy as np\n'), ((21096, 21167), 'numpy.where', 'np.where', (['(feasible_flipped_mask & (Y_train == 1) & (gender_labels == 1))'], {}), '(feasible_flipped_mask & (Y_train == 1) & (gender_labels == 1))\n', (21104, 21167), True, 'import numpy as np\n'), ((21236, 21309), 'numpy.where', 'np.where', (['(feasible_flipped_mask & (Y_train == -1) & (gender_labels == -1))'], {}), '(feasible_flipped_mask & (Y_train == -1) & (gender_labels == -1))\n', (21244, 21309), True, 'import numpy as np\n'), ((4129, 4176), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 0)'], {}), '(group_label[0:general_train_idx] == 0)\n', (4137, 4176), True, 'import numpy as np\n'), ((4239, 4286), 'numpy.where', 'np.where', (['(group_label[0:general_train_idx] == 1)'], {}), '(group_label[0:general_train_idx] == 1)\n', (4247, 4286), True, 'import numpy as np\n'), ((4346, 4392), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 0)'], {}), '(group_label[general_train_idx:] == 0)\n', (4354, 4392), True, 'import numpy as np\n'), ((4454, 4500), 'numpy.where', 'np.where', (['(group_label[general_train_idx:] == 1)'], {}), '(group_label[general_train_idx:] == 1)\n', (4462, 4500), True, 'import numpy as np\n'), ((13397, 13454), 'os.path.join', 'os.path.join', (['output_root', "('%s_attack' % model.model_name)"], {}), "(output_root, '%s_attack' % model.model_name)\n", (13409, 13454), False, 'import os\n'), ((13815, 13872), 'os.path.join', 'os.path.join', (['output_root', "('%s_timing' % model.model_name)"], {}), "(output_root, '%s_timing' % model.model_name)\n", (13827, 13872), False, 'import os\n'), ((14301, 14358), 'os.path.join', 'os.path.join', (['output_root', "('%s_attack' % model.model_name)"], {}), "(output_root, '%s_attack' % 
model.model_name)\n", (14313, 14358), False, 'import os\n'), ((14719, 14776), 'os.path.join', 'os.path.join', (['output_root', "('%s_timing' % model.model_name)"], {}), "(output_root, '%s_timing' % model.model_name)\n", (14731, 14776), False, 'import os\n')] |
import cv2
import imutils
from imutils.video import VideoStream
import numpy as np
from keras.models import load_model

import utilsImg

# ****************************************************************************
# Minimum softmax probability required before a prediction is shown.
thresholds = 0.60
# ****************************************************************************

cap = VideoStream(src=0).start()
model = load_model('digits_model.h5')

while True:
    # read the frame from webcam
    frame = cap.read()
    frame = imutils.resize(frame, width=320)

    image = frame.copy()
    # prepare the image for the digit prediction CNN model
    image = cv2.resize(image, (32, 32))
    image = np.asarray(image)
    image = utilsImg.preProcessing(image)
    image = image.reshape(1, 32, 32, 1)

    # Run a single forward pass and derive both the class and its probability
    # from it. The original called predict_classes() *and* predict(), doubling
    # inference cost per frame; predict_classes() has also been removed from
    # modern Keras, while np.argmax over the softmax output is equivalent.
    predictions = model.predict(image)
    classIndex = int(np.argmax(predictions))  # most likely digit
    probVal = np.amax(predictions)  # its probability

    if probVal > thresholds:
        print('Predicted digit: {} '.format(classIndex), end=' ')
        print('Probability: {0:.2f}'.format(probVal))
        print('')
        cv2.putText(frame, 'Predict Digit: '+str(classIndex), (10,30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 1)
        cv2.putText(frame, 'Probability: ' + str(round(probVal*100, 2)) + '%', (10,50), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (0,255,0), 1)
    else:
        print('Unable to predict any digit!')

    cv2.imshow('Original image', frame)
    # Esc key (27) exits the loop.
    if (cv2.waitKey(1) & 0xFF) == 27:
        break

cap.stop()
cv2.destroyAllWindows()  # release the HighGUI window on exit
"keras.models.load_model",
"imutils.video.VideoStream",
"utilsImg.preProcessing",
"cv2.waitKey",
"numpy.asarray",
"numpy.amax",
"imutils.resize",
"cv2.imshow",
"cv2.resize"
] | [((355, 384), 'keras.models.load_model', 'load_model', (['"""digits_model.h5"""'], {}), "('digits_model.h5')\n", (365, 384), False, 'from keras.models import load_model\n'), ((466, 498), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(320)'}), '(frame, width=320)\n', (480, 498), False, 'import imutils\n'), ((595, 622), 'cv2.resize', 'cv2.resize', (['image', '(32, 32)'], {}), '(image, (32, 32))\n', (605, 622), False, 'import cv2\n'), ((633, 650), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (643, 650), True, 'import numpy as np\n'), ((663, 692), 'utilsImg.preProcessing', 'utilsImg.preProcessing', (['image'], {}), '(image)\n', (685, 692), False, 'import utilsImg\n'), ((948, 968), 'numpy.amax', 'np.amax', (['predictions'], {}), '(predictions)\n', (955, 968), True, 'import numpy as np\n'), ((1548, 1583), 'cv2.imshow', 'cv2.imshow', (['"""Original image"""', 'frame'], {}), "('Original image', frame)\n", (1558, 1583), False, 'import cv2\n'), ((319, 337), 'imutils.video.VideoStream', 'VideoStream', ([], {'src': '(0)'}), '(src=0)\n', (330, 337), False, 'from imutils.video import VideoStream\n'), ((1591, 1605), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1602, 1605), False, 'import cv2\n')] |
import concurrent.futures
import logging
import pytest
import sys
import numpy as np
import zappy.executor
import zappy.direct
import zappy.spark
import zarr
from numpy.testing import assert_allclose
from pyspark.sql import SparkSession
# add/change to "pywren_ndarray" to run the tests using Pywren (requires Pywren to be installed)
# Backend/source parameter ids consumed by the parametrized fixtures below.
TESTS = [
    "direct_ndarray",
    "direct_zarr",
    "executor_ndarray",
    "executor_zarr",
    "spark_ndarray",
    "spark_zarr",
]
# only run Beam tests on Python 2, and don't run executor tests
if sys.version_info[0] == 2:
    import apache_beam as beam
    from apache_beam.options.pipeline_options import PipelineOptions
    import zappy.beam
    TESTS = [
        "direct_ndarray",
        "direct_zarr",
        "spark_ndarray",
        "spark_zarr",
        "beam_ndarray",
        "beam_zarr",
    ]
class TestZappyArray:
    """Cross-backend behavior tests for zappy distributed arrays.

    Each test is parameterized (via the ``xd``/``xd_and_temp_store`` fixtures)
    over the available backends (direct, executor, Spark, and Beam on
    Python 2), comparing results against plain NumPy on the same data.
    """
    @pytest.fixture()
    def x(self):
        # Reference ndarray that every backend result is compared against.
        return np.array(
            [
                [0.0, 1.0, 0.0, 3.0, 0.0],
                [2.0, 0.0, 3.0, 4.0, 5.0],
                [4.0, 0.0, 0.0, 6.0, 7.0],
            ]
        )
    @pytest.fixture()
    def chunks(self):
        # Default chunking: whole rows, split into two row-groups.
        return (2, 5)
    @pytest.fixture()
    def xz(self, x, chunks, tmpdir):
        # Write x to a temporary Zarr store and return its path.
        input_file_zarr = str(tmpdir.join("x.zarr"))
        z = zarr.open(
            input_file_zarr, mode="w", shape=x.shape, dtype=x.dtype, chunks=chunks
        )
        z[:] = x.copy() # write as zarr locally
        return input_file_zarr
    @pytest.fixture(scope="module")
    def sc(self):
        # Shared local SparkContext for the whole module (expensive to start).
        logger = logging.getLogger("py4j")
        logger.setLevel(logging.WARN)
        spark = (
            SparkSession.builder.master("local[2]")
            .appName("my-local-testing-pyspark-context")
            .getOrCreate()
        )
        yield spark.sparkContext
        spark.stop()
    @pytest.fixture(params=TESTS)
    def xd(self, sc, x, xz, chunks, request):
        # Yields the test array built by each backend/source combination.
        if request.param == "direct_ndarray":
            yield zappy.direct.from_ndarray(x.copy(), chunks)
        elif request.param == "direct_zarr":
            yield zappy.direct.from_zarr(xz)
        elif request.param == "executor_ndarray":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.from_ndarray(executor, x.copy(), chunks)
        elif request.param == "executor_zarr":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.from_zarr(executor, xz)
        elif request.param == "spark_ndarray":
            yield zappy.spark.from_ndarray(sc, x.copy(), chunks)
        elif request.param == "spark_zarr":
            yield zappy.spark.from_zarr(sc, xz)
        elif request.param == "beam_ndarray":
            pipeline_options = PipelineOptions()
            pipeline = beam.Pipeline(options=pipeline_options)
            yield zappy.beam.from_ndarray(pipeline, x.copy(), chunks)
        elif request.param == "beam_zarr":
            pipeline_options = PipelineOptions()
            pipeline = beam.Pipeline(options=pipeline_options)
            yield zappy.beam.from_zarr(pipeline, xz)
        elif request.param == "pywren_ndarray":
            executor = zappy.executor.PywrenExecutor()
            yield zappy.executor.from_ndarray(executor, x.copy(), chunks)
    @pytest.fixture(params=TESTS)
    def xd_and_temp_store(self, sc, x, xz, chunks, request):
        # Like ``xd`` but also yields a store the test can write Zarr output to.
        if request.param == "direct_ndarray":
            yield zappy.direct.from_ndarray(x.copy(), chunks), zarr.TempStore()
        elif request.param == "direct_zarr":
            yield zappy.direct.from_zarr(xz), zarr.TempStore()
        elif request.param == "executor_ndarray":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.from_ndarray(
                    executor, x.copy(), chunks
                ), zarr.TempStore()
        elif request.param == "executor_zarr":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.from_zarr(executor, xz), zarr.TempStore()
        elif request.param == "spark_ndarray":
            yield zappy.spark.from_ndarray(sc, x.copy(), chunks), zarr.TempStore()
        elif request.param == "spark_zarr":
            yield zappy.spark.from_zarr(sc, xz), zarr.TempStore()
        elif request.param == "beam_ndarray":
            pipeline_options = PipelineOptions()
            pipeline = beam.Pipeline(options=pipeline_options)
            yield zappy.beam.from_ndarray(pipeline, x.copy(), chunks), zarr.TempStore()
        elif request.param == "beam_zarr":
            pipeline_options = PipelineOptions()
            pipeline = beam.Pipeline(options=pipeline_options)
            yield zappy.beam.from_zarr(pipeline, xz), zarr.TempStore()
        elif request.param == "pywren_ndarray":
            # Pywren output goes to a throwaway S3 bucket instead of TempStore.
            import s3fs.mapping
            def create_unique_bucket_name(prefix):
                import uuid
                return "%s-%s" % (prefix, str(uuid.uuid4()).replace("-", ""))
            s3 = s3fs.S3FileSystem()
            bucket = create_unique_bucket_name("zappy-test")
            s3.mkdir(bucket)
            path = "%s/%s" % (bucket, "test.zarr")
            s3store = s3fs.mapping.S3Map(path, s3=s3)
            executor = zappy.executor.PywrenExecutor()
            yield zappy.executor.from_ndarray(executor, x.copy(), chunks), s3store
            s3.rm(bucket, recursive=True)
    @pytest.fixture(params=["direct", "executor", "spark"]) # TODO: beam
    def zeros(self, sc, request):
        if request.param == "direct":
            yield zappy.direct.zeros((3, 5), chunks=(2, 5), dtype=int)
        elif request.param == "executor":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.zeros(executor, (3, 5), chunks=(2, 5), dtype=int)
        elif request.param == "spark":
            yield zappy.spark.zeros(sc, (3, 5), chunks=(2, 5), dtype=int)
    @pytest.fixture(params=["direct", "executor", "spark"]) # TODO: beam
    def ones(self, sc, request):
        if request.param == "direct":
            yield zappy.direct.ones((3, 5), chunks=(2, 5), dtype=int)
        elif request.param == "executor":
            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
                yield zappy.executor.ones(executor, (3, 5), chunks=(2, 5), dtype=int)
        elif request.param == "spark":
            yield zappy.spark.ones(sc, (3, 5), chunks=(2, 5), dtype=int)
    def test_identity(self, x, xd):
        assert_allclose(np.asarray(xd), x)
    def test_astype(self, x, xd):
        xd = xd.astype(int)
        x = x.astype(int)
        assert xd.dtype == x.dtype
        assert_allclose(np.asarray(xd), x)
    def test_astype_inplace(self, x, xd):
        original_id = id(xd)
        xd = xd.astype(int, copy=False)
        assert original_id == id(xd)
        x = x.astype(int, copy=False)
        assert xd.dtype == x.dtype
        assert_allclose(np.asarray(xd), x)
    def test_asarray(self, x, xd):
        assert_allclose(np.asarray(xd), x)
    def test_scalar_arithmetic(self, x, xd):
        xd = (((xd + 1) * 2) - 4) / 1.1
        x = (((x + 1) * 2) - 4) / 1.1
        assert_allclose(np.asarray(xd), x)
    def test_arithmetic(self, x, xd):
        xd = xd * 2 + xd
        x = x * 2 + x
        assert_allclose(np.asarray(xd), x)
    def test_broadcast_row(self, x, xd):
        a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
        xd = xd + a
        x = x + a
        assert_allclose(np.asarray(xd), x)
    def test_broadcast_col(self, x, xd):
        if sys.version_info[0] == 2 and isinstance(
            xd, zappy.beam.array.BeamZappyArray
        ): # TODO: fix this
            return
        a = np.array([[1.0], [2.0], [3.0]])
        xd = xd + a
        x = x + a
        assert_allclose(np.asarray(xd), x)
    def test_eq(self, x, xd):
        xd = xd == 0.0
        x = x == 0.0
        assert xd.dtype == x.dtype
        assert_allclose(np.asarray(xd), x)
    def test_ne(self, x, xd):
        xd = xd != 0.0
        x = x != 0.0
        assert_allclose(np.asarray(xd), x)
    def test_invert(self, x, xd):
        xd = ~(xd == 0.0)
        x = ~(x == 0.0)
        assert_allclose(np.asarray(xd), x)
    def test_inplace(self, x, xd):
        original_id = id(xd)
        xd += 1
        assert original_id == id(xd)
        x += 1
        assert_allclose(np.asarray(xd), x)
    def test_simple_index(self, x, xd):
        xd = xd[0]
        x = x[0]
        assert_allclose(xd, x)
    def test_boolean_index(self, x, xd):
        xd = np.sum(xd, axis=1) # sum rows
        xd = xd[xd > 5]
        x = np.sum(x, axis=1) # sum rows
        x = x[x > 5]
        assert_allclose(np.asarray(xd), x)
    def test_slice_cols(self, x, xd):
        xd = xd[:, 1:3]
        x = x[:, 1:3]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_slice_rows(self, x, xd):
        xd = xd[1:3, :]
        x = x[1:3, :]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_slice_rows_shrink_partitions(self, x, xd):
        if sys.version_info[0] == 2 and isinstance(
            xd, zappy.beam.array.BeamZappyArray
        ): # TODO: fix this
            return
        xd = xd[0:2, :]
        x = x[0:2, :]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_subset_cols_boolean(self, x, xd):
        subset = np.array([True, False, True, False, True])
        xd = xd[:, subset]
        x = x[:, subset]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_subset_rows_boolean(self, x, xd):
        subset = np.array([True, False, True])
        xd = xd[subset, :]
        x = x[subset, :]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_subset_cols_int(self, x, xd):
        subset = np.array([1, 3])
        xd = xd[:, subset]
        x = x[:, subset]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_subset_rows_int(self, x, xd):
        subset = np.array([1, 2])
        xd = xd[subset, :]
        x = x[subset, :]
        assert xd.shape == x.shape
        assert_allclose(np.asarray(xd), x)
    def test_newaxis(self, x, xd):
        xd = np.sum(xd, axis=1)[:, np.newaxis]
        x = np.sum(x, axis=1)[:, np.newaxis]
        assert_allclose(np.asarray(xd), x)
    def test_log1p(self, x, xd):
        log1pnps = np.asarray(np.log1p(xd))
        log1pnp = np.log1p(x)
        assert_allclose(log1pnps, log1pnp)
    def test_sum(self, x, xd):
        if sys.version_info[0] == 2 and isinstance(
            xd, zappy.beam.array.BeamZappyArray
        ): # TODO: fix this
            return
        totald = np.sum(xd)
        total = np.sum(x)
        assert totald == pytest.approx(total)
    def test_sum_cols(self, x, xd):
        xd = np.sum(xd, axis=0)
        x = np.sum(x, axis=0)
        assert_allclose(np.asarray(xd), x)
    def test_sum_rows(self, x, xd):
        xd = np.sum(xd, axis=1)
        x = np.sum(x, axis=1)
        assert_allclose(np.asarray(xd), x)
    def test_mean(self, x, xd):
        if sys.version_info[0] == 2 and isinstance(
            xd, zappy.beam.array.BeamZappyArray
        ): # TODO: fix this
            return
        meand = np.mean(xd)
        mean = np.mean(x)
        assert meand == pytest.approx(mean)
    def test_mean_cols(self, x, xd):
        xd = np.mean(xd, axis=0)
        x = np.mean(x, axis=0)
        assert_allclose(np.asarray(xd), x)
    def test_mean_rows(self, x, xd):
        xd = np.mean(xd, axis=1)
        x = np.mean(x, axis=1)
        assert_allclose(np.asarray(xd), x)
    def test_var(self, x, xd):
        def var(x):
            # E[x^2] - E[x]^2 (population variance along axis 0).
            mean = x.mean(axis=0)
            mean_sq = np.multiply(x, x).mean(axis=0)
            return mean_sq - mean ** 2
        varnps = np.asarray(var(xd))
        varnp = var(x)
        assert_allclose(varnps, varnp)
    def test_median(self, x, xd):
        mediand = np.median(xd) # implicitly converts to np.array
        median = np.median(x)
        assert mediand == pytest.approx(median)
    def test_write_zarr(self, x, xd_and_temp_store):
        xd, temp_store = xd_and_temp_store
        xd.to_zarr(temp_store, xd.chunks)
        # read back as zarr directly and check it is the same as x
        z = zarr.open(temp_store, mode="r", shape=x.shape, dtype=x.dtype, chunks=(2, 5))
        arr = z[:]
        assert_allclose(arr, x)
    def test_write_zarr_ncopies(self, x, xd_and_temp_store):
        xd, temp_store = xd_and_temp_store
        if sys.version_info[0] == 2 and isinstance(
            xd, zappy.beam.array.BeamZappyArray
        ): # TODO: fix this
            return
        xd = xd._repartition_chunks((3, 5))
        ncopies = 3
        xd.to_zarr(temp_store, xd.chunks, ncopies=ncopies)
        # read back as zarr directly and check it is the same as x
        z = zarr.open(
            temp_store,
            mode="r",
            shape=(x.shape[0] * ncopies, x.shape[1]),
            dtype=x.dtype,
            chunks=(1, 5),
        )
        arr = z[:]
        x_ncopies = np.vstack((x,) * ncopies)
        assert_allclose(arr, x_ncopies)
    def test_zeros(self, zeros):
        totals = np.sum(zeros, axis=0)
        x = np.array([0, 0, 0, 0, 0])
        assert_allclose(np.asarray(totals), x)
    def test_ones(self, ones):
        totals = np.sum(ones, axis=0)
        x = np.array([3, 3, 3, 3, 3])
        assert_allclose(np.asarray(totals), x)
    def test_asndarrays(self, x, xd):
        # Executor backend only: materialize several arrays in one pass.
        if not isinstance(xd, zappy.executor.array.ExecutorZappyArray):
            return
        xd1, xd2 = zappy.executor.asndarrays((xd + 1, xd + 2))
        assert_allclose(xd1, x + 1)
        assert_allclose(xd2, x + 2)
| [
"uuid.uuid4",
"zarr.open",
"numpy.sum",
"numpy.multiply",
"numpy.median",
"numpy.asarray",
"pytest.fixture",
"apache_beam.Pipeline",
"pyspark.sql.SparkSession.builder.master",
"apache_beam.options.pipeline_options.PipelineOptions",
"numpy.vstack",
"numpy.mean",
"numpy.array",
"zarr.TempSto... | [((880, 896), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (894, 896), False, 'import pytest\n'), ((1112, 1128), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1126, 1128), False, 'import pytest\n'), ((1179, 1195), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1193, 1195), False, 'import pytest\n'), ((1488, 1518), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1502, 1518), False, 'import pytest\n'), ((1842, 1870), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'TESTS'}), '(params=TESTS)\n', (1856, 1870), False, 'import pytest\n'), ((3340, 3368), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'TESTS'}), '(params=TESTS)\n', (3354, 3368), False, 'import pytest\n'), ((5510, 5564), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['direct', 'executor', 'spark']"}), "(params=['direct', 'executor', 'spark'])\n", (5524, 5564), False, 'import pytest\n'), ((6053, 6107), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['direct', 'executor', 'spark']"}), "(params=['direct', 'executor', 'spark'])\n", (6067, 6107), False, 'import pytest\n'), ((929, 1025), 'numpy.array', 'np.array', (['[[0.0, 1.0, 0.0, 3.0, 0.0], [2.0, 0.0, 3.0, 4.0, 5.0], [4.0, 0.0, 0.0, 6.0,\n 7.0]]'], {}), '([[0.0, 1.0, 0.0, 3.0, 0.0], [2.0, 0.0, 3.0, 4.0, 5.0], [4.0, 0.0, \n 0.0, 6.0, 7.0]])\n', (937, 1025), True, 'import numpy as np\n'), ((1298, 1384), 'zarr.open', 'zarr.open', (['input_file_zarr'], {'mode': '"""w"""', 'shape': 'x.shape', 'dtype': 'x.dtype', 'chunks': 'chunks'}), "(input_file_zarr, mode='w', shape=x.shape, dtype=x.dtype, chunks=\n chunks)\n", (1307, 1384), False, 'import zarr\n'), ((1554, 1579), 'logging.getLogger', 'logging.getLogger', (['"""py4j"""'], {}), "('py4j')\n", (1571, 1579), False, 'import logging\n'), ((7527, 7562), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0]'], {}), '([1.0, 2.0, 3.0, 4.0, 5.0])\n', (7535, 7562), True, 'import numpy as np\n'), ((7846, 7877), 
'numpy.array', 'np.array', (['[[1.0], [2.0], [3.0]]'], {}), '([[1.0], [2.0], [3.0]])\n', (7854, 7877), True, 'import numpy as np\n'), ((8619, 8641), 'numpy.testing.assert_allclose', 'assert_allclose', (['xd', 'x'], {}), '(xd, x)\n', (8634, 8641), False, 'from numpy.testing import assert_allclose\n'), ((8697, 8715), 'numpy.sum', 'np.sum', (['xd'], {'axis': '(1)'}), '(xd, axis=1)\n', (8703, 8715), True, 'import numpy as np\n'), ((8764, 8781), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (8770, 8781), True, 'import numpy as np\n'), ((9578, 9620), 'numpy.array', 'np.array', (['[True, False, True, False, True]'], {}), '([True, False, True, False, True])\n', (9586, 9620), True, 'import numpy as np\n'), ((9816, 9845), 'numpy.array', 'np.array', (['[True, False, True]'], {}), '([True, False, True])\n', (9824, 9845), True, 'import numpy as np\n'), ((10037, 10053), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (10045, 10053), True, 'import numpy as np\n'), ((10245, 10261), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (10253, 10261), True, 'import numpy as np\n'), ((10659, 10670), 'numpy.log1p', 'np.log1p', (['x'], {}), '(x)\n', (10667, 10670), True, 'import numpy as np\n'), ((10679, 10713), 'numpy.testing.assert_allclose', 'assert_allclose', (['log1pnps', 'log1pnp'], {}), '(log1pnps, log1pnp)\n', (10694, 10713), False, 'from numpy.testing import assert_allclose\n'), ((10911, 10921), 'numpy.sum', 'np.sum', (['xd'], {}), '(xd)\n', (10917, 10921), True, 'import numpy as np\n'), ((10938, 10947), 'numpy.sum', 'np.sum', (['x'], {}), '(x)\n', (10944, 10947), True, 'import numpy as np\n'), ((11044, 11062), 'numpy.sum', 'np.sum', (['xd'], {'axis': '(0)'}), '(xd, axis=0)\n', (11050, 11062), True, 'import numpy as np\n'), ((11075, 11092), 'numpy.sum', 'np.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (11081, 11092), True, 'import numpy as np\n'), ((11186, 11204), 'numpy.sum', 'np.sum', (['xd'], {'axis': '(1)'}), '(xd, axis=1)\n', 
(11192, 11204), True, 'import numpy as np\n'), ((11217, 11234), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (11223, 11234), True, 'import numpy as np\n'), ((11475, 11486), 'numpy.mean', 'np.mean', (['xd'], {}), '(xd)\n', (11482, 11486), True, 'import numpy as np\n'), ((11502, 11512), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (11509, 11512), True, 'import numpy as np\n'), ((11608, 11627), 'numpy.mean', 'np.mean', (['xd'], {'axis': '(0)'}), '(xd, axis=0)\n', (11615, 11627), True, 'import numpy as np\n'), ((11640, 11658), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (11647, 11658), True, 'import numpy as np\n'), ((11753, 11772), 'numpy.mean', 'np.mean', (['xd'], {'axis': '(1)'}), '(xd, axis=1)\n', (11760, 11772), True, 'import numpy as np\n'), ((11785, 11803), 'numpy.mean', 'np.mean', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (11792, 11803), True, 'import numpy as np\n'), ((12094, 12124), 'numpy.testing.assert_allclose', 'assert_allclose', (['varnps', 'varnp'], {}), '(varnps, varnp)\n', (12109, 12124), False, 'from numpy.testing import assert_allclose\n'), ((12178, 12191), 'numpy.median', 'np.median', (['xd'], {}), '(xd)\n', (12187, 12191), True, 'import numpy as np\n'), ((12244, 12256), 'numpy.median', 'np.median', (['x'], {}), '(x)\n', (12253, 12256), True, 'import numpy as np\n'), ((12523, 12599), 'zarr.open', 'zarr.open', (['temp_store'], {'mode': '"""r"""', 'shape': 'x.shape', 'dtype': 'x.dtype', 'chunks': '(2, 5)'}), "(temp_store, mode='r', shape=x.shape, dtype=x.dtype, chunks=(2, 5))\n", (12532, 12599), False, 'import zarr\n'), ((12627, 12650), 'numpy.testing.assert_allclose', 'assert_allclose', (['arr', 'x'], {}), '(arr, x)\n', (12642, 12650), False, 'from numpy.testing import assert_allclose\n'), ((13106, 13213), 'zarr.open', 'zarr.open', (['temp_store'], {'mode': '"""r"""', 'shape': '(x.shape[0] * ncopies, x.shape[1])', 'dtype': 'x.dtype', 'chunks': '(1, 5)'}), "(temp_store, mode='r', shape=(x.shape[0] * 
ncopies, x.shape[1]),\n dtype=x.dtype, chunks=(1, 5))\n", (13115, 13213), False, 'import zarr\n'), ((13320, 13345), 'numpy.vstack', 'np.vstack', (['((x,) * ncopies)'], {}), '((x,) * ncopies)\n', (13329, 13345), True, 'import numpy as np\n'), ((13354, 13385), 'numpy.testing.assert_allclose', 'assert_allclose', (['arr', 'x_ncopies'], {}), '(arr, x_ncopies)\n', (13369, 13385), False, 'from numpy.testing import assert_allclose\n'), ((13437, 13458), 'numpy.sum', 'np.sum', (['zeros'], {'axis': '(0)'}), '(zeros, axis=0)\n', (13443, 13458), True, 'import numpy as np\n'), ((13471, 13496), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0])\n', (13479, 13496), True, 'import numpy as np\n'), ((13593, 13613), 'numpy.sum', 'np.sum', (['ones'], {'axis': '(0)'}), '(ones, axis=0)\n', (13599, 13613), True, 'import numpy as np\n'), ((13626, 13651), 'numpy.array', 'np.array', (['[3, 3, 3, 3, 3]'], {}), '([3, 3, 3, 3, 3])\n', (13634, 13651), True, 'import numpy as np\n'), ((13900, 13927), 'numpy.testing.assert_allclose', 'assert_allclose', (['xd1', '(x + 1)'], {}), '(xd1, x + 1)\n', (13915, 13927), False, 'from numpy.testing import assert_allclose\n'), ((13936, 13963), 'numpy.testing.assert_allclose', 'assert_allclose', (['xd2', '(x + 2)'], {}), '(xd2, x + 2)\n', (13951, 13963), False, 'from numpy.testing import assert_allclose\n'), ((6647, 6661), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (6657, 6661), True, 'import numpy as np\n'), ((6814, 6828), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (6824, 6828), True, 'import numpy as np\n'), ((7079, 7093), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (7089, 7093), True, 'import numpy as np\n'), ((7158, 7172), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (7168, 7172), True, 'import numpy as np\n'), ((7325, 7339), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (7335, 7339), True, 'import numpy as np\n'), ((7454, 7468), 'numpy.asarray', 'np.asarray', (['xd'], {}), 
'(xd)\n', (7464, 7468), True, 'import numpy as np\n'), ((7625, 7639), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (7635, 7639), True, 'import numpy as np\n'), ((7940, 7954), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (7950, 7954), True, 'import numpy as np\n'), ((8093, 8107), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (8103, 8107), True, 'import numpy as np\n'), ((8211, 8225), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (8221, 8225), True, 'import numpy as np\n'), ((8339, 8353), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (8349, 8353), True, 'import numpy as np\n'), ((8515, 8529), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (8525, 8529), True, 'import numpy as np\n'), ((8839, 8853), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (8849, 8853), True, 'import numpy as np\n'), ((9002, 9016), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (9012, 9016), True, 'import numpy as np\n'), ((9165, 9179), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (9175, 9179), True, 'import numpy as np\n'), ((9494, 9508), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (9504, 9508), True, 'import numpy as np\n'), ((9732, 9746), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (9742, 9746), True, 'import numpy as np\n'), ((9957, 9971), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (9967, 9971), True, 'import numpy as np\n'), ((10165, 10179), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (10175, 10179), True, 'import numpy as np\n'), ((10373, 10387), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (10383, 10387), True, 'import numpy as np\n'), ((10441, 10459), 'numpy.sum', 'np.sum', (['xd'], {'axis': '(1)'}), '(xd, axis=1)\n', (10447, 10459), True, 'import numpy as np\n'), ((10487, 10504), 'numpy.sum', 'np.sum', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (10493, 10504), True, 'import numpy as np\n'), ((10544, 10558), 'numpy.asarray', 'np.asarray', (['xd'], {}), 
'(xd)\n', (10554, 10558), True, 'import numpy as np\n'), ((10627, 10639), 'numpy.log1p', 'np.log1p', (['xd'], {}), '(xd)\n', (10635, 10639), True, 'import numpy as np\n'), ((10973, 10993), 'pytest.approx', 'pytest.approx', (['total'], {}), '(total)\n', (10986, 10993), False, 'import pytest\n'), ((11117, 11131), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (11127, 11131), True, 'import numpy as np\n'), ((11259, 11273), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (11269, 11273), True, 'import numpy as np\n'), ((11537, 11556), 'pytest.approx', 'pytest.approx', (['mean'], {}), '(mean)\n', (11550, 11556), False, 'import pytest\n'), ((11683, 11697), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (11693, 11697), True, 'import numpy as np\n'), ((11828, 11842), 'numpy.asarray', 'np.asarray', (['xd'], {}), '(xd)\n', (11838, 11842), True, 'import numpy as np\n'), ((12283, 12304), 'pytest.approx', 'pytest.approx', (['median'], {}), '(median)\n', (12296, 12304), False, 'import pytest\n'), ((13521, 13539), 'numpy.asarray', 'np.asarray', (['totals'], {}), '(totals)\n', (13531, 13539), True, 'import numpy as np\n'), ((13676, 13694), 'numpy.asarray', 'np.asarray', (['totals'], {}), '(totals)\n', (13686, 13694), True, 'import numpy as np\n'), ((3539, 3555), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (3553, 3555), False, 'import zarr\n'), ((11955, 11972), 'numpy.multiply', 'np.multiply', (['x', 'x'], {}), '(x, x)\n', (11966, 11972), True, 'import numpy as np\n'), ((1648, 1687), 'pyspark.sql.SparkSession.builder.master', 'SparkSession.builder.master', (['"""local[2]"""'], {}), "('local[2]')\n", (1675, 1687), False, 'from pyspark.sql import SparkSession\n'), ((3647, 3663), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (3661, 3663), False, 'import zarr\n'), ((3914, 3930), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (3928, 3930), False, 'import zarr\n'), ((2798, 2815), 'apache_beam.options.pipeline_options.PipelineOptions', 
'PipelineOptions', ([], {}), '()\n', (2813, 2815), False, 'from apache_beam.options.pipeline_options import PipelineOptions\n'), ((2839, 2878), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {'options': 'pipeline_options'}), '(options=pipeline_options)\n', (2852, 2878), True, 'import apache_beam as beam\n'), ((4123, 4139), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (4137, 4139), False, 'import zarr\n'), ((4253, 4269), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (4267, 4269), False, 'import zarr\n'), ((4457, 4474), 'apache_beam.options.pipeline_options.PipelineOptions', 'PipelineOptions', ([], {}), '()\n', (4472, 4474), False, 'from apache_beam.options.pipeline_options import PipelineOptions\n'), ((4498, 4537), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {'options': 'pipeline_options'}), '(options=pipeline_options)\n', (4511, 4537), True, 'import apache_beam as beam\n'), ((3023, 3040), 'apache_beam.options.pipeline_options.PipelineOptions', 'PipelineOptions', ([], {}), '()\n', (3038, 3040), False, 'from apache_beam.options.pipeline_options import PipelineOptions\n'), ((3064, 3103), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {'options': 'pipeline_options'}), '(options=pipeline_options)\n', (3077, 3103), True, 'import apache_beam as beam\n'), ((4363, 4379), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (4377, 4379), False, 'import zarr\n'), ((4700, 4717), 'apache_beam.options.pipeline_options.PipelineOptions', 'PipelineOptions', ([], {}), '()\n', (4715, 4717), False, 'from apache_beam.options.pipeline_options import PipelineOptions\n'), ((4741, 4780), 'apache_beam.Pipeline', 'beam.Pipeline', ([], {'options': 'pipeline_options'}), '(options=pipeline_options)\n', (4754, 4780), True, 'import apache_beam as beam\n'), ((4609, 4625), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (4623, 4625), False, 'import zarr\n'), ((4835, 4851), 'zarr.TempStore', 'zarr.TempStore', ([], {}), '()\n', (4849, 4851), False, 'import zarr\n'), ((5059, 
5071), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5069, 5071), False, 'import uuid\n')] |
import random, re, numpy, operator
import macro_utils
from error_messages import error_messages
tokens = ["+", "-", "*", "/", "^", "(", ")"]

async def tokenize(request):
    """Split a roll expression into operand and operator tokens.

    Whitespace separates tokens; any character in ``tokens`` is emitted as
    its own token, flushing whatever operand text preceded it.
    """
    result = []
    buffer = ""
    for ch in request:
        if ch == ' ' or ch in tokens:
            # Flush the pending operand (if any) before the separator.
            if buffer:
                result.append(buffer)
                buffer = ""
            if ch in tokens:
                result.append(ch)
        else:
            buffer += ch
    if buffer:
        result.append(buffer)
    return result
async def error_message(error):
    """Prefix ``error`` with a random exclamation from ``error_messages``."""
    prefix = random.choice(error_messages)
    return "{}! ({})".format(prefix, error)
async def handle_dice(ctx, message_in):
    """Entry point: expand 'repeat(cmd, n)' / 'rpN cmd' wrappers, then roll.

    A repeated command is rolled n times; the per-roll outputs are joined
    with blank lines when the inner command contains multiple
    comma-separated rolls, otherwise with single newlines.
    """
    repeat_patterns = [
        re.compile(r" *repeat *\( *(?P<command>.*) *, *(?P<count>[0-9]+) *\) *"),
        re.compile(r" *rp(?P<count>[0-9]+) +(?P<command>.*) *"),
    ]
    match = None
    for pattern in repeat_patterns:
        match = pattern.match(message_in)
        if match:
            break
    if match is None:
        return await handle_command(ctx, message_in)
    info = match.groupdict()
    outputs = []
    for _ in range(int(info['count'])):
        outputs.append(await handle_command(ctx, info['command']))
    separator = "\n\n" if ',' in info['command'] else "\n"
    return separator.join(outputs)
async def handle_command(ctx, message_in):
    """Split a comma-separated message into requests, roll each, join lines."""
    outputs = []
    for part in message_in.split(','):
        outputs.append(await handle_request(ctx, part.strip()))
    return "\n".join(outputs)
async def handle_request(ctx, roll_request):
    """Evaluate a single roll request string and return a formatted result.

    Splits off an optional trailing ``# comment``, expands server macros
    when invoked from a guild context, resolves each non-operator token via
    the handlers in ``options`` (presumably a module-level list of roll
    functions defined later in the file — not visible here), then evaluates
    the resulting arithmetic expression.
    """
    comment = re.compile(r"(?P<roll>.*?) *# *(?P<comment>.*)")
    match = comment.match(roll_request)
    comment = None
    if match:
        request = match.groupdict()['roll']
        comment = match.groupdict()['comment']
        print(comment)
    else:
        request = roll_request
    calculate = await tokenize(request)
    if ctx and ctx.guild:
        # Macro expansion is only available inside a guild (server) context.
        matched = False
        for i in range(len(calculate)):
            print(f"checking if {calculate[i]} is a macro")
            expanded = await macro_utils.expand_macro(ctx, calculate[i])
            if expanded:
                print("It is!")
                print(expanded)
                # Parenthesize so the expansion keeps its own precedence.
                calculate[i] = f"({expanded})"
                print(calculate[i])
                matched = True
        if matched:
            print("We matched some stuff")
            # Re-tokenize because expansions may contain operators.
            request = ''.join(calculate)
            calculate = await tokenize(request)
            print(calculate)
    printable = calculate[:]
    request = ''.join(printable)
    for i in range(len(calculate)):
        if calculate[i] in tokens:
            continue
        # Try each roll handler until one claims the token or errors out.
        for roll_func in options:
            rolls, result, error = await roll_func(calculate[i])
            if rolls or error:
                break
        if not rolls and not error:
            return await error_message(f"`{calculate[i]}` not recognized")
        elif error:
            return await error_message(error)
        else:
            # Numeric value for evaluation; pretty roll string for display.
            calculate[i] = str(result)
            printable[i] = rolls
    for i in range(len(calculate)):
        # Users write '^' for exponent; Python uses '**'.
        if calculate[i] == '^':
            calculate[i] = '**'
    resulting_calc = ' '.join(calculate)
    print(resulting_calc)
    i_rolls = ''.join(printable)
    print(i_rolls)
    try:
        # NOTE(review): eval on a user-derived string. Tokens are constrained
        # to numbers/operators by the loop above, but a dedicated arithmetic
        # parser would be safer — verify no handler can emit arbitrary code.
        total = eval(resulting_calc)
    except SyntaxError as e:
        return await error_message(f"Invalid syntax")
    except ZeroDivisionError as e:
        return await error_message(f"Tried to divide by zero")
    if comment:
        return f"[{comment}] `{request}` = {i_rolls} = {total}"
    return f"`{request}` = {i_rolls} = {total}"
def get_operator(op_string):
    """Map a comparison token to its function from the ``operator`` module.

    Args:
        op_string: one of "=", "<", ">", "<=", ">=".

    Returns:
        The matching binary predicate (e.g. ``operator.eq``), or ``None``
        when ``op_string`` is not a recognized comparison.
    """
    # Dispatch table replaces the original repetitive if-chain.
    comparisons = {
        "=": operator.eq,
        "<": operator.lt,
        ">": operator.gt,
        "<=": operator.le,
        ">=": operator.ge,
    }
    return comparisons.get(op_string)
async def check_count_dice(message, roll_string):
    """Check whether ``message`` is a success-counting roll.

    ``count_suffix`` is a module-level regex fragment defined elsewhere in
    this file (not visible here); it presumably captures the success
    comparison plus the optional twice/failure modifier groups the
    f-strings below read — verify against its definition.

    Returns (True, match) when the counting form matches, else (None, None).
    """
    dice_roll = re.compile(rf"^{roll_string}{count_suffix}$")
    match = dice_roll.match(message)
    if match:
        print(f"Succeeds if die {match.groupdict()['operator']} {match.groupdict()['comp_num']}")
        if match.groupdict()['failure_vals']:
            print(f"Fails if die matches {match.groupdict()['failure_vals'].split('f')[1:]}")
        if match.groupdict()['failure_cond']:
            print(f"Fails if die {match.groupdict()['failure_cond']} {match.groupdict()['failure_cond_val']}")
        print("Matches count formula")
        return True, match
    return None, None
async def count_the_dice(message, roll_string, rolls):
    """Score a success-counting roll.

    Counts successes per the comparison captured by the ``count_suffix``
    pattern (module-level regex fragment defined elsewhere in the file),
    applies optional double-count ('t') and failure ('f') modifiers, and
    returns ``(formatted_rolls, net_successes, error)``.
    """
    dice_roll = re.compile(rf"^{roll_string}{count_suffix}$")
    match = dice_roll.match(message)
    if match:
        print(f"Succeeds if die {match.groupdict()['operator']} {match.groupdict()['comp_num']}")
        info = match.groupdict()
        roll_strings = [str(i) for i in rolls]
        success_op = get_operator(info['operator'])
        s_cond_val = int(info['comp_num'])
        successes = 0
        if info['twice_vals']:
            # Dice matching a listed 't' value count double (bolded in output).
            t_vals = [int(i) for i in info['twice_vals'].split('t')[1:]]
            for i, die in enumerate(rolls):
                if success_op(die, s_cond_val):
                    successes += 1
                if die in t_vals:
                    successes += 1
                    roll_strings[i] = f"**{roll_strings[i]}**"
        elif info['twice_cond']:
            # Dice satisfying the 't' comparison count double.
            twice_op = get_operator(info['twice_cond'])
            t_cond = int(info['twice_cond_val'])
            for i, die in enumerate(rolls):
                if success_op(die, s_cond_val):
                    successes += 1
                if twice_op(die, t_cond):
                    successes += 1
                    roll_strings[i] = f"**{roll_strings[i]}**"
        else:
            for i, die in enumerate(rolls):
                if success_op(die, s_cond_val):
                    successes += 1
                    roll_strings[i] = f"**{roll_strings[i]}**"
        if match.groupdict()['failure_vals']:
            # Listed 'f' values subtract from the total (struck out in output).
            f_vals = [int(i) for i in info['failure_vals'].split('f')[1:]]
            failures = 0
            for i, die in enumerate(rolls):
                if die in f_vals:
                    failures += 1
                    roll_strings[i] = f"~~{roll_strings[i]}~~"
            return f"({' '.join(roll_strings)}, {successes} successes, {failures} failures)", successes - failures, None
        elif match.groupdict()['failure_cond']:
            # Dice satisfying the 'f' comparison subtract from the total.
            failure_op = get_operator(info['failure_cond'])
            f_cond_val = int(info['failure_cond_val'])
            failures = 0
            for i, die in enumerate(rolls):
                if failure_op(die, f_cond_val):
                    failures += 1
                    roll_strings[i] = f"~~{roll_strings[i]}~~"
            print(f"Fails if die {match.groupdict()['failure_cond']} {match.groupdict()['failure_cond_val']}")
            return f"({' '.join(roll_strings)}, {successes} successes, {failures} failures)", successes - failures, None
        else:
            return f"({' '.join(roll_strings)}, {successes} successes)", successes, None
    print("Massive Error")
    return None, None, "Idk what happened, but something bad"
async def basic_roll(message):
    """Handle a plain 'NdM' roll (optionally with a success-count suffix).

    Returns a ``(display_string, total, error)`` tuple:
      * ``(None, None, None)`` when ``message`` is not an NdM roll,
      * ``(None, None, msg)`` on an invalid roll (d0),
      * otherwise the joined roll string and the sum (or, for counting
        rolls, whatever ``count_the_dice`` produces).
    """
    roll_string = r"(?P<num>[0-9]+)d(?P<dice>[0-9]+)"
    dice_roll = re.compile(rf"^{roll_string}$")
    match = dice_roll.match(message)
    count = False
    if not match:
        # Not a bare NdM — maybe it is the success-counting variant.
        count, match = await check_count_dice(message, roll_string)
        if not count:
            return None, None, None
    command = match.groupdict()
    max_dice = int(command['dice'])
    if max_dice == 0:
        return None, None, "Can't roll a d0"
    # Comprehension instead of the original append loop (same roll order).
    rolls = [random.randint(1, max_dice) for _ in range(int(command['num']))]
    if count:
        return await count_the_dice(message, roll_string, rolls)
    return f"({'+'.join(str(i) for i in rolls)})", sum(rolls), None
async def drop_dice(message):
    """Handle 'NdM(k|kl|kh)K' rolls: roll N dice and keep K of them.

    'kl' keeps the K lowest dice; 'k' and 'kh' keep the K highest.
    Dropped dice are shown struck through (~~x~~) and excluded from the
    total.  Returns (roll_string, total, error); (None, None, None) means
    the message didn't match this handler.
    """
    roll_string = r"^(?P<num>[0-9]+)d(?P<dice>[0-9]+)(?P<drop>k|kl|kh)(?P<keep_num>[0-9]+)$"
    dice_roll = re.compile(rf"^{roll_string}$")
    match = dice_roll.match(message)
    if not match:
        return None, None, None
    command = match.groupdict()
    max_dice = int(command['dice'])
    if max_dice == 0:
        return None, None, "Can't roll a d0"
    num = int(command['num'])
    rolls = [random.randint(1, max_dice) for _ in range(num)]
    # Clamp the keep count to [0, num].  The original code assigned the raw
    # *string* command['num'] here, which made range(keep_num) raise a
    # TypeError whenever more dice were kept than were rolled.
    keep_num = min(max(int(command['keep_num']), 0), num)
    # start with every die marked as dropped, then un-mark the kept ones
    mask_rolls = [f'~~{i}~~' for i in rolls]
    if command['drop'] == 'kl':
        # keep the lowest: repeatedly take the current minimum and replace
        # it with a sentinel larger than any face so it can't be picked again
        cap = max_dice + 1
        for _ in range(keep_num):
            m = numpy.argmin(rolls)
            mask_rolls[m] = rolls[m]
            rolls[m] = cap
    else:
        # keep the highest: repeatedly take the current maximum and replace
        # it with 0 so it can't be picked again
        for _ in range(keep_num):
            m = numpy.argmax(rolls)
            mask_rolls[m] = rolls[m]
            rolls[m] = 0
    rolls = mask_rolls
    return f"({'+'.join(str(i) for i in rolls)})", sum(die for die in rolls if type(die) == int), None
async def explode_dice(message):
    """Handle exploding rolls: 'NdM!' (explode on the max face),
    'NdMe4e5' (explode on listed values) or 'NdMe>=5' (explode on a
    comparison).

    An exploding die adds another die to the pool every time it shows an
    exploding value.  Returns (roll_string, total, error); (None, None,
    None) means the message didn't match this handler.
    """
    roll_string = r"(?P<num>[0-9]+)d(?P<dice>[0-9]+)(!|(?P<explode_vals>(e[0-9]+)+)|e(?P<explode_cond>(<|>|<=|>=))(?P<cond_val>[0-9]+))"
    dice_roll = re.compile(rf"^{roll_string}$")
    match = dice_roll.match(message)
    count = False
    if not match:
        count, match = await check_count_dice(message, roll_string)
        if not count:
            return None, None, None
    command = match.groupdict()
    rolls = []
    max_dice = int(command['dice'])
    if max_dice == 0:
        return None, None, "Can't roll a d0"
    if max_dice == 1:
        # a d1 always shows its max face, so it would explode forever
        return None, None, "Can't explode a d1"
    dice_to_roll = int(command['num'])
    if command['explode_vals']:
        # Targeted explode: only the listed face values explode.
        explode_vals = list(set([int(i) for i in command['explode_vals'].split('e')[1:]]))
        if len(explode_vals) == max_dice:
            return None, None, "Can't explode on every value"
        while dice_to_roll > 0:
            roll = random.randint(1, max_dice)
            rolls += [roll]
            if roll not in explode_vals:
                dice_to_roll -= 1
    elif command['explode_cond']:
        # Conditional explode: faces matching the comparison explode.
        explode_cond = get_operator(command['explode_cond'])
        explode_cond_val = int(command['cond_val'])
        # At least one rollable face (1..max_dice) must NOT explode or the
        # loop below never terminates.  The original checked
        # range(max_dice + 1) which includes the impossible face 0, so
        # conditions like '>0' slipped through and looped forever.
        if all(explode_cond(face, explode_cond_val) for face in range(1, max_dice + 1)):
            # return (None, None, error) like every other error path (the
            # original inconsistently returned False, False here)
            return None, None, "Can't explode on every value"
        while dice_to_roll > 0:
            roll = random.randint(1, max_dice)
            rolls += [roll]
            if not explode_cond(roll, explode_cond_val):
                dice_to_roll -= 1
    else:
        # Base explode: only the maximum face explodes.
        while dice_to_roll > 0:
            roll = random.randint(1, max_dice)
            rolls += [roll]
            if roll != max_dice:
                dice_to_roll -= 1
    if count:
        return await count_the_dice(message, roll_string, rolls)
    return f"({'+'.join(str(i) for i in rolls)})", sum(rolls), None
async def reroll_dice(message):
    """Handle reroll rolls: 'NdMr1r2' (reroll listed values) or 'NdMr<=2'
    (reroll dice matching a comparison).

    Each die is rerolled at most once; rerolled dice are shown struck
    through with their replacement appended at the end.  Returns
    (roll_string, total, error); (None, None, None) means the message
    didn't match this handler.
    """
    roll_string = r"(?P<num>[0-9]+)d(?P<dice>[0-9]+)((?P<reroll_vals>(r[0-9]+)+)|r(?P<reroll_cond>(<|>|<=|>=))(?P<cond_val>[0-9]+))"
    dice_roll = re.compile(rf"^{roll_string}$")
    match = dice_roll.match(message)
    if not match:
        return None, None, None
    command = match.groupdict()
    rolls = []
    roll_strings = []
    max_dice = int(command['dice'])
    if max_dice == 0:
        return None, None, "Can't roll a d0"
    if command['reroll_vals']:
        # Targeted reroll: reroll any die showing one of the listed values.
        reroll_vals = list(set([int(i) for i in command['reroll_vals'].split('r')[1:]]))
        for i in range(int(command['num'])):
            roll = random.randint(1, max_dice)
            rolls += [roll]
            roll_strings += [str(roll)]
        new_roll_strings = []
        for i, roll in enumerate(rolls):
            if roll in reroll_vals:
                new_roll = random.randint(1, max_dice)
                rolls[i] = new_roll
                roll_strings[i] = f"~~{roll_strings[i]}~~"
                new_roll_strings += [str(new_roll)]
        roll_strings += new_roll_strings
    elif command['reroll_cond']:
        # Conditional reroll: reroll any die matching the comparison.
        # ('elif' because the regex makes the two forms mutually exclusive.)
        reroll_cond = get_operator(command['reroll_cond'])
        reroll_cond_val = int(command['cond_val'])
        for i in range(int(command['num'])):
            roll = random.randint(1, max_dice)
            rolls += [roll]
            roll_strings += [str(roll)]
        new_roll_strings = []
        for i, roll in enumerate(rolls):
            if reroll_cond(roll, reroll_cond_val):
                new_roll = random.randint(1, max_dice)
                rolls[i] = new_roll
                roll_strings[i] = f"~~{roll_strings[i]}~~"
                new_roll_strings += [str(new_roll)]
        roll_strings += new_roll_strings
    return f"({'+'.join(str(i) for i in roll_strings)})", sum(rolls), None
async def fate_roll(message):
    """Handle Fate/Fudge rolls ('NdF'): each die is -1, 0 or +1, rendered
    as '-', 'b' (blank) or '+'.  Returns (roll_string, total, error)."""
    pattern = re.compile(r"^(?P<num>[0-9]+)dF$")
    match = pattern.match(message)
    if match is None:
        return None, None, None
    faces = ['-', 'b', '+']
    total = 0
    shown = []
    for _ in range(int(match.group('num'))):
        outcome = random.randint(-1, 1)
        total += outcome
        shown.append(faces[outcome + 1])
    return f"({''.join(shown)})", total, None
async def just_an_int(message):
    """Treat a bare (optionally negative) integer literal as its own value."""
    int_pattern = re.compile(r"^-?[0-9]+$")
    if int_pattern.match(message) is None:
        return None, None, None
    return message, int(message), None
async def just_a_float(message):
    """Treat a bare (optionally negative) decimal literal as its own value.

    The decimal point is escaped in the pattern: the original used a bare
    '.', which matches any character, so inputs like '1x5' matched and
    float('1x5') then raised ValueError.
    """
    roll_string = r"-?[0-9]+\.[0-9]+"
    dice_roll = re.compile(rf"^{roll_string}$")
    match = dice_roll.match(message)
    if not match:
        return None, None, None
    return message, float(message), None
# Ordered list of roll handlers; each is tried in turn until one matches the
# message.  Every handler returns (roll_string, total, error), with
# (None, None, None) meaning "didn't match, try the next handler".
options = [
    basic_roll,
    drop_dice,
    explode_dice,
    reroll_dice,
    fate_roll,
    just_an_int,
    just_a_float
]
# Suffix grammar for success-counting rolls: a comparison plus threshold
# (e.g. '>=5'), an optional 'count twice' clause ('t...') and an optional
# 'failure' clause ('f...'), each given either as explicit face values or as
# a comparison condition.
count_suffix = r"(?P<operator>>|<|>=|<=|=)(?P<comp_num>[0-9]+)((?P<twice_vals>(t[0-9]+)+)|t(?P<twice_cond><|>|<=|>=)(?P<twice_cond_val>[0-9]+))?((?P<failure_vals>(f[0-9]+)+)|f(?P<failure_cond><|>|<=|>=)(?P<failure_cond_val>[0-9]+))?"
| [
"macro_utils.expand_macro",
"random.randint",
"numpy.argmax",
"random.choice",
"numpy.argmin",
"re.compile"
] | [((1742, 1789), 're.compile', 're.compile', (['"""(?P<roll>.*?) *# *(?P<comment>.*)"""'], {}), "('(?P<roll>.*?) *# *(?P<comment>.*)')\n", (1752, 1789), False, 'import random, re, numpy, operator\n'), ((4178, 4222), 're.compile', 're.compile', (['f"""^{roll_string}{count_suffix}$"""'], {}), "(f'^{roll_string}{count_suffix}$')\n", (4188, 4222), False, 'import random, re, numpy, operator\n'), ((4830, 4874), 're.compile', 're.compile', (['f"""^{roll_string}{count_suffix}$"""'], {}), "(f'^{roll_string}{count_suffix}$')\n", (4840, 4874), False, 'import random, re, numpy, operator\n'), ((7569, 7599), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (7579, 7599), False, 'import random, re, numpy, operator\n'), ((8325, 8355), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (8335, 8355), False, 'import random, re, numpy, operator\n'), ((9737, 9767), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (9747, 9767), False, 'import random, re, numpy, operator\n'), ((11909, 11939), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (11919, 11939), False, 'import random, re, numpy, operator\n'), ((14222, 14252), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (14232, 14252), False, 'import random, re, numpy, operator\n'), ((14699, 14729), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (14709, 14729), False, 'import random, re, numpy, operator\n'), ((14944, 14974), 're.compile', 're.compile', (['f"""^{roll_string}$"""'], {}), "(f'^{roll_string}$')\n", (14954, 14974), False, 'import random, re, numpy, operator\n'), ((793, 866), 're.compile', 're.compile', (['""" *repeat *\\\\( *(?P<command>.*) *, *(?P<count>[0-9]+) *\\\\) *"""'], {}), "(' *repeat *\\\\( *(?P<command>.*) *, *(?P<count>[0-9]+) *\\\\) *')\n", (803, 866), False, 'import random, re, 
numpy, operator\n'), ((875, 929), 're.compile', 're.compile', (['""" *rp(?P<count>[0-9]+) +(?P<command>.*) *"""'], {}), "(' *rp(?P<count>[0-9]+) +(?P<command>.*) *')\n", (885, 929), False, 'import random, re, numpy, operator\n'), ((14485, 14506), 'random.randint', 'random.randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (14499, 14506), False, 'import random, re, numpy, operator\n'), ((695, 724), 'random.choice', 'random.choice', (['error_messages'], {}), '(error_messages)\n', (708, 724), False, 'import random, re, numpy, operator\n'), ((8009, 8036), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (8023, 8036), False, 'import random, re, numpy, operator\n'), ((8653, 8680), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (8667, 8680), False, 'import random, re, numpy, operator\n'), ((9007, 9026), 'numpy.argmin', 'numpy.argmin', (['rolls'], {}), '(rolls)\n', (9019, 9026), False, 'import random, re, numpy, operator\n'), ((9334, 9353), 'numpy.argmax', 'numpy.argmax', (['rolls'], {}), '(rolls)\n', (9346, 9353), False, 'import random, re, numpy, operator\n'), ((10534, 10561), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (10548, 10561), False, 'import random, re, numpy, operator\n'), ((12612, 12639), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (12626, 12639), False, 'import random, re, numpy, operator\n'), ((13430, 13457), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (13444, 13457), False, 'import random, re, numpy, operator\n'), ((2239, 2282), 'macro_utils.expand_macro', 'macro_utils.expand_macro', (['ctx', 'calculate[i]'], {}), '(ctx, calculate[i])\n', (2263, 2282), False, 'import macro_utils\n'), ((11145, 11172), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (11159, 11172), False, 'import random, re, numpy, operator\n'), ((11444, 11471), 'random.randint', 
'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (11458, 11471), False, 'import random, re, numpy, operator\n'), ((12897, 12924), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (12911, 12924), False, 'import random, re, numpy, operator\n'), ((13730, 13757), 'random.randint', 'random.randint', (['(1)', 'max_dice'], {}), '(1, max_dice)\n', (13744, 13757), False, 'import random, re, numpy, operator\n')] |
from collections import deque, namedtuple
import operator
import datetime
import numpy as np
import pandas as pa
import inspect
import types
import sys
import cython
from .nodes import (
MDFNode,
MDFEvalNode,
MDFIterator,
MDFIteratorFactory,
MDFCallable,
_isgeneratorfunction,
_is_member_of,
_get_func_name,
now,
)
from .context import MDFContext, _get_current_context
from .ctx_pickle import _unpickle_custom_node, _pickle_custom_node
from .parser import get_assigned_node_name
from .common import DIRTY_FLAGS
# Major Python version, declared as a C int for cython-compiled builds.
_python_version = cython.declare(int, sys.version_info[0])
@cython.cfunc
def _dict_iteritems(d):
    # Py2/Py3 compatible lazy iteration over a dict's (key, value) pairs.
    if _python_version > 2:
        return iter(d.items())
    return d.iteritems()
# strings are used as dict lookups using nans and np.float64 can be problematic
# (NaN != NaN, so the str() form is used as a stable, hashable stand-in when
# building derived-node cache keys below)
_special_floats = cython.declare(dict, {
    str(np.nan): "1.#QNAN",
    str(np.inf): "1.#INF",
    str(-np.inf): "-1.#INF"
})
class MDFCustomNodeIteratorFactory(MDFIteratorFactory):
    """Factory producing :class:`MDFCustomNodeIterator` instances bound to a
    single custom node; used as the node's eval function when either the
    node function or the node type function is a generator."""

    def __init__(self, custom_node):
        self.custom_node = custom_node

    @property
    def func(self):
        # the wrapped node value function
        return self.custom_node._custom_iterator_func

    @property
    def func_doc(self):
        # prefer an explicit func_doc attribute, fall back to __doc__
        doc = getattr(self.func, "func_doc", None)
        if doc is not None:
            return doc
        return getattr(self.func, "__doc__", None)

    @property
    def node_type_func(self):
        return self.custom_node._custom_iterator_node_type_func

    def __call__(self):
        return MDFCustomNodeIterator(self.custom_node)
class MDFCustomNodeIterator(MDFIterator):
    """Iterator driving a custom node: it pulls a value from the node's
    wrapped function (plain function or generator) and feeds it through the
    node type function (plain function or generator/co-routine)."""

    def __init__(self, custom_node):
        self.custom_node = custom_node
        self.func = custom_node._custom_iterator_func
        self.node_type_func = self.custom_node._custom_iterator_node_type_func
        # lazily created generators - created on the first call to next()
        self.value_generator = None
        self.is_generator = _isgeneratorfunction(self.func)
        self.node_type_is_generator = _isgeneratorfunction(self.node_type_func)
        self.node_type_generator = None

    def __iter__(self):
        return self

    def next(self):
        # get the current value of the wrapped node function (or None if
        # the node type doesn't want the value evaluated at all)
        if self.custom_node._call_with_no_value:
            value = None
        else:
            if self.is_generator:
                if not self.value_generator:
                    self.value_generator = self.func()
                value = next(self.value_generator)
            else:
                value = self.func()
        if self.node_type_is_generator:
            if not self.node_type_generator:
                # create the new node type generator and return
                kwargs = self.custom_node._get_kwargs()
                self.node_type_generator = self.node_type_func(value, **kwargs)
                return next(self.node_type_generator)
            # on subsequent iterations the value is fed into the co-routine
            return self.node_type_generator.send(value)
        # node type is plain function
        kwargs = self.custom_node._get_kwargs()
        return self.custom_node._custom_iterator_node_type_func(value, **kwargs)
class MDFCustomNode(MDFEvalNode):
    """
    subclass of MDFEvalNode that forms the base for all over custom
    node types.
    """
    # override this in a subclass if any kwargs should be passed as nodes
    # instead of being evaluated
    nodetype_node_kwargs = set()
    # override this is the function being wrapped by the subclass can't
    # be inspected (eg is a cython function) and it takes some keyword
    # arguments.
    nodetype_kwargs = None
    # if set to True on the subclass the first parameter passed to the node
    # type function will always be none and the underlying node won't be
    # evaluated.
    call_with_no_value = False
    def __init__(self,
                 func,
                 node_type_func=None,
                 name=None,
                 short_name=None,
                 fqname=None,
                 cls=None,
                 category=None,
                 filter=None,
                 base_node=None, # set if created via MDFCustomNodeMethod
                 base_node_method_name=None,
                 nodetype_func_kwargs={}):
        if isinstance(func, MDFCustomNodeIteratorFactory):
            node_type_func = func.node_type_func
            func = func.func
        self._base_node = base_node
        self._base_node_method_name = base_node_method_name
        self._node_type_func = node_type_func
        self._cn_func = self._validate_func(func)
        self._category = category
        self._kwargs = dict(nodetype_func_kwargs)
        self._kwnodes = dict([(k, v) for (k, v) in _dict_iteritems(nodetype_func_kwargs) if isinstance(v, MDFNode)])
        self._kwfuncs = {} # reserved for functions added via decorators
        # if 'filter_node_value' is in the node type generator args we pass in the value of the filter
        kwargs = self._get_nodetype_func_kwargs(False)
        self._call_with_filter_node = "filter_node" in kwargs
        self._call_with_filter = "filter_node_value" in kwargs
        self._call_with_self = "owner_node" in kwargs
        self._call_with_no_value = self.call_with_no_value
        # if either function is a generator the node is evaluated via an
        # iterator factory instead of a plain eval function
        eval_func = self._cn_eval_func
        if _isgeneratorfunction(node_type_func) or _isgeneratorfunction(func):
            eval_func = MDFCustomNodeIteratorFactory(self)
        MDFEvalNode.__init__(self,
                             eval_func,
                             name=name or self._get_func_name(func),
                             short_name=short_name,
                             fqname=fqname,
                             cls=cls,
                             category=category,
                             filter=filter)
        # set func_doc from the inner function's docstring
        self.func_doc = getattr(func, "func_doc", None)
        if self.func_doc is None:
            self.func_doc = getattr(func, "__doc__", None)
    def __reduce__(self):
        """support for pickling"""
        kwargs = dict(self._kwargs)
        # add filter and category to the kwargs
        filter = self.get_filter()
        if filter is not None:
            kwargs["filter"] = filter
        if self._category is not None:
            kwargs["category"] = self._category
        return (
            _unpickle_custom_node,
            _pickle_custom_node(self, self._base_node, self._base_node_method_name, kwargs),
            None,
            None,
            None,
        )
    def _get_nodetype_func_kwargs(self, remove_special=True):
        """return a list of named arguments for the node type function"""
        kwargs = self.nodetype_kwargs
        if kwargs is None:
            # try and get them from the func/iterator object
            node_type_func = self._node_type_func
            try:
                kwargs = node_type_func._init_kwargs_
            except AttributeError:
                pass
            if kwargs is None:
                # introspect the function, or its __init__ if it's a class.
                # NOTE: isinstance(..., type) replaces the Python 2 only
                # 'types.TypeType' (removed in Python 3).
                if isinstance(node_type_func, type):
                    node_type_func = node_type_func.__init__
                # inspect.getargspec was removed in Python 3.11; prefer
                # getfullargspec when it's available
                getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
                try:
                    argspec = getargspec(node_type_func).args
                except TypeError:
                    return []
                # drop 'self'/'value' - the first arg isn't a kwarg
                kwargs = argspec[1:]
        # remove 'special' kwargs
        if remove_special:
            for special in ("filter_node", "filter_node_value", "owner_node"):
                if special in kwargs:
                    kwargs = list(kwargs)
                    kwargs.remove(special)
        return kwargs
    def __getattr__(self, attr):
        # give the superclass a go first
        try:
            # super doesn't work well with cython
            return MDFEvalNode.__getattr__(self, attr)
        except AttributeError:
            pass
        # return a decorator function for setting kwargs for the inner function
        # if attr is in the argspec for the node function
        if attr.startswith("_") or self._node_type_func is None:
            raise AttributeError(attr)
        kwargs = self._get_nodetype_func_kwargs()
        if attr not in kwargs:
            raise AttributeError(attr)
        def _make_decorator(attr):
            # the decorator takes either a value, function or node
            # do you can do things like:
            #
            # @mynodetype
            # def func():
            #    ...
            #
            # @func.some_kwarg
            # def kwarg_value():
            #    ...
            #
            def _decorator(func):
                self._kwargs[attr] = func
                if isinstance(func, MDFNode):
                    self._kwnodes[attr] = func
                elif isinstance(func, types.FunctionType):
                    self._kwfuncs[attr] = func
                return func
            return _decorator
        return _make_decorator(attr)
    @property
    def node_type(self):
        """returns the name of the node type of this node"""
        try:
            return _get_func_name(self._node_type_func)
        except AttributeError:
            return "customnode"
    @property
    def func(self):
        return self._cn_func
    @property
    def base_node(self):
        """node this custom node was derived from if created via a method call."""
        return self._base_node
    #
    # Properties for use with MDFCustomNodeIterator
    #
    # The iterator uses these instead of being constructed with them
    # as it needs to be pickleable, and so by keeping a reference
    # to the node that can be unpickled more easily than a function.
    #
    @property
    def _custom_iterator_node_type_func(self):
        return self._node_type_func
    @property
    def _custom_iterator_func(self):
        return self._cn_func
    def _set_func(self, func):
        """set the underlying MDFEvalNode function, switching to an iterator
        factory when either function involved is a generator"""
        if _isgeneratorfunction(func) or _isgeneratorfunction(self._node_type_func):
            MDFEvalNode._set_func(self, MDFCustomNodeIteratorFactory(self))
        else:
            MDFEvalNode._set_func(self, self._cn_eval_func)
        # update the docstring
        self.func_doc = getattr(func, "func_doc", None)
        if self.func_doc is None:
            self.func_doc = getattr(func, "__doc__", None)
        # set the func used by this class
        self._cn_func = func
    def _bind(self, other_evalnode, owner):
        """bind this (class-level) node to an owner class, re-binding any
        kwarg nodes/functions that are members of the owner"""
        other = cython.declare(MDFCustomNode)
        other = other_evalnode
        MDFEvalNode._bind(self, other, owner)
        self._node_type_func = other._node_type_func
        func = self._bind_function(other._cn_func, owner)
        self._set_func(func)
        # copy the kwargs
        self._kwargs = dict(other._kwargs)
        # bind the eval nodes (won't do anything if they're already bound)
        self._kwnodes = {}
        for k, node in _dict_iteritems(other._kwnodes):
            if isinstance(node, MDFEvalNode) and _is_member_of(owner, node):
                self._kwnodes[k] = node.__get__(None, owner)
            else:
                self._kwnodes[k] = node
        # bind the functions in case they're classmethods
        self._kwfuncs = {}
        for k, func in _dict_iteritems(other._kwfuncs):
            if _is_member_of(owner, func):
                self._kwfuncs[k] = self._bind_function(func, owner)
        # set the docstring for the bound node to the same as the unbound one
        self.func_doc = other.func_doc
    def _get_kwargs(self):
        """return the kwargs to pass to the node type function, evaluating
        any node/function valued kwargs and adding the 'special' kwargs
        (filter_node, filter_node_value, owner_node) when requested"""
        kwargs = cython.declare(dict)
        kwargs = self._kwargs
        if self._kwnodes or self._kwfuncs:
            kwargs = dict(kwargs)
            node = cython.declare(MDFNode)
            for key, node in _dict_iteritems(self._kwnodes):
                if key not in self.nodetype_node_kwargs:
                    kwargs[key] = node()
                else:
                    kwargs[key] = node
            for key, value in _dict_iteritems(self._kwfuncs):
                kwargs[key] = value()
        # if the filter value should be passed in as a kwarg
        # add it in now (defaulting to True)
        if self._call_with_filter:
            filter_node = self.get_filter()
            filter_node_value = True
            if filter_node is not None:
                filter_node_value = filter_node()
            kwargs["filter_node_value"] = filter_node_value
        if self._call_with_filter_node:
            kwargs["filter_node"] = self.get_filter()
        if self._call_with_self:
            kwargs["owner_node"] = self
        return kwargs
    def _cn_eval_func(self):
        """evaluate the node: get the inner node value and pass it through
        the node type function (used when neither function is a generator)"""
        # get the inner node value
        if self._call_with_no_value:
            value = None
        else:
            value = self._cn_func()
        # and call the node type function
        kwargs = cython.declare(dict)
        kwargs = self._get_kwargs()
        return self._node_type_func(value, **kwargs)
class MDFCustomNodeMethod(object):
    """
    Callable object that is added to MDFNode's set of attributes
    to work as an additional method for calling node types
    directly from a node rather than explicitly creating
    derived nodes.

    eg: instead of::

        @delaynode(periods=10)
        def my_delayed_node():
            return my_node()

    it is possible to simply call::

        my_node.delay(periods=10)
    """
    # this class behaves like a method, but types.MethodType isn't subclass-able
    def __init__(self,
                 node_type_func,
                 node_cls,
                 method_name,
                 node=None,
                 call=True): # if call is True __call__ will call the derived node
        self._node_type_func = node_type_func
        self._node_cls = node_cls
        self._method_name = method_name
        self._node = node
        self._call = call
        # derived nodes are cached on the node they're derived from
        self._derived_nodes = node._derived_nodes if node else {}
    def __get__(self, instance, cls=None):
        """descriptor protocol: return a copy of this method bound to the
        node instance it's accessed on"""
        if instance and self._node is instance:
            return self
        return MDFCustomNodeMethod(self._node_type_func,
                                   self._node_cls,
                                   self._method_name,
                                   node=instance,
                                   call=self._call)
    def __repr__(self):
        # try to get the args directly from the iterator (if it is one)
        args = self._node_cls.nodetype_kwargs
        if args is None:
            try:
                args = self._node_type_func._init_kwargs_
            except AttributeError:
                args = None
        if args is None:
            # otherwise try and use inspect to get the args, but this won't
            # work for cythoned functions.
            # NOTE: isinstance(..., type) replaces the Python 2 only
            # 'types.TypeType' which doesn't exist on Python 3, and
            # getfullargspec is preferred since getargspec was removed
            # in Python 3.11.
            getargspec = getattr(inspect, "getfullargspec", inspect.getargspec)
            try:
                if isinstance(self._node_type_func, type):
                    args = getargspec(self._node_type_func.__init__).args[2:]
                else:
                    args = getargspec(self._node_type_func).args[1:]
            except TypeError:
                args = ["..."]
        args = ", ".join(args)
        if self._node:
            return "<MDFCustomNodeMethod %s.%s(%s)>" % (self._node.name,
                                                        self._method_name,
                                                        args)
        return "<unbound MDFCustomNodeMethod %s(%s)>" % (self._method_name, args)
    def __call__(self,
                 name=None,
                 short_name=None,
                 filter=None,
                 category=None,
                 **kwargs):
        # get the derived node and call it
        derived_node = self._get_derived_node(name=name,
                                              short_name=short_name,
                                              filter=filter,
                                              category=category,
                                              nodetype_func_kwargs=kwargs)
        if self._call:
            return derived_node()
        return derived_node
    def _get_derived_node(self,
                          name=None,
                          short_name=None,
                          filter=None,
                          category=None,
                          nodetype_func_kwargs={}):
        """
        return a new or cached node made from the base node with
        the node type func applied
        """
        # find the derived node for these arguments
        derived_node_key = cython.declare(tuple)
        derived_node = cython.declare(MDFEvalNode)
        # replace any special floats with string versions so they compare correctly
        # when looking for an existing node (NaN != NaN so it can't be used
        # directly in a cache key)
        kwargs_in_key = []
        for key, value in _dict_iteritems(nodetype_func_kwargs):
            if value != value:
                try:
                    value = _special_floats[str(value)]
                except KeyError:
                    value = "1.#%s" % value
            kwargs_in_key.append((key, value))
        derived_node_key = (self._node_type_func,
                            self._node_cls,
                            filter,
                            category,
                            frozenset(kwargs_in_key))
        try:
            derived_node = self._derived_nodes[derived_node_key]
        except KeyError:
            # use name and short_name if present. If name is present but short_name
            # isn't, use name for short_name too.
            if short_name is None and name is not None:
                short_name = name
            # get all kwargs for the node func and the others passed to this func
            # to build the node name
            if name is None:
                kwargs = dict(nodetype_func_kwargs)
                if filter is not None:
                    kwargs["filter"] = filter
                kwarg_strs = [None] * len(kwargs)
                short_kwarg_strs = [None] * len(kwargs)
                for i, (k, v) in enumerate(sorted(kwargs.items())):
                    vs = v
                    if isinstance(v, MDFNode):
                        vs = v.short_name
                        v = v.name
                    kwarg_strs[i] = "%s=%s" % (k, v)
                    short_kwarg_strs[i] = "%s=%s" % (k, vs)
                args = ", ".join(kwarg_strs)
                name = "%s.%s(%s)" % (self._node.name, self._method_name, args)
                if short_name is None:
                    short_args = ", ".join(short_kwarg_strs)
                    short_name = "%s.%s(%s)" % (self._node.short_name,
                                                self._method_name,
                                                short_args)
            derived_node = self._node_cls(self._node.__call__,
                                          self._node_type_func,
                                          name=name,
                                          short_name=short_name,
                                          fqname=name,
                                          category=category,
                                          base_node=self._node,
                                          base_node_method_name=self._method_name,
                                          filter=filter,
                                          nodetype_func_kwargs=nodetype_func_kwargs)
            # update the docstring
            derived_node.func_doc = "\n".join(("*Derived Node* ::", "",
                                               "    " + short_name, "",
                                               derived_node.func_doc or "")).strip()
            self._derived_nodes[derived_node_key] = derived_node
        return derived_node
class MDFCustomNodeDecorator(object):
    """
    decorator that applies a custom node type to a function to
    create a node.
    """
    def __init__(self,
                 node_type_func,
                 node_type_cls,
                 name=None,
                 short_name=None,
                 filter=None,
                 category=None,
                 kwargs={}):
        """
        functor type object that can be used as a decorator to create an
        instance of 'node_type_cls' with 'node_type_func'
        """
        self.func = node_type_func
        self.__node_type_cls = node_type_cls
        self.__filter = filter
        self.__name = name
        self.__short_name = short_name
        self._category = category
        self._kwargs = dict(kwargs)
        # set the docs for this object to the same as the underlying function
        if hasattr(node_type_func, "__doc__"):
            self.__doc__ = node_type_func.__doc__
    def __call__(self,
                 _func=None,
                 name=None,
                 short_name=None,
                 filter=None,
                 category=None,
                 **kwargs):
        """
        If func is None return a copy of self with name, short_name,
        category, filter and kwargs bound to what's passed in.
        Otherwise if func is not None decorate func with the node type.
        """
        # fall back to the values captured when this decorator was
        # parameterized.  The original only did this for filter/category;
        # name/short_name were stored by __init__ but then never used,
        # so a name given when parameterizing the decorator was lost.
        name = name or self.__name
        short_name = short_name or self.__short_name
        filter = filter or self.__filter
        category = category or self._category
        kwargs = kwargs or self._kwargs
        if _func is None:
            return MDFCustomNodeDecorator(self.func,
                                          self.__node_type_cls,
                                          name,
                                          short_name,
                                          filter,
                                          category,
                                          kwargs)
        node = self.__node_type_cls(_func,
                                    self.func,
                                    name=name,
                                    short_name=short_name,
                                    category=category,
                                    filter=filter,
                                    nodetype_func_kwargs=kwargs)
        return node
def nodetype(func=None, cls=MDFCustomNode, method=None):
    """
    decorator for creating a custom node type::

        #
        # create a new node type 'new_node_type'
        #
        @nodetype
        def new_node_type(value, fast, slow):
            return (value + fast) * slow

        #
        # use the new type to create a node
        #
        @new_node_type(fast=1, slow=10)
        def my_node():
            return some_value

        # ctx[my_node] returns new_node_type(value=my_node(), fast=1, slow=10)

    The node type function takes the value of the decorated node
    and any other keyword arguments that may be supplied when
    the node is created.

    The node type function may be a plain function, in which case
    it is simply called for every evaluation of the node, or it
    may be a co-routine in which case it is sent the new value
    for each iteration::

        @nodetype
        def nansumnode(value):
            accum = 0.
            while True:
                accum = np.nansum([value, accum])
                value = yield accum

        @nansumnode
        def my_nansum_node():
            return some_value

    The kwargs passed to the node decorator may be values (as shown above)
    or nodes which will be evaluated before the node type function is
    called.

    Nodes defined using the @nodetype decorator may be applied to
    classmethods as well as functions and also support the standard
    node kwargs 'filter' and 'category'.

    Node types may also be used to add methods to the MDFNode class
    (See :ref:`nodetype_method_syntax`)::

        @nodetype(method="my_nodetype_method")
        def my_nodetype(value, scale=1):
            return value * scale

        @evalnode
        def x():
            return ...

        @my_nodetype(scale=10)
        def y():
            return x()

        # can be re-written as:
        y = x.my_nodetype_method(scale=10)
    """
    # called as @nodetype(cls=..., method=...): return a decorator
    if func is None:
        return lambda f: nodetype(f, cls, method)
    if method is not None:
        # expose the node type on MDFNode twice: 'method' evaluates the
        # derived node, 'methodnode' returns the derived node itself
        MDFNode._additional_attrs_[method] = MDFCustomNodeMethod(func, cls, method, call=True)
        MDFNode._additional_attrs_[method + "node"] = MDFCustomNodeMethod(func, cls, method, call=False)
    return MDFCustomNodeDecorator(func, cls)
class MDFQueueNode(MDFCustomNode):
    # distinct node class used for @queuenode (see the nodetype call below);
    # no behavior is added beyond MDFCustomNode
    pass
class _queuenode(MDFIterator):
    """
    Decorator for creating an :py:class:`MDFNode` that accumulates
    values in a `collections.deque` each time the context's date
    is advanced.

    The values that are accumulated are the results of the function
    `func`. `func` is a node function and takes no arguments.

    If `size` is specified the queue will grow to a maximum of that size
    and then values will be dropped off the queue (FIFO).

    `size` may either be a value or a callable (i.e. a function or a node)::

        @queuenode(size=10)
        def node():
            return x

    or::

        # could be an evalnode also
        queue_size = varnode("queue_size", 10)

        @queunode(size=queue_size)
        def node():
            return x

    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::

        @evalnode
        def some_value():
            return ...

        @evalnode
        def node():
            return some_value.queue(size=5)
    """
    _init_kwargs_ = ["filter_node_value", "size", "as_list"]

    def __init__(self, value, filter_node_value, size=None, as_list=False):
        # bound the queue to at least one element; None means unbounded
        max_len = None if size is None else max(size, 1)
        self.as_list = as_list
        # create the queue used for the queue data
        self.queue = deque([], max_len)
        # only include the current value if the filter is
        # True (or if there's no filter being applied)
        if filter_node_value:
            self.queue.append(value)

    def _current(self):
        # return the queue in the requested form
        return list(self.queue) if self.as_list else self.queue

    def next(self):
        return self._current()

    def send(self, value):
        self.queue.append(value)
        return self._current()
# decorators don't work on cythoned classes
# this also registers 'queue'/'queuenode' as methods on MDFNode via the
# 'method' kwarg (see nodetype above)
queuenode = nodetype(_queuenode, cls=MDFQueueNode, method="queue")
class MDFDelayNode(MDFCustomNode):
"""
MDFDelayedNode is different from other MDFCustomNodes.
The value passed to the node function is actually the
value *from the previous day*.
This breaks the recursion for nodes that want to access
a delayed version of themself, eg::
@delaynode(periods=1, initial_value=0, lazy=True)
def delayed_foo():
return foo()
@evalnode
def foo():
return 1 + delayed_foo()
Here calling delayed_foo doesn't causes a recursive call to
foo since MDFDelayedNode doesn't call the function
immediately, it waits until the timestep is about to be
advanced.
"""
PerCtxData = namedtuple("PerCtxData", ["value", "generator", "date", "is_valid"])
    def __init__(self,
                 func,
                 node_type_func=None,
                 name=None,
                 short_name=None,
                 fqname=None,
                 cls=None,
                 category=None,
                 filter=None,
                 nodetype_func_kwargs={},
                 **kwargs):
        """Set up a delayed node, unwrapping an iterator factory if given."""
        if isinstance(func, MDFCustomNodeIteratorFactory):
            node_type_func = func.node_type_func
            func = func.func
        # keep the original (un-delayed) function and per-context state
        self._dn_func = func
        self._dn_per_ctx_data = {}
        self._dn_is_generator = _isgeneratorfunction(func)
        self._dn_lazy = nodetype_func_kwargs.get("lazy", False)
        #
        # the node is initialized either just with the plain node function
        # (if lazy is False), or with the _dn_get_prev_value method if
        # lazy is true. The previous value is evaluated during
        # MDFContext.set_date by the _on_set_date callback.
        #
        MDFCustomNode.__init__(self,
                               self._dn_get_prev_value if self._dn_lazy else func,
                               node_type_func,
                               name=name or self._get_func_name(func),
                               short_name=short_name,
                               fqname=fqname,
                               cls=cls,
                               category=category,
                               filter=filter,
                               nodetype_func_kwargs=nodetype_func_kwargs,
                               **kwargs)
    @property
    def func(self):
        # the original (un-delayed) node function
        return self._dn_func
def clear_value(self, ctx):
try:
del self._dn_per_ctx_data[ctx._id]
except KeyError:
pass
MDFCustomNode.clear_value(self, ctx)
    def clear(self, ctx):
        # clear the cached delay state/value first, then the base node state
        self.clear_value(ctx)
        MDFCustomNode.clear(self, ctx)
    def _bind(self, other_node, owner):
        """Bind this class-level delay node to *owner*, copying the delay
        configuration from the unbound node and re-binding its function."""
        other = cython.declare(MDFDelayNode)
        other = other_node
        MDFCustomNode._bind(self, other, owner)
        self._dn_func = self._bind_function(other._dn_func, owner)
        self._dn_is_generator = other._dn_is_generator
        self._dn_lazy = other._dn_lazy
        # lazy nodes evaluate via _dn_get_prev_value, non-lazy ones directly
        func = self._dn_get_prev_value if self._dn_lazy else self._dn_func
        self._set_func(func)
        # set the docstring for the bound node to the same as the unbound one
        self.func_doc = other.func_doc
    def _dn_get_prev_value(self):
        """Return the delayed (previous-timestep) value for the current context.

        This is the node function used when the node is lazy.
        """
        # The value returned on date 'now' is the value for the previous day.
        # This means that the value for now doesn't have to be calculated
        # until just before now is advanced. This breaks the recursion
        # of functions that want to call a node that is a delayed version
        # of the same node.
        ctx = _get_current_context()
        alt_ctx = self.get_alt_context(ctx)
        try:
            data = self._dn_per_ctx_data[alt_ctx._id]
            if data.is_valid:
                return data.value
        except KeyError:
            pass
        # nothing captured yet for this context - fall back to the
        # 'initial_value' nodetype kwarg
        kwargs = self._get_kwargs()
        return kwargs["initial_value"]
    def on_set_date(self, ctx_, date):
        """called just before 'now' is advanced

        For lazy delay nodes this captures the current value of the underlying
        function so _dn_get_prev_value can return it after the date changes.
        Returns True when the node's value will change once the date change
        completes (so the caller can mark it dirty).
        """
        ctx = cython.declare(MDFContext)
        ctx = ctx_
        # if not lazy there's nothing to do
        if not self._dn_lazy:
            return False
        # grab the original alt ctx before it's modified by calling the node func
        # NOTE(review): orig_alt_ctx appears unused below - possibly kept only
        # for the side effects of get_alt_context; confirm before removing.
        orig_alt_ctx = self.get_alt_context(ctx)
        if date > ctx._now:
            # if this node hasn't been valued before, clear any previous
            # alt context set as the dependencies wouldn't have been set
            # up when the alt ctx was determined
            if ctx._id not in self._dn_per_ctx_data:
                self._reset_alt_context(ctx)
            # if there's a filter set don't update the previous value unless
            # the filter returns True
            filter = self.get_filter()
            if filter is not None and not filter():
                return False
            # if there's already a value for this date then don't do anything
            alt_ctx = self.get_alt_context(ctx)
            alt_data = self._dn_per_ctx_data.get(alt_ctx._id)
            if alt_data and alt_data.is_valid and alt_data.date == date:
                return False
            # get the current value of the node function/generator
            generator = None
            if self._dn_is_generator:
                if alt_data:
                    generator = alt_data.generator
                if not generator:
                    generator = self._dn_func()
                value = next(generator)
            else:
                value = self._dn_func()
            # get the alt context again as it could have changed because of new
            # dependencies added when calling the node function/generator
            alt_ctx = self.get_alt_context(ctx)
            # store the generator and value in the alt_ctx, and put an invalid entry
            # in the original context so this context doesn't get its alt ctx reset next time
            self._dn_per_ctx_data[alt_ctx._id] = self.PerCtxData(value, generator, date, True)
            if alt_ctx is not ctx:
                self._dn_per_ctx_data[ctx._id] = self.PerCtxData(None, None, date, False)
        elif date < ctx._now:
            # date is moving backwards - discard captured state everywhere
            self.clear_value(ctx)
            alt_ctx = self.get_alt_context(ctx)
            if alt_ctx is not ctx:
                self.clear_value(alt_ctx)
        # return True to indicate the value of this node will change after the date has
        # finished being changed.
        return True
class _delaynode(MDFIterator):
    """
    Decorator for creating an :py:class:`MDFNode` that delays
    values for a number of periods corresponding to each time
    the context's date is advanced.

    The values that are delayed are the results of the function
    `func`. `func` is a node function and takes no arguments.

    ``periods`` is the number of timesteps to delay the value by.
    ``initial_value`` is the value of the node to be used before
    the specified number of periods have elapsed.

    `periods`, `initial_value` and `filter` may be plain values or
    callables (e.g. nodes or functions)::

        @delaynode(periods=5)
        def node():
            return x

    If ``lazy`` is True the node value is calculated after any calling
    nodes have returned, which lets a node call a delayed version of
    itself without infinite recursion. The default is False since lazy
    evaluation can hide dependencies when the node is first evaluated.

    Can also be used via the nodetype method syntax
    (see :ref:`nodetype_method_syntax`)::

        @evalnode
        def node():
            return some_value.delay(periods=5)
    """
    _init_kwargs_ = ["filter_node_value", "periods", "initial_value", "lazy", "ffill"]
    def __init__(self, value, filter_node_value, periods=1,
                 initial_value=None, lazy=False, ffill=False):
        self.lazy = lazy
        self.skip_nans = ffill
        # broadcast a scalar initial value to match a vector value
        if isinstance(initial_value, (int, float)):
            if isinstance(value, pa.Series):
                initial_value = pa.Series(initial_value, index=value.index, dtype=value.dtype)
            elif isinstance(value, np.ndarray):
                broadcast = np.ndarray(value.shape, dtype=value.dtype)
                broadcast.fill(initial_value)
                initial_value = broadcast
        if lazy:
            # NOTE: when lazy the value is *already delayed by 1* (see MDFDelayNode)
            assert periods is not None and periods > 0, "lazy delay nodes must have 'periods' set to > 0"
            queue_len = periods
        else:
            # a 0-period delay still needs a queue of length 1
            assert periods is not None and periods >= 0, "delay nodes must have 'periods' set to >= 0"
            queue_len = periods + 1
        # bounded queue pre-filled with the initial value; the delayed value
        # is always the oldest element
        self.queue = deque([initial_value] * queue_len, queue_len)
        # lazy nodes are filtered in MDFDelayNode.on_set_date using the
        # previous filter value, so always accept the first value when lazy.
        if filter_node_value or lazy:
            self.send(value)
    def _is_all_nan(self, value):
        # True when value is a NaN float or an all-NaN array
        if isinstance(value, float):
            return bool(np.isnan(value))
        if isinstance(value, np.ndarray):
            return bool(np.isnan(value).all())
        return False
    def next(self):
        return self.queue[0]
    def send(self, value):
        # when ffill is requested, NaN values don't displace queued history
        if not (self.skip_nans and self._is_all_nan(value)):
            self.queue.append(value)
        return self.queue[0]
# decorators don't work on cythoned classes
# (registers _delaynode as the 'delay' node type, backed by MDFDelayNode,
# and makes it available as the .delay() method on nodes)
delaynode = nodetype(_delaynode, cls=MDFDelayNode, method="delay")
class MDFSampleNode(MDFCustomNode):
    """Custom node class backing the 'sample' node type (see _samplenode)."""
    # always pass date_node as the node rather than evaluate it
    nodetype_node_kwargs = ["date_node"]
class _samplenode(MDFIterator):
    """
    Captures the value of the underlying node whenever the date node
    lands on the given offset, and keeps yielding that sample until
    the next offset date.

    offset is a pandas.datetools.DateOffset instance,
    eg pandas.datetools.BMonthEnd()
    """
    _init_kwargs_ = ["filter_node_value", "offset", "date_node", "initial_value"]
    def __init__(self, value, filter_node_value, offset, date_node=now, initial_value=None):
        self._offset = offset
        self._date_node = date_node
        # broadcast a scalar initial value to match a vector value
        if isinstance(initial_value, (int, float)):
            if isinstance(value, pa.Series):
                initial_value = pa.Series(initial_value, index=value.index, dtype=value.dtype)
            elif isinstance(value, np.ndarray):
                broadcast = np.ndarray(value.shape, dtype=value.dtype)
                broadcast.fill(initial_value)
                initial_value = broadcast
        self._sample = initial_value
        if filter_node_value:
            self.send(value)
    def next(self):
        return self._sample
    def send(self, value):
        # only re-sample when the current date sits exactly on the offset
        date = self._date_node()
        if date is None:
            return self._sample
        if self._offset.onOffset(date):
            self._sample = value
        return self._sample
# decorators don't work on cythoned classes
# (registers _samplenode as the 'sample' node type / .sample() method)
samplenode = nodetype(_samplenode, cls=MDFSampleNode, method="sample")
class MDFNanSumNode(MDFCustomNode):
    """Custom node class backing the 'nansum' node type (see _nansumnode)."""
    pass
class _nansumnode(MDFIterator):
    """
    Decorator that creates an :py:class:`MDFNode` that maintains
    the `nansum` of the result of `func`.
    Each time the context's date is advanced the value of this
    node is calculated as the nansum of the previous value
    and the new value returned by `func`.
    e.g.::
        @nansumnode
        def node():
            return some_value
    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::
        @evalnode
        def some_value():
            return ...
        @evalnode
        def node():
            return some_value.nansum()
    """
    _init_kwargs_ = ["filter_node_value"]
    def __init__(self, value, filter_node_value):
        # the accumulator starts as all-NaN and only becomes numeric once a
        # non-NaN observation arrives (see _send_vector/_send_float)
        self.is_float = False
        if isinstance(value, pa.Series):
            self.accum = pa.Series(np.nan, index=value.index, dtype=value.dtype)
        elif isinstance(value, np.ndarray):
            self.accum = np.ndarray(value.shape, dtype=value.dtype)
            self.accum.fill(np.nan)
        else:
            self.is_float = True
            self.accum_f = np.nan
        if filter_node_value:
            self.send(value)
    def _send_vector(self, value):
        mask = ~np.isnan(value)
        # set an nans in the accumulator where the value is not
        # NaN to zero
        accum_mask = np.isnan(self.accum)
        if accum_mask.any():
            self.accum[accum_mask & mask] = 0.0
        self.accum[mask] += value[mask]
        # return a copy so callers can't mutate the internal accumulator
        return self.accum.copy()
    def _send_float(self, value):
        # 'value == value' is False only for NaN - NaN inputs are skipped
        if value == value:
            if self.accum_f != self.accum_f:
                self.accum_f = 0.0
            self.accum_f += value
        return self.accum_f
    def next(self):
        if self.is_float:
            return self.accum_f
        return self.accum.copy()
    def send(self, value):
        if self.is_float:
            return self._send_float(value)
        return self._send_vector(value)
# decorators don't work on cythoned types
# (registers _nansumnode as the 'nansum' node type / .nansum() method)
nansumnode = nodetype(cls=MDFNanSumNode, method="nansum")(_nansumnode)
class MDFCumulativeProductNode(MDFCustomNode):
    """Custom node class backing the 'cumprod' node type (see _cumprodnode)."""
    pass
class _cumprodnode(MDFIterator):
    """
    Decorator that creates an :py:class:`MDFNode` that maintains
    the cumulative product of the result of `func`.
    Each time the context's date is advanced the value of this
    node is calculated as the previous value muliplied by
    the new value returned by `func`.
    e.g.::
        @cumprodnode
        def node():
            return some_value
    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::
        @evalnode
        def some_value():
            return ...
        @evalnode
        def node():
            return some_value.cumprod()
    TODO: That node needs a test for the argument skipna, since it is not entirely clear what it should do if the first value is na.
    It would be nice to be able to specify an initial value.
    """
    _init_kwargs_ = ["filter_node_value", "skipna"]
    def __init__(self, value, filter_node_value, skipna=True):
        self.is_float = False
        self.skipna = skipna
        # nan_mask tracks which accumulator slots have never seen a non-NaN
        # input yet; such slots get seeded with 1.0 on first real value
        if isinstance(value, pa.Series):
            self.accum = pa.Series(np.nan, index=value.index, dtype=value.dtype)
            self.nan_mask = np.isnan(self.accum)
        elif isinstance(value, np.ndarray):
            self.accum = np.ndarray(value.shape, dtype=value.dtype)
            self.accum.fill(np.nan)
            self.nan_mask = np.isnan(self.accum)
        else:
            self.is_float = True
            self.accum_f = np.nan
            self.nan_mask_f = True
        if filter_node_value:
            self.send(value)
    def _send_vector(self, value):
        # we keep track of a nan mask rather than re-evalute it each time
        # because if accum became nan after starting we wouldn't want to
        # start it from 1 again
        if self.nan_mask.any():
            self.nan_mask = self.nan_mask & np.isnan(value)
            self.accum[~self.nan_mask & ~np.isnan(value)] = 1.0
        if self.skipna:
            # only multiply through the non-NaN entries
            mask = ~np.isnan(value)
            self.accum[mask] *= value[mask]
        else:
            self.accum *= value
        return self.accum.copy()
    def _send_float(self, value):
        # 'value == value' is False only for NaN
        if self.nan_mask_f:
            if value == value:
                self.nan_mask_f = False
                self.accum_f = 1.0
        if not self.skipna \
        or value == value:
            self.accum_f *= value
        return self.accum_f
    def next(self):
        if self.is_float:
            return self.accum_f
        return self.accum.copy()
    def send(self, value):
        if self.is_float:
            return self._send_float(value)
        return self._send_vector(value)
# decorators don't work on cythoned types
# (registers _cumprodnode as the 'cumprod' node type / .cumprod() method)
cumprodnode = nodetype(cls=MDFCumulativeProductNode, method="cumprod")(_cumprodnode)
class MDFForwardFillNode(MDFCustomNode):
    """Custom node class backing the 'ffill' node type (see _ffillnode)."""
    pass
class _ffillnode(MDFIterator):
    """
    Decorator that creates an :py:class:`MDFNode` that returns
    the current result of the decoratored function forward
    filled from the previous value where the current value
    is NaN.
    The decorated function may return a float, pandas Series
    or numpy array.
    e.g.::
        @ffillnode
        def node():
            return some_value
    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::
        @evalnode
        def some_value():
            return ...
        @evalnode
        def node():
            return some_value.ffill()
    """
    _init_kwargs_ = ["filter_node_value", "initial_value"]
    def __init__(self, value, filter_node_value, initial_value=None):
        self.is_float = False
        if isinstance(value, float):
            #
            # floating point fill forward
            #
            self.is_float = True
            self.current_value_f = initial_value if initial_value is not None else np.nan
        else:
            #
            # Series or ndarray fill forward
            #
            if not isinstance(value, (pa.Series, np.ndarray)):
                raise RuntimeError("fillnode expects a float, pa.Series or ndarray")
            if initial_value is not None:
                if isinstance(initial_value, (float, int)):
                    # broadcast a scalar initial value across the vector
                    if isinstance(value, pa.Series):
                        self.current_value = pa.Series(initial_value,
                                                       index=value.index,
                                                       dtype=value.dtype)
                    else:
                        self.current_value = np.ndarray(value.shape, dtype=value.dtype)
                        self.current_value.fill(initial_value)
                else:
                    # this ensures the current_value ends up being the same type
                    # as value, even if initial_value is another vector type.
                    self.current_value = value.copy()
                    self.current_value[:] = initial_value[:]
            else:
                if isinstance(value, pa.Series):
                    self.current_value = pa.Series(np.nan, index=value.index, dtype=value.dtype)
                else:
                    self.current_value = np.ndarray(value.shape, dtype=value.dtype)
                    self.current_value.fill(np.nan)
        # update the current value
        if filter_node_value:
            self.send(value)
    def next(self):
        if self.is_float:
            return self.current_value_f
        return self.current_value.copy()
    def send(self, value):
        if self.is_float:
            # update the current value if value is not Nan
            # ('value_f == value_f' is False only for NaN)
            value_f = cython.declare(cython.double, value)
            if value_f == value_f:
                self.current_value_f = value
            return self.current_value_f
        # update the current value with the non-nan values
        mask = ~np.isnan(value)
        self.current_value[mask] = value[mask]
        return self.current_value.copy()
# decorators don't work on cythoned types
# (registers _ffillnode as the 'ffill' node type / .ffill() method)
ffillnode = nodetype(cls=MDFForwardFillNode, method="ffill")(_ffillnode)
class MDFReturnsNode(MDFCustomNode):
    """Custom node class backing the 'returns' node type (see _returnsnode)."""
    pass
class _returnsnode(MDFIterator):
    """
    Decorator that creates an :py:class:`MDFNode` that returns
    the returns of a price series.
    NaN prices are filled forward.
    If there is a NaN price at the beginning of the series, we set
    the return to zero.
    The decorated function may return a float, pandas Series
    or numpy array.
    e.g.::
        @returnsnode
        def node():
            return some_price
    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::
        @evalnode
        def some_price():
            return ...
        @evalnode
        def node():
            return some_price.returns()
    The value at any timestep is the return for that timestep, so the methods
    ideally would be called 'return', but that's a keyword and so returns is
    used.
    """
    _init_kwargs_ = ["filter_node_value"]
    def __init__(self, value, filter_node_value):
        self.is_float = False
        if isinstance(value, float):
            # floating point returns
            self.is_float = True
            self.prev_value_f = np.nan
            self.current_value_f = np.nan
            self.return_f = 0.0
        else:
            # Series or ndarray returns
            if not isinstance(value, (pa.Series, np.ndarray)):
                raise RuntimeError("returns node expects a float, pa.Series or ndarray")
            if isinstance(value, pa.Series):
                self.prev_value = pa.Series(np.nan, index=value.index)
                self.current_value = pa.Series(np.nan, index=value.index)
            else:
                self.prev_value = np.ndarray(value.shape, dtype=value.dtype)
                self.current_value = np.ndarray(value.shape, dtype=value.dtype)
            self.returns = np.ndarray(value.shape, dtype=value.dtype)
            # NOTE(review): .fill() on a pa.Series relies on the old pandas
            # where Series subclassed ndarray - verify against the pandas
            # version this file targets.
            self.prev_value.fill(np.nan)
            self.current_value.fill(np.nan)
            self.returns.fill(0.0)
        # update the current value
        if filter_node_value:
            self.send(value)
    def next(self):
        if self.is_float:
            return self.return_f
        return self.returns
    def send(self, value):
        if self.is_float:
            value_f = cython.declare(cython.double, value)
            # advance previous to the current value and update current
            # value with the new value unless it's nan (in which case we
            # leave it as it is - ie fill forward).
            self.prev_value_f = self.current_value_f
            if value_f == value_f:
                self.current_value_f = value_f
            self.return_f = (self.current_value_f / self.prev_value_f) - 1.0
            if np.isnan(self.return_f):
                # first observation (or NaN prev) yields a zero return
                self.return_f = 0.0
            return self.return_f
        # advance prev_value and update current value with any new
        # non-nan values
        mask = ~np.isnan(value)
        self.prev_value = self.current_value.copy()
        self.current_value[mask] = value[mask]
        self.returns = (self.current_value / self.prev_value) - 1.0
        self.returns[np.isnan(self.returns)] = 0.0
        return self.returns
# decorators don't work on cythoned types
# (registers _returnsnode as the 'returns' node type / .returns() method)
returnsnode = nodetype(cls=MDFReturnsNode, method="returns")(_returnsnode)
#
# datarownode is used to construct nodes from either DataFrames, WidePanels or
# TimeSeries.
#
class MDFRowIteratorNode(MDFCustomNode):
    """Custom node class backing the 'rowiter' node type (see _rowiternode)."""
    # always pass index_node as the node rather than evaluate it
    nodetype_node_kwargs = ["index_node"]
class _rowiternode(MDFIterator):
    """
    Decorator that creates an :py:class:`MDFNode` that returns
    the current row of item of a pandas DataFrame, WidePanel
    or Series returned by the decoratored function.
    What row is considered current depends on the `index_node`
    parameter, which by default is `now`.
    `missing_value` may be specified as the value to use when
    the index_node isn't included in the data's index. The
    default is NaN.
    `delay` can be a number of timesteps to delay the index_node
    by, effectively shifting the data.
    `ffill` causes the value to get forward filled if True, default is False.
    e.g.::
        @rowiternode
        def datarow_node():
            # construct a dataframe indexed by date
            return a_dataframe
        @evalnode
        def another_node():
            # the rowiternode returns the row from the dataframe
            # for the current date 'now'
            current_row = datarow_node()
    or using the nodetype method syntax (see :ref:`nodetype_method_syntax`)::
        @evalnode
        def dataframe_node():
            # construct a dataframe indexed by date
            return a_dataframe
        @evalnode
        def another_node():
            # get the row from dataframe_node for the current_date 'now'
            current_row = dataframe_node.rowiter()
    """
    _init_kwargs_ = ["owner_node", "index_node", "missing_value", "delay", "ffill"]
    def __init__(self, data, owner_node, index_node=now, missing_value=np.nan, delay=0, ffill=False):
        """data should be a dataframe, widepanel or timeseries"""
        self._current_index = None
        self._current_value = None
        self._prev_value = None
        self._missing_value_orig = missing_value
        self._index_to_date = False
        self._ffill = ffill
        # call the index node to make sure this node depends on it and remember the type
        index_value = index_node()
        self._index_node_type = type(index_value)
        # store the node and delay it if necessary
        self._index_node = index_node
        if delay > 0:
            self._index_node = self._index_node.delaynode(periods=delay,
                                                          filter=owner_node.get_filter())
        self._set_data(data)
    def _set_data(self, data):
        """Reset the iterator state for a new data object."""
        self._data = data
        self._is_dataframe = False
        self._is_widepanel = False
        self._is_series = False
        # this may get updated (e.g. to be a series corresponding to the columns
        # of a dataframe) so restore it to the original value.
        self._missing_value = self._missing_value_orig
        try:
            if isinstance(data, pa.DataFrame):
                self._is_dataframe = True
                # convert missing value to a row with the same columns as the dataframe
                if not isinstance(self._missing_value, pa.Series):
                    dtype = object
                    if data.index.size > 0:
                        dtype = data.xs(data.index[0]).dtype
                    self._missing_value = pa.Series(self._missing_value,
                                                    index=data.columns,
                                                    dtype=dtype)
                # set up the iterator
                self._iter = iter(data.index)
                self._current_index = next(self._iter)
                self._current_value = self._data.xs(self._current_index)
            elif isinstance(data, pa.WidePanel):
                self._is_widepanel = True
                # convert missing value to a dataframe with the same dimensions as the panel
                if not isinstance(self._missing_value, pa.DataFrame):
                    if not isinstance(self._missing_value, dict):
                        self._missing_value = dict([(c, self._missing_value) for c in data.items])
                    self._missing_value = pa.DataFrame(self._missing_value,
                                                       columns=data.items,
                                                       index=data.minor_axis,
                                                       dtype=data.dtype)
                # set up ther iterator
                self._iter = iter(data.major_axis)
                self._current_index = next(self._iter)
                self._current_value = self._data.major_xs(self._current_index)
            elif isinstance(data, pa.Series):
                self._is_series = True
                self._iter = _dict_iteritems(data)
                self._current_index, self._current_value = next(self._iter)
            else:
                clsname = type(data)
                if hasattr(data, "__class__"):
                    clsname = data.__class__.__name__
                raise AssertionError("datanode expects a DataFrame, WidePanel or Series; "
                                        "got '%s'" % clsname)
        except StopIteration:
            # empty data - there is no current row
            self._current_index = None
            self._current_value = self._missing_value
        # reset _prev_value to the missing value, it will get set as the iterator is advanced.
        self._prev_value = self._missing_value
        # does the index need to be converted from datetime to date?
        # (use the stored index_node_type as the current value may be delayed
        # and therefore be None instead of it usual type)
        self._index_to_date = type(self._current_index) is datetime.date \
                                and self._index_node_type is datetime.datetime
    def send(self, data):
        # re-initialize only when a different data object is returned
        if data is not self._data:
            self._set_data(data)
        return self.next()
    def next(self):
        # switching this way cythons better than having a python
        # function object and calling that, because it doesn't have
        # the overhead of doing a python object call.
        if self._is_dataframe:
            return self._next_dataframe()
        if self._is_widepanel:
            return self._next_widepanel()
        if self._is_series:
            return self._next_series()
        return self._missing_value
    def _next_dataframe(self):
        # advance the row iterator until the current index is >= the index
        # node's value, then return the matching (or forward-filled) row
        i = self._index_node()
        if self._current_index is None \
        or i is None:
            return self._missing_value
        if self._index_to_date:
            i = i.date()
        while i > self._current_index:
            # advance to the next item in the series
            try:
                # TODO: once we upgrade pandas use the iterrows method
                self._prev_value = self._current_value
                self._current_index = next(self._iter)
                self._current_value = self._data.xs(self._current_index)
            except StopIteration:
                if self._ffill:
                    # NOTE(review): the widepanel/series variants return
                    # _prev_value here; equivalent since _prev_value was just
                    # set to _current_value, but inconsistent in style.
                    return self._current_value
                return self._missing_value
        if self._current_index == i:
            return self._current_value
        if self._ffill and self._current_index > i:
            return self._prev_value
        return self._missing_value
    def _next_widepanel(self):
        # same advance-and-lookup logic as _next_dataframe, using major_xs
        i = self._index_node()
        if self._current_index is None \
        or i is None:
            return self._missing_value
        if self._index_to_date:
            i = i.date()
        while i > self._current_index:
            # advance to the next item in the series
            try:
                # TODO: once we upgrade pandas use the iterrows method
                self._prev_value = self._current_value
                self._current_index = next(self._iter)
                self._current_value = self._data.major_xs(self._current_index)
            except StopIteration:
                if self._ffill:
                    return self._prev_value
                return self._missing_value
        if self._current_index == i:
            return self._current_value
        if self._ffill and self._current_index > i:
            return self._prev_value
        return self._missing_value
    def _next_series(self):
        # same advance-and-lookup logic, iterating (index, value) pairs
        i = self._index_node()
        if self._current_index is None \
        or i is None:
            return self._missing_value
        if self._index_to_date:
            i = i.date()
        while i > self._current_index:
            # advance to the next item in the series
            try:
                self._prev_value = self._current_value
                self._current_index, self._current_value = next(self._iter)
            except StopIteration:
                if self._ffill:
                    return self._prev_value
                return self._missing_value
        if self._current_index == i:
            return self._current_value
        if self._ffill and self._current_index > i:
            return self._prev_value
        return self._missing_value
# decorators don't work on cythoned types
# (registers _rowiternode as the 'rowiter' node type / .rowiter() method)
rowiternode = nodetype(cls=MDFRowIteratorNode, method="rowiter")(_rowiternode)
#
# helper function for creating a row iterator node, but without having
# to write the function just to return a dataframe/series etc...
#
def datanode(name=None,
             data=None,
             index_node=now,
             missing_value=np.nan,
             delay=0,
             ffill=False,
             filter=None,
             category=None):
    """
    Return a new mdf node for iterating over a dataframe, panel or series.
    `data` is indexed by another node `index_node`, (default is :py:func:`now`),
    which can be any node that evaluates to a value that can be used to index
    into `data`.
    If the `index_node` evaluates to a value that is not present in
    the index of the `data` then `missing_value` is returned.
    `missing_value` can be a scalar, in which case it will be converted
    to the same row format used by the data object with the same value
    for all items.
    `delay` can be a number of timesteps to delay the index_node
    by, effectively shifting the data.
    `ffill` causes the value to get forward filled if True, default is False.
    `data` may either be a data object itself (DataFrame, WidePanel or
    Series) or a node that evaluates to one of those types.
    e.g.::
        df = pa.DataFrame({"A" : range(100)}, index=date_range)
        df_node = datanode(data=df)
        ctx[df_node] # returns the row from df where df == ctx[now]
    A datanode may be explicitly named using the name argument, or
    if left as None the variable name the node is being assigned to
    will be used.
    """
    assert data is not None, "Must specify data as a DataFrame, Series or node"
    if name is None:
        # introspect the caller's frame for the assigned variable name
        # (the frame depth differs between compiled and pure-python builds)
        name = get_assigned_node_name("datanode", 0 if cython.compiled else 1)
    # a plain data object is wrapped in a lambda so it behaves like a node func
    if isinstance(data, MDFNode):
        func = MDFCallable(name, data)
    else:
        func = MDFCallable(name, lambda: data)
    node = MDFRowIteratorNode(name=name,
                              func=func,
                              node_type_func=_rowiternode,
                              category=category,
                              filter=filter,
                              nodetype_func_kwargs={
                                "index_node" : index_node,
                                "delay" : delay,
                                "missing_value" : missing_value,
                                "ffill" : ffill,
                              })
    return node
def filternode(name=None,
               data=None,
               index_node=now,
               delay=0,
               filter=None,
               category=None):
    """
    Return a new mdf node for using as a filter for other nodes
    based on the index of the data object passed in (DataFrame,
    Series or WidePanel).
    The node value is True when the index_node (default=now)
    is in the index of the data, and False otherwise.
    This can be used to easily filter other nodes so that
    they operate at the same frequency of the underlying data.
    `delay` can be a number of timesteps to delay the index_node
    by, effectively shifting the data.
    A filternode may be explicitly named using the name argument, or
    if left as None the variable name the node is being assigned to
    will be used.
    """
    assert data is not None, "Must specify data as a DataFrame, Series or node"
    if name is None:
        # introspect the caller's frame for the assigned variable name
        # (the frame depth differs between compiled and pure-python builds)
        name = get_assigned_node_name("filternode", 0 if cython.compiled else 1)
    # the filter is always True for points on the data's index,
    # and False otherwise.
    # (previously two throwaway lambdas were assigned to 'func' here and then
    # unconditionally overwritten - that dead code has been removed)
    if isinstance(data, MDFNode):
        func = MDFCallable(name, data, lambda x: pa.Series(True, index=x.index))
    else:
        func = MDFCallable(name, lambda: pa.Series(True, index=data.index))
    # pass the MDFCallable directly (consistent with datanode); it was
    # previously redundantly wrapped in a second MDFCallable
    node = MDFRowIteratorNode(name=name,
                              func=func,
                              node_type_func=_rowiternode,
                              category=category,
                              filter=filter,
                              nodetype_func_kwargs={
                                "index_node" : index_node,
                                "missing_value" : False,
                                "delay" : delay,
                              })
    return node
#
# applynode is a way of transforming a plain function into an mdf
# node by binding other nodes to its parameters.
# This is useful for quick interactive work more than for applications
# written using mdf.
#
class MDFApplyNode(MDFCustomNode):
    """Custom node class backing the 'apply' node type (see _applynode)."""
    nodetype_kwargs = ["func", "args", "kwargs"]
def _applynode(value, func, args=(), kwargs={}):
    """
    Return a new mdf node that applies `func` to the value of the node
    that is passed in. Extra `args` and `kwargs` can be passed in as
    values or nodes.
    Unlike most other node types this shouldn't be used as a decorator, but instead
    should only be used via the method syntax for node types, (see :ref:`nodetype_method_syntax`)
    e.g.::
        A_plus_B_node = A.applynode(operator.add, args=(B,))
    """
    # evaluate any nodes passed as positional arguments
    new_args = []
    for arg in args:
        if isinstance(arg, MDFNode):
            arg = arg()
        new_args.append(arg)
    # evaluate any nodes passed as keyword arguments.
    # BUGFIX: the loop variable previously re-bound the 'value' parameter,
    # so 'func' received the last keyword argument instead of the node's
    # value whenever kwargs was non-empty.
    new_kwargs = {}
    for key, kwarg in _dict_iteritems(kwargs):
        if isinstance(kwarg, MDFNode):
            kwarg = kwarg()
        new_kwargs[key] = kwarg
    return func(value, *new_args, **new_kwargs)
# decorators don't work on cythoned types
# (registers _applynode as the 'apply' node type / .applynode() method)
applynode = nodetype(cls=MDFApplyNode, method="apply")(_applynode)
#
# lookaheadnode evaluates a node over a date range or for a number
# of periods in the future and returns a pandas series of values.
# When looking for a number of periods in the future it does that
# for the first timestep only and is constant thereafter.
# It's intended use is for small look aheads for seeding moving
# average type calculations.
#
class MDFLookAheadNode(MDFCustomNode):
    """Custom node class backing the 'lookahead' node type (see _lookaheadnode)."""
    nodetype_kwargs = ["value", "owner_node", "periods", "filter_node", "offset"]
    # don't mark this node as dirty when dependent nodes are dirtied
    # because of changes to the current date.
    dirty_flags_propagate_mask = ~DIRTY_FLAGS.TIME
    def on_set_date(self, ctx, date):
        """called just before 'now' is changed"""
        # return True if date is going backwards to indicate we should be marked as dirty
        return ctx.get_date() > date
def _lookaheadnode(value_unused, owner_node, periods, filter_node=None, offset=pa.datetools.BDay()):
    """
    Node type that creates an :py:class:`MDFNode` that returns
    a pandas Series of values of the underlying node for a sequence
    of dates in the future.
    Unlike most other node types this shouldn't be used as a decorator, but instead
    should only be used via the method syntax for node types, (see :ref:`nodetype_method_syntax`)
    e.g.::
        future_values = some_node.lookahead(periods=10)
    This would get the next 10 values of ``some_node`` after the current date. Once
    evaluated it won't be re-evaluated as time moves forwards; it's always the first
    set of future observations. It is intended to be used sparingly for seeding
    moving average calculations or other calculations that need some initial value
    based on the first few samples of another node.
    The dates start with the current context date (i.e. :py:func:`now`) and is
    incremented by the optional argument `offset` which defaults to weekdays
    (see :py:class:`pandas.datetools.BDay`).
    :param int periods: the total number of observations to collect, excluding any that are ignored due
                        to any filter being used.
    :param offset: date offset object (e.g. datetime timedelta or pandas date offset) to use to
                   increment the date for each sample point.
    :param filter: optional node that if specified should evaluate to True if an observation is to
                   be included, or False otherwise.
    """
    # NOTE: the 'offset' default is evaluated once at import time; BDay() is
    # treated as immutable so sharing it across calls is safe.
    assert owner_node.base_node is not None, \
        "lookahead nodes must be called via the lookahead or lookaheadnode methods on another node"
    ctx = cython.declare(MDFContext)
    shifted_ctx = cython.declare(MDFContext)
    # create a shifted context from the current context shifted by date
    ctx = _get_current_context()
    date = ctx.get_date()
    shifted_ctx = ctx.shift({now : date})
    # collect results from the shifted context
    count = cython.declare(int, 0)
    values = cython.declare(list, [])
    dates = cython.declare(list, [])
    try:
        while count < periods:
            shifted_ctx.set_date(date)
            date += offset
            # skip dates excluded by the filter (they don't count as periods)
            if filter_node is not None:
                if not shifted_ctx.get_value(filter_node):
                    continue
            value = shifted_ctx.get_value(owner_node.base_node)
            values.append(value)
            dates.append(shifted_ctx.get_date())
            count += 1
    finally:
        # removed any cached values from the context since they won't be needed again
        # and would otherwise just be taking up memory.
        shifted_ctx.clear()
    # vector-valued nodes yield a DataFrame (one row per date), scalars a Series
    if count > 0 and isinstance(values[0], pa.Series):
        return pa.DataFrame(values, index=dates)
    return pa.Series(values, index=dates)
# decorators don't work on cythoned classes
# (registers _lookaheadnode as the 'lookahead' node type / .lookahead() method)
lookaheadnode = nodetype(_lookaheadnode, cls=MDFLookAheadNode, method="lookahead")
class Op(object):
    """
    Descriptor that implements arithmetic operators for MDF nodes by
    building 'applynode' nodes, e.g. ``A + B`` becomes
    ``A.applynode(operator.add, args=(B,))``.
    """
    op = cython.declare(object)
    lhs = cython.declare(object)
    def __init__(self, op, lhs=None):
        self.op = op
        self.lhs = lhs
    def __get__(self, instance, owner=None):
        # re-bind to whatever the operator is accessed through:
        # the instance when there is one, otherwise the class
        target = instance if instance is not None else owner
        return self.__class__(self.op, target)
    def __call__(self, rhs=None):
        # unary operators take no rhs; binary operators pass it along
        extra_args = () if rhs is None else (rhs,)
        return self.lhs.applynode(func=self.op, args=extra_args)
# register arithmetic operator support on MDFNode; division is spelled
# '__div__' on python 2 and '__truediv__' on python 3.
_div_name = "__div__" if sys.version_info[0] <= 2 else "__truediv__"
for op in ("__add__", "__sub__", "__mul__", _div_name, "__neg__"):
    MDFNode._additional_attrs_[op] = Op(getattr(operator, op))
| [
"pandas.DataFrame",
"pandas.datetools.BDay",
"numpy.isnan",
"inspect.getargspec",
"pandas.Series",
"collections.namedtuple",
"cython.declare",
"numpy.ndarray",
"collections.deque"
] | [((567, 607), 'cython.declare', 'cython.declare', (['int', 'sys.version_info[0]'], {}), '(int, sys.version_info[0])\n', (581, 607), False, 'import cython\n'), ((27312, 27380), 'collections.namedtuple', 'namedtuple', (['"""PerCtxData"""', "['value', 'generator', 'date', 'is_valid']"], {}), "('PerCtxData', ['value', 'generator', 'date', 'is_valid'])\n", (27322, 27380), False, 'from collections import deque, namedtuple\n'), ((66143, 66162), 'pandas.datetools.BDay', 'pa.datetools.BDay', ([], {}), '()\n', (66160, 66162), True, 'import pandas as pa\n'), ((67818, 67844), 'cython.declare', 'cython.declare', (['MDFContext'], {}), '(MDFContext)\n', (67832, 67844), False, 'import cython\n'), ((67863, 67889), 'cython.declare', 'cython.declare', (['MDFContext'], {}), '(MDFContext)\n', (67877, 67889), False, 'import cython\n'), ((68124, 68146), 'cython.declare', 'cython.declare', (['int', '(0)'], {}), '(int, 0)\n', (68138, 68146), False, 'import cython\n'), ((68160, 68184), 'cython.declare', 'cython.declare', (['list', '[]'], {}), '(list, [])\n', (68174, 68184), False, 'import cython\n'), ((68197, 68221), 'cython.declare', 'cython.declare', (['list', '[]'], {}), '(list, [])\n', (68211, 68221), False, 'import cython\n'), ((68936, 68966), 'pandas.Series', 'pa.Series', (['values'], {'index': 'dates'}), '(values, index=dates)\n', (68945, 68966), True, 'import pandas as pa\n'), ((69129, 69151), 'cython.declare', 'cython.declare', (['object'], {}), '(object)\n', (69143, 69151), False, 'import cython\n'), ((69162, 69184), 'cython.declare', 'cython.declare', (['object'], {}), '(object)\n', (69176, 69184), False, 'import cython\n'), ((10425, 10454), 'cython.declare', 'cython.declare', (['MDFCustomNode'], {}), '(MDFCustomNode)\n', (10439, 10454), False, 'import cython\n'), ((11521, 11541), 'cython.declare', 'cython.declare', (['dict'], {}), '(dict)\n', (11535, 11541), False, 'import cython\n'), ((12816, 12836), 'cython.declare', 'cython.declare', (['dict'], {}), '(dict)\n', (12830, 
12836), False, 'import cython\n'), ((16582, 16603), 'cython.declare', 'cython.declare', (['tuple'], {}), '(tuple)\n', (16596, 16603), False, 'import cython\n'), ((16627, 16654), 'cython.declare', 'cython.declare', (['MDFEvalNode'], {}), '(MDFEvalNode)\n', (16641, 16654), False, 'import cython\n'), ((26009, 26024), 'collections.deque', 'deque', (['[]', 'size'], {}), '([], size)\n', (26014, 26024), False, 'from collections import deque, namedtuple\n'), ((29328, 29356), 'cython.declare', 'cython.declare', (['MDFDelayNode'], {}), '(MDFDelayNode)\n', (29342, 29356), False, 'import cython\n'), ((30612, 30638), 'cython.declare', 'cython.declare', (['MDFContext'], {}), '(MDFContext)\n', (30626, 30638), False, 'import cython\n'), ((36063, 36118), 'collections.deque', 'deque', (['([initial_value] * max_queue_size)', 'max_queue_size'], {}), '([initial_value] * max_queue_size, max_queue_size)\n', (36068, 36118), False, 'from collections import deque, namedtuple\n'), ((39948, 39968), 'numpy.isnan', 'np.isnan', (['self.accum'], {}), '(self.accum)\n', (39956, 39968), True, 'import numpy as np\n'), ((62998, 63031), 'pandas.Series', 'pa.Series', (['(True)'], {'index': 'data.index'}), '(True, index=data.index)\n', (63007, 63031), True, 'import pandas as pa\n'), ((68890, 68923), 'pandas.DataFrame', 'pa.DataFrame', (['values'], {'index': 'dates'}), '(values, index=dates)\n', (68902, 68923), True, 'import pandas as pa\n'), ((11669, 11692), 'cython.declare', 'cython.declare', (['MDFNode'], {}), '(MDFNode)\n', (11683, 11692), False, 'import cython\n'), ((39415, 39470), 'pandas.Series', 'pa.Series', (['np.nan'], {'index': 'value.index', 'dtype': 'value.dtype'}), '(np.nan, index=value.index, dtype=value.dtype)\n', (39424, 39470), True, 'import pandas as pa\n'), ((39824, 39839), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (39832, 39839), True, 'import numpy as np\n'), ((41837, 41892), 'pandas.Series', 'pa.Series', (['np.nan'], {'index': 'value.index', 'dtype': 'value.dtype'}), 
'(np.nan, index=value.index, dtype=value.dtype)\n', (41846, 41892), True, 'import pandas as pa\n'), ((41921, 41941), 'numpy.isnan', 'np.isnan', (['self.accum'], {}), '(self.accum)\n', (41929, 41941), True, 'import numpy as np\n'), ((46401, 46437), 'cython.declare', 'cython.declare', (['cython.double', 'value'], {}), '(cython.double, value)\n', (46415, 46437), False, 'import cython\n'), ((46634, 46649), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (46642, 46649), True, 'import numpy as np\n'), ((49158, 49194), 'cython.declare', 'cython.declare', (['cython.double', 'value'], {}), '(cython.double, value)\n', (49172, 49194), False, 'import cython\n'), ((49620, 49643), 'numpy.isnan', 'np.isnan', (['self.return_f'], {}), '(self.return_f)\n', (49628, 49643), True, 'import numpy as np\n'), ((49823, 49838), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (49831, 49838), True, 'import numpy as np\n'), ((50028, 50050), 'numpy.isnan', 'np.isnan', (['self.returns'], {}), '(self.returns)\n', (50036, 50050), True, 'import numpy as np\n'), ((35217, 35279), 'pandas.Series', 'pa.Series', (['initial_value'], {'index': 'value.index', 'dtype': 'value.dtype'}), '(initial_value, index=value.index, dtype=value.dtype)\n', (35226, 35279), True, 'import pandas as pa\n'), ((36608, 36623), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (36616, 36623), True, 'import numpy as np\n'), ((37844, 37906), 'pandas.Series', 'pa.Series', (['initial_value'], {'index': 'value.index', 'dtype': 'value.dtype'}), '(initial_value, index=value.index, dtype=value.dtype)\n', (37853, 37906), True, 'import pandas as pa\n'), ((39540, 39582), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (39550, 39582), True, 'import numpy as np\n'), ((42011, 42053), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (42021, 42053), True, 'import numpy as np\n'), ((42118, 
42138), 'numpy.isnan', 'np.isnan', (['self.accum'], {}), '(self.accum)\n', (42126, 42138), True, 'import numpy as np\n'), ((42606, 42621), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (42614, 42621), True, 'import numpy as np\n'), ((42731, 42746), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (42739, 42746), True, 'import numpy as np\n'), ((48387, 48423), 'pandas.Series', 'pa.Series', (['np.nan'], {'index': 'value.index'}), '(np.nan, index=value.index)\n', (48396, 48423), True, 'import pandas as pa\n'), ((48461, 48497), 'pandas.Series', 'pa.Series', (['np.nan'], {'index': 'value.index'}), '(np.nan, index=value.index)\n', (48470, 48497), True, 'import pandas as pa\n'), ((48550, 48592), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (48560, 48592), True, 'import numpy as np\n'), ((48630, 48672), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (48640, 48672), True, 'import numpy as np\n'), ((48704, 48746), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (48714, 48746), True, 'import numpy as np\n'), ((63312, 63342), 'pandas.Series', 'pa.Series', (['(True)'], {'index': 'x.index'}), '(True, index=x.index)\n', (63321, 63342), True, 'import pandas as pa\n'), ((63395, 63428), 'pandas.Series', 'pa.Series', (['(True)'], {'index': 'data.index'}), '(True, index=data.index)\n', (63404, 63428), True, 'import pandas as pa\n'), ((7156, 7190), 'inspect.getargspec', 'inspect.getargspec', (['node_type_func'], {}), '(node_type_func)\n', (7174, 7190), False, 'import inspect\n'), ((35350, 35392), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (35360, 35392), True, 'import numpy as np\n'), ((37977, 38019), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, 
dtype=value.dtype)\n', (37987, 38019), True, 'import numpy as np\n'), ((45829, 45884), 'pandas.Series', 'pa.Series', (['np.nan'], {'index': 'value.index', 'dtype': 'value.dtype'}), '(np.nan, index=value.index, dtype=value.dtype)\n', (45838, 45884), True, 'import pandas as pa\n'), ((45948, 45990), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (45958, 45990), True, 'import numpy as np\n'), ((53604, 53667), 'pandas.Series', 'pa.Series', (['self._missing_value'], {'index': 'data.columns', 'dtype': 'dtype'}), '(self._missing_value, index=data.columns, dtype=dtype)\n', (53613, 53667), True, 'import pandas as pa\n'), ((42663, 42678), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (42671, 42678), True, 'import numpy as np\n'), ((45074, 45136), 'pandas.Series', 'pa.Series', (['initial_value'], {'index': 'value.index', 'dtype': 'value.dtype'}), '(initial_value, index=value.index, dtype=value.dtype)\n', (45083, 45136), True, 'import pandas as pa\n'), ((45318, 45360), 'numpy.ndarray', 'np.ndarray', (['value.shape'], {'dtype': 'value.dtype'}), '(value.shape, dtype=value.dtype)\n', (45328, 45360), True, 'import numpy as np\n'), ((54448, 54546), 'pandas.DataFrame', 'pa.DataFrame', (['self._missing_value'], {'columns': 'data.items', 'index': 'data.minor_axis', 'dtype': 'data.dtype'}), '(self._missing_value, columns=data.items, index=data.minor_axis,\n dtype=data.dtype)\n', (54460, 54546), True, 'import pandas as pa\n'), ((14852, 14901), 'inspect.getargspec', 'inspect.getargspec', (['self._node_type_func.__init__'], {}), '(self._node_type_func.__init__)\n', (14870, 14901), False, 'import inspect\n'), ((14960, 15000), 'inspect.getargspec', 'inspect.getargspec', (['self._node_type_func'], {}), '(self._node_type_func)\n', (14978, 15000), False, 'import inspect\n'), ((36724, 36739), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (36732, 36739), True, 'import numpy as np\n')] |
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import ensemble
from sklearn.utils import validation
import tensorflow as tf
from scipy import stats
from scipy.stats import pearsonr
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from tensorflow.keras import layers
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from sklearn.ensemble import RandomForestRegressor
from src import data
from src.data import get_FLX_inputs, make_train_test_data
from src.model import CausalLSTM
from src.tree_causality import CausalPrecursors
np.random.seed(1)
tf.compat.v1.set_random_seed(13)
def main(
# input data params
site_name='',
path_input='',
path_output='',
feature_params=[],
label_params=[],
# causality params
cond_ind_test='parcorr',
max_tau=7,
sig_thres=0.05,
var_names=['TA','SW','LW','PA','P','WS','TS','SM'],
depth=2,
num_features=8,
# model params
len_input=10,
len_output=1,
window_size=7,
num_hiddens=16,
batch_size=50,
epochs=50,
validation_split=0.2,
ensemble_epochs=2,
):
# --------------------------------------------------------------------------
# 1. make output dir.
# --------------------------------------------------------------------------
if not os.path.exists(path_output + 'output/'):
os.mkdir(path_output + 'output/')
if not os.path.exists(path_output + 'loss/'):
os.mkdir(path_output + 'loss/')
if not os.path.exists(path_output + 'info/'):
os.mkdir(path_output + 'info/')
# --------------------------------------------------------------------------
# 2. read and preprocessing FLUXNET2015 dataset.
# --------------------------------------------------------------------------
# process data
print('\033[1;31m%s\033[0m' % 'Read and Processing input data')
qc_params = []
qc_params.append(label_params[0]+'_QC')
feature, label, quality = get_FLX_inputs(
path=path_input,
feature_params=feature_params,
label_params=label_params,
qc_params=qc_params,
resolution='DD',
)
if quality == 0:
print('This site cannot be used, careful for your inputs!')
return
# assert feature/label have any NaN
assert np.isnan(np.array(feature)).any() == False, \
('Features have NaN value!')
assert np.isnan(np.array(label)).any() == False, \
('Label has NaN value!')
# make train and test dataset
train_x, train_y, test_x, test_y, train_mean, train_std, normalized_test_x \
= make_train_test_data( \
feature,
len_input,
len_output,
window_size
)
_, N_t, N_f = train_x.shape
print('the shape of train dataset is {}'.format(train_x.shape))
print('the shape of test dataset is {}'.format(test_x.shape))
print('...done...\n')
# --------------------------------------------------------------------------
# 3. Making causality tree.
# --------------------------------------------------------------------------
# calculate causal tree
print('\033[1;31m%s\033[0m' % 'making causality tree')
a = CausalPrecursors(
site_name=site_name,
cond_ind_test=cond_ind_test,
max_tau=max_tau,
sig_thres=sig_thres,
var_names=var_names,
depth=depth,
num_features=num_features)(feature)
print(a.group_node_dict)
print(a.group_num_child_nodes)
print(a.group_input_idx)
print(a.group_child_state_idx)
print('...done...\n')
# --------------------------------------------------------------------------
# 4. Training and inference
# --------------------------------------------------------------------------
print('\033[1;31m%s\033[0m' % 'start training!\n')
print('training Forest Causal LSTM')
checkpoint = ModelCheckpoint(
filepath='/Users/lewlee/Desktop/log/',
monitor='val_loss',
save_best_only='True',
save_weights_only='True'
)
lr = ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
epsilon=0.0001,
cooldown=0,
min_lr=0
)
print('training CausalLSTM')
N_test = test_y.shape[0]
N_train = train_x.shape[0]
num_tree = len(a.group_input_idx.keys())
y_pred_clstm = np.full((N_test, num_tree*ensemble_epochs), np.nan)
y_train_clstm = np.full((N_train, num_tree*ensemble_epochs), np.nan)
for j in np.arange(ensemble_epochs):
for i, timestep in enumerate(a.group_num_child_nodes.keys()):
num_child_nodes = a.group_num_child_nodes[timestep]
input_idx = a.group_input_idx[timestep]
child_state_idx = a.group_child_state_idx[timestep]
num_nodes = len(num_child_nodes)
model = CausalLSTM(
num_child_nodes=num_child_nodes,
input_idx=input_idx,
child_state_idx=child_state_idx,
num_nodes=num_nodes,
num_hiddens=num_hiddens,
input_len=len_input,
batch_size=batch_size)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss=['mse']
)
history_clstm = model.fit(
train_x,
np.squeeze(train_y),
batch_size=batch_size,
epochs=epochs,
validation_split=validation_split,
callbacks=[checkpoint, lr]
)
y_train_clstm[:,num_tree*j+i] = np.squeeze(model.predict(
train_x,
batch_size=batch_size
))
y_pred_clstm[:,num_tree*j+i] = np.squeeze(model.predict(
test_x,
batch_size=batch_size
))
y_pred_clstm = np.nanmean(y_pred_clstm, axis=-1)
test_y = np.squeeze(test_y)
print('r2 of test dataset is {} of Causal LSTM'.format(
r2_score(np.squeeze(test_y), np.squeeze(y_pred_clstm))))
# --------------------------------------------------------------------------
# 5. Saving
# --------------------------------------------------------------------------
def renormalized(inputs):
return inputs*train_std[-1]+train_mean[-1]
y_pred_clstm = np.squeeze(renormalized(y_pred_clstm))[:, np.newaxis]
y_test = np.squeeze(renormalized(test_y))[:, np.newaxis]
out = np.concatenate(
(y_pred_clstm,
y_test),
axis=-1)
np.save(path_output + 'output/'+site_name+'_out.npy', out)
print('...done...\n')
if __name__ == '__main__':
pass
| [
"numpy.full",
"os.mkdir",
"src.data.make_train_test_data",
"numpy.random.seed",
"numpy.save",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"os.path.exists",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.compat.v1.set_random_seed",
"numpy.arange",
"src.data.get_FLX_inputs",
"src.... | [((652, 669), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (666, 669), True, 'import numpy as np\n'), ((670, 702), 'tensorflow.compat.v1.set_random_seed', 'tf.compat.v1.set_random_seed', (['(13)'], {}), '(13)\n', (698, 702), True, 'import tensorflow as tf\n'), ((2149, 2281), 'src.data.get_FLX_inputs', 'get_FLX_inputs', ([], {'path': 'path_input', 'feature_params': 'feature_params', 'label_params': 'label_params', 'qc_params': 'qc_params', 'resolution': '"""DD"""'}), "(path=path_input, feature_params=feature_params, label_params\n =label_params, qc_params=qc_params, resolution='DD')\n", (2163, 2281), False, 'from src.data import get_FLX_inputs, make_train_test_data\n'), ((2778, 2843), 'src.data.make_train_test_data', 'make_train_test_data', (['feature', 'len_input', 'len_output', 'window_size'], {}), '(feature, len_input, len_output, window_size)\n', (2798, 2843), False, 'from src.data import get_FLX_inputs, make_train_test_data\n'), ((4101, 4228), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""/Users/lewlee/Desktop/log/"""', 'monitor': '"""val_loss"""', 'save_best_only': '"""True"""', 'save_weights_only': '"""True"""'}), "(filepath='/Users/lewlee/Desktop/log/', monitor='val_loss',\n save_best_only='True', save_weights_only='True')\n", (4116, 4228), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((4273, 4401), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(10)', 'verbose': '(0)', 'mode': '"""auto"""', 'epsilon': '(0.0001)', 'cooldown': '(0)', 'min_lr': '(0)'}), "(monitor='val_loss', factor=0.1, patience=10, verbose=0,\n mode='auto', epsilon=0.0001, cooldown=0, min_lr=0)\n", (4290, 4401), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau\n'), ((4627, 4680), 'numpy.full', 'np.full', (['(N_test, num_tree * ensemble_epochs)', 'np.nan'], {}), 
'((N_test, num_tree * ensemble_epochs), np.nan)\n', (4634, 4680), True, 'import numpy as np\n'), ((4699, 4753), 'numpy.full', 'np.full', (['(N_train, num_tree * ensemble_epochs)', 'np.nan'], {}), '((N_train, num_tree * ensemble_epochs), np.nan)\n', (4706, 4753), True, 'import numpy as np\n'), ((4766, 4792), 'numpy.arange', 'np.arange', (['ensemble_epochs'], {}), '(ensemble_epochs)\n', (4775, 4792), True, 'import numpy as np\n'), ((6192, 6225), 'numpy.nanmean', 'np.nanmean', (['y_pred_clstm'], {'axis': '(-1)'}), '(y_pred_clstm, axis=-1)\n', (6202, 6225), True, 'import numpy as np\n'), ((6239, 6257), 'numpy.squeeze', 'np.squeeze', (['test_y'], {}), '(test_y)\n', (6249, 6257), True, 'import numpy as np\n'), ((6789, 6836), 'numpy.concatenate', 'np.concatenate', (['(y_pred_clstm, y_test)'], {'axis': '(-1)'}), '((y_pred_clstm, y_test), axis=-1)\n', (6803, 6836), True, 'import numpy as np\n'), ((6872, 6934), 'numpy.save', 'np.save', (["(path_output + 'output/' + site_name + '_out.npy')", 'out'], {}), "(path_output + 'output/' + site_name + '_out.npy', out)\n", (6879, 6934), True, 'import numpy as np\n'), ((1487, 1526), 'os.path.exists', 'os.path.exists', (["(path_output + 'output/')"], {}), "(path_output + 'output/')\n", (1501, 1526), False, 'import os\n'), ((1536, 1569), 'os.mkdir', 'os.mkdir', (["(path_output + 'output/')"], {}), "(path_output + 'output/')\n", (1544, 1569), False, 'import os\n'), ((1582, 1619), 'os.path.exists', 'os.path.exists', (["(path_output + 'loss/')"], {}), "(path_output + 'loss/')\n", (1596, 1619), False, 'import os\n'), ((1629, 1660), 'os.mkdir', 'os.mkdir', (["(path_output + 'loss/')"], {}), "(path_output + 'loss/')\n", (1637, 1660), False, 'import os\n'), ((1673, 1710), 'os.path.exists', 'os.path.exists', (["(path_output + 'info/')"], {}), "(path_output + 'info/')\n", (1687, 1710), False, 'import os\n'), ((1720, 1751), 'os.mkdir', 'os.mkdir', (["(path_output + 'info/')"], {}), "(path_output + 'info/')\n", (1728, 1751), False, 'import os\n'), 
((3377, 3551), 'src.tree_causality.CausalPrecursors', 'CausalPrecursors', ([], {'site_name': 'site_name', 'cond_ind_test': 'cond_ind_test', 'max_tau': 'max_tau', 'sig_thres': 'sig_thres', 'var_names': 'var_names', 'depth': 'depth', 'num_features': 'num_features'}), '(site_name=site_name, cond_ind_test=cond_ind_test, max_tau=\n max_tau, sig_thres=sig_thres, var_names=var_names, depth=depth,\n num_features=num_features)\n', (3393, 3551), False, 'from src.tree_causality import CausalPrecursors\n'), ((5127, 5323), 'src.model.CausalLSTM', 'CausalLSTM', ([], {'num_child_nodes': 'num_child_nodes', 'input_idx': 'input_idx', 'child_state_idx': 'child_state_idx', 'num_nodes': 'num_nodes', 'num_hiddens': 'num_hiddens', 'input_len': 'len_input', 'batch_size': 'batch_size'}), '(num_child_nodes=num_child_nodes, input_idx=input_idx,\n child_state_idx=child_state_idx, num_nodes=num_nodes, num_hiddens=\n num_hiddens, input_len=len_input, batch_size=batch_size)\n', (5137, 5323), False, 'from src.model import CausalLSTM\n'), ((5679, 5698), 'numpy.squeeze', 'np.squeeze', (['train_y'], {}), '(train_y)\n', (5689, 5698), True, 'import numpy as np\n'), ((6335, 6353), 'numpy.squeeze', 'np.squeeze', (['test_y'], {}), '(test_y)\n', (6345, 6353), True, 'import numpy as np\n'), ((6355, 6379), 'numpy.squeeze', 'np.squeeze', (['y_pred_clstm'], {}), '(y_pred_clstm)\n', (6365, 6379), True, 'import numpy as np\n'), ((2490, 2507), 'numpy.array', 'np.array', (['feature'], {}), '(feature)\n', (2498, 2507), True, 'import numpy as np\n'), ((2584, 2599), 'numpy.array', 'np.array', (['label'], {}), '(label)\n', (2592, 2599), True, 'import numpy as np\n'), ((5527, 5553), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), '()\n', (5551, 5553), True, 'import tensorflow as tf\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: edge extraction.ipynb (unless otherwise specified).
__all__ = ['histogram_equalize', 'raster_edges']
# Cell
import os
import subprocess
from tempfile import TemporaryDirectory
import cv2
import numpy as np
# Cell
def histogram_equalize(data, max_val=None, endpoint=False):
    """Rank-based histogram equalisation.

    Each element of *data* is replaced by its rank mapped linearly onto
    [0, max_val] (``max_val`` defaults to the data maximum), preserving the
    original shape.  The input is never modified: ``flatten()`` always
    returns a copy, which is rewritten in place and then reshaped.
    """
    original_shape = np.shape(data)
    flat = np.asarray(data).flatten()  # flatten() copies, so the caller's data is safe
    if max_val is None:
        max_val = flat.max()
    ranks = np.argsort(flat)
    # Scatter the evenly spaced replacement values back in rank order.
    flat[ranks] = np.linspace(0, max_val, len(ranks), endpoint=endpoint)
    return flat.reshape(*original_shape)
def _cld(gray, halfw = 8,smoothPasses = 4, sigma1 = .9, sigma2 = 3, tau = .97):
    """Run the external coherent-line-drawing (``./cld``) binary on *gray*.

    The grayscale image is written to a temporary BMP, the ``cld`` executable
    is invoked on it (through WSL when running on Windows), and the resulting
    line drawing is read back as a grayscale image.

    Parameters mirror the cld command-line arguments (kernel half-width,
    smoothing passes, the two Gaussian sigmas and the threshold tau).

    Raises:
        subprocess.CalledProcessError: if the cld binary exits non-zero.
        RuntimeError: if the output image cannot be read back.
    """
    name = 'cld_tmp_'
    in_path = f'{name}_in.bmp'
    out_path = f'{name}_out.bmp'
    cv2.imwrite(in_path, gray)
    # Build the command as an argument list instead of an interpolated shell
    # string: avoids shell quoting/injection issues with the numeric params.
    cmd = ['./cld', in_path, out_path, str(halfw), str(smoothPasses),
           str(sigma1), str(sigma2), str(tau)]
    if os.name == 'nt':
        # On Windows the Linux binary is executed inside WSL.
        cmd = ['wsl'] + cmd
    subprocess.check_call(cmd)
    result = cv2.imread(out_path, cv2.IMREAD_GRAYSCALE)
    if result is None:
        # cv2.imread returns None on failure instead of raising; surface it.
        raise RuntimeError(f'cld produced no readable output at {out_path}')
    return result
def raster_edges(gray, histogram_eq=False, cld=True, canny_low=100, canny_hi=200):
    """Return an inverted Canny edge raster (edges black on white).

    When *histogram_eq* is set, the input is histogram-equalised first.
    When *cld* is set, the raster is ANDed with the coherent-line-drawing
    output for the same image.
    """
    source = histogram_equalize(gray) if histogram_eq else gray
    inverted = 255 - cv2.Canny(source, canny_low, canny_hi)
    if cld:
        inverted = inverted & _cld(source)
    return inverted
| [
"cv2.Canny",
"cv2.imwrite",
"numpy.asarray",
"numpy.shape",
"numpy.argsort",
"cv2.imread",
"subprocess.check_call"
] | [((341, 355), 'numpy.shape', 'np.shape', (['data'], {}), '(data)\n', (349, 355), True, 'import numpy as np\n'), ((471, 492), 'numpy.argsort', 'np.argsort', (['data_flat'], {}), '(data_flat)\n', (481, 492), True, 'import numpy as np\n'), ((757, 792), 'cv2.imwrite', 'cv2.imwrite', (['f"""{name}_in.bmp"""', 'gray'], {}), "(f'{name}_in.bmp', gray)\n", (768, 792), False, 'import cv2\n'), ((869, 1003), 'subprocess.check_call', 'subprocess.check_call', (['f"""{wsl}./cld {name}_in.bmp {name}_out.bmp {halfw} {smoothPasses} {sigma1} {sigma2} {tau}"""'], {'shell': '(True)'}), "(\n f'{wsl}./cld {name}_in.bmp {name}_out.bmp {halfw} {smoothPasses} {sigma1} {sigma2} {tau}'\n , shell=True)\n", (890, 1003), False, 'import subprocess\n'), ((1005, 1056), 'cv2.imread', 'cv2.imread', (['f"""{name}_out.bmp"""', 'cv2.IMREAD_GRAYSCALE'], {}), "(f'{name}_out.bmp', cv2.IMREAD_GRAYSCALE)\n", (1015, 1056), False, 'import cv2\n'), ((1221, 1257), 'cv2.Canny', 'cv2.Canny', (['gray', 'canny_low', 'canny_hi'], {}), '(gray, canny_low, canny_hi)\n', (1230, 1257), False, 'import cv2\n'), ((372, 388), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (382, 388), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
from imutils.video import WebcamVideoStream
import glob
import time
import math
# Load previously saved calibration data
path = './camera_data/camera_calibration.npz'
npzfile = np.load(path)
#Camera Matrix
mtx = npzfile[npzfile.files[0]]
#Distortion Matrix
dist = npzfile[npzfile.files[1]]
# Calibration-time rotation / translation vectors; loaded but not used below.
rvec = npzfile[npzfile.files[2]]
tve = npzfile[npzfile.files[3]]
print(mtx ,dist)
#Font setup
font = cv.FONT_HERSHEY_PLAIN
start_time = time.time()  # reference instant for the FPS counter in the main loop
# 15 3-D model points of the marker pattern, matching one-to-one the 15
# image points assembled each frame in the main loop.
# NOTE(review): units look like metres given the 0.0255 spacing -- confirm
# against the physical target.
objpoints = np.array([[0, 0, 0], [0.0255, 0, 0], [-0.0255, 0, 0],[0, 0.0255, 0], [0, -0.0255, 0],
            [0, 0.0755, 0], [0.0255, 0.0755, 0], [-0.0255, 0.0755, 0], [0, 0.05, 0], [0, 0.101, 0],
            [0.1055, 0, 0], [0.131, 0, 0], [0.08, 0, 0], [0.1055, 0.0255, 0], [0.1055, -0.0255, 0]], dtype=np.float32)
def nothing(x):
    """No-op callback required by cv.createTrackbar; ignores its argument."""
    return None
#Criar janela para trackbar
# cv.namedWindow("Trackbars")
#Criar trackbars HSV
# cv.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
# cv.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
# cv.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
# cv.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
# cv.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
# cv.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
#Criar trackbars Binary
# cv.createTrackbar("Max Size", "Trackbars", 100, 1500, nothing)
# cv.createTrackbar("BlockSize", "Trackbars", 11, 49, nothing)
# cv.createTrackbar("Constant", "Trackbars", 2, 20, nothing)
def detect_contourn(image, color):
    """Find external contours of blobs matching a named colour.

    The image is thresholded in HSV with a per-colour preset range, cleaned
    with a morphological opening, and the external contours of the surviving
    blobs are returned.  The opened mask is also shown in an "Opening" window
    as a debugging aid.

    Args:
        image: BGR input frame.
        color: one of "Red", "Green", "Blue" or "Yellow".

    Returns:
        The list of contours found by cv.findContours.

    Raises:
        ValueError: if *color* is not one of the supported names.
    """
    hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
    # HSV threshold presets per colour.
    ranges = {
        "Red": (np.array([0, 127, 62]), np.array([20, 255, 255])),
        "Green": (np.array([30, 44, 67]), np.array([91, 255, 255])),
        "Blue": (np.array([65, 107, 86]), np.array([148, 236, 255])),
        "Yellow": (np.array([20, 100, 100]), np.array([32, 220, 255])),
    }
    try:
        lower, upper = ranges[color]
    except KeyError:
        raise ValueError(f"Unsupported color: {color!r}")
    # Fix: the previous version unconditionally overwrote these bounds with
    # cv.getTrackbarPos readings for "Trackbars" controls whose creation is
    # commented out, which made every colour preset dead code.
    mask = cv.inRange(hsv, lower, upper)
    # Morphological opening removes small speckle noise from the mask.
    kernel = np.ones((5, 5), np.uint8)
    opening = cv.morphologyEx(mask, cv.MORPH_OPEN, kernel, iterations=5)
    output = cv.bitwise_and(image, image, mask=opening)
    cv.imshow("Opening", output)
    # Find BLOB's contours
    cnts, _ = cv.findContours(opening.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    return cnts
def detect_contourn_binary(image):
    """Detect up to three dark circular markers in *image* and draw them.

    The frame is converted to grayscale, median-blurred and inverted, then
    thresholded so dark markers become white blobs.  Blobs whose area and
    circularity pass the filters are circled on *image*; when exactly three
    are found, the triangle between their centres is drawn and the slope
    (degrees) and length (pixels) of each side is printed.

    Returns:
        List of accepted centre points as (x, y) tuples.
    """
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    gray = cv.medianBlur(gray, 5)
    gray = cv.bitwise_not(gray)
    ret, img_thresh = cv.threshold(gray, 170, 255, cv.THRESH_BINARY)
    cnts, _ = cv.findContours(img_thresh.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    points = []
    for c in cnts:
        M = cv.moments(c)
        perimeter = cv.arcLength(c, True)
        # Keep blobs with a plausible area (moment m00 is the contour area).
        if perimeter != 0 and M["m00"] >= 1500 and M["m00"] <= 4000:
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            _, radius = cv.minEnclosingCircle(c)
            radius = int(radius)
            center = (cX, cY)
            # Circularity metric: equals 1.0 for a perfect circle.
            metric = (4 * math.pi * M["m00"]) / perimeter ** 2
            if metric > 0.8:
                # Draw the enclosing circle and centre of the accepted blob.
                cv.circle(image, center, radius + 1, (0, 255, 0), 1)
                cv.circle(image, center, 1, (0, 255, 0), -1)
                points.append(center)
    cv.imshow('gray', gray)
    if len(points) == 3:
        ####### FIND CIRCLE COORDINATES ###################
        slope1 = 180 * math.atan2((points[0][1] - points[1][1]), (points[0][0] - points[1][0])) / math.pi
        d1 = int(math.sqrt((points[1][0] - points[0][0]) ** 2 + (points[1][1] - points[0][1]) ** 2))
        print("Slope1:", slope1)
        print("Dist1:", d1)
        slope2 = 180 * math.atan2((points[1][1] - points[2][1]), (points[1][0] - points[2][0])) / math.pi
        d2 = int(math.sqrt((points[2][0] - points[1][0]) ** 2 + (points[2][1] - points[1][1]) ** 2))
        print("Slope2:", slope2)
        print("Dist2:", d2)
        slope3 = 180 * math.atan2((points[0][1] - points[2][1]), (points[0][0] - points[2][0])) / math.pi
        d3 = int(math.sqrt((points[0][0] - points[2][0]) ** 2 + (points[0][1] - points[2][1]) ** 2))
        print("Slope3", slope3)
        print("Dist3:", d3)
        cv.line(image, points[1], points[0], (255, 0, 0))
        cv.line(image, points[2], points[1], (0, 255, 0))
        cv.line(image, points[2], points[0], (0, 0, 255))
    # Fix: previously the function returned None although the caller binds
    # the result (`centers = detect_contourn_binary(img)`).
    return points
def center_mass_calculate(image, c):
    """Return the integer centre (cX, cY) and radius of contour *c*.

    The centre and radius come from the minimum enclosing circle.  If the
    contour is circular enough (circularity metric > 0.8), the circle and
    its centre are also drawn onto *image* as a visual aid.

    Returns:
        Tuple ``(cX, cY, radius)`` as ints, regardless of whether anything
        was drawn.
    """
    M = cv.moments(c)
    (cX, cY), radius = cv.minEnclosingCircle(c)
    cX = int(cX)
    cY = int(cY)
    center = (cX, cY)
    radius = int(radius)
    perimeter = cv.arcLength(c, True)
    # Fix: guard against degenerate contours -- a zero perimeter previously
    # raised ZeroDivisionError (the sibling detect_contourn_binary guards
    # this case; this function did not).
    if perimeter > 0:
        # Circularity metric: equals 1.0 for a perfect circle.
        metric = (4 * math.pi * M["m00"]) / perimeter ** 2
        if metric > 0.8:
            # Draw the enclosing circle and center of the shape on the image
            cv.circle(image, center, radius, (255, 255, 255), 1)
            cv.circle(image, center, 1, (255, 255, 255), -1)
    return cX, cY, radius
def draw(image, imgpoints, imgpts):
    """Draw the three projected axis segments onto *image*.

    Each segment runs from the first detected image point to one of the
    three projected axis endpoints, in blue / green / yellow respectively.
    Returns the annotated image.
    """
    origin = tuple(imgpoints[0].ravel())
    axis_colors = ((255, 0, 0), (0, 255, 0), (0, 255, 255))
    for endpoint, bgr in zip(imgpts, axis_colors):
        image = cv.line(image, origin, tuple(endpoint.ravel()), bgr, 3)
    return image
def get_element_vector(f1, f2, c1, c2):
    """Return ``sum_i f2[i, c1] * f1[i, c2]`` over the rows of *f1*.

    Cross-correlation entry between coordinate *c1* of the target frame
    points *f2* and coordinate *c2* of the source frame points *f1*
    (coordinates: x = 0, y = 1, z = 2).  Both arrays are assumed to hold
    one point per row; only the first ``len(f1)`` rows of *f2* are used,
    matching the original loop bound.
    """
    # Vectorized replacement for the original per-row Python loop.
    n = np.shape(f1)[0]
    return np.sum(np.asarray(f2)[:n, c1] * np.asarray(f1)[:n, c2])
def get_element_A(f1, c1, c2):
    """Return ``sum_i f1[i, c1] * f1[i, c2]`` over the rows of *f1*.

    Auto-correlation entry of the source frame points between coordinates
    *c1* and *c2* (x = 0, y = 1, z = 2), used to build the normal matrix
    of the least-squares frame fit.
    """
    # Vectorized replacement for the original per-row Python loop.
    arr = np.asarray(f1)
    return np.sum(arr[:, c1] * arr[:, c2])
def get_element_last(f1, c1):
    """Return the sum of coordinate *c1* over all rows of *f1*.

    Used for the translation column/row of the least-squares frame-fit
    normal equations (coordinates: x = 0, y = 1, z = 2).
    """
    # Vectorized replacement for the original per-row Python loop.
    return np.sum(np.asarray(f1)[:, c1])
def get_transform_frame(f1, f2):
    """Fit the 4x4 homogeneous affine transform mapping frame *f1* to *f2*.

    Solves the least-squares normal equations built from paired point sets
    (one point per row) and returns the transform with a [0, 0, 0, 1]
    bottom row appended.
    """
    n_points = np.shape(f1)[0]
    # Right-hand side: correlations between target coords and source coords,
    # plus the per-coordinate sums of the target points in the last column.
    rhs = np.zeros((3, 4))
    for row in range(3):
        for col in range(3):
            rhs[row, col] = get_element_vector(f1, f2, row, col)
        rhs[row, 3] = get_element_last(f2, row)
    # Normal matrix of the source points in homogeneous form.
    normal = np.zeros((4, 4))
    for row in range(3):
        for col in range(3):
            normal[row, col] = get_element_A(f1, row, col)
    for k in range(3):
        normal[k, 3] = get_element_last(f1, k)
        normal[3, k] = get_element_last(f1, k)
    normal[3, 3] = n_points
    # Solve and reshape into the conventional 3x4 affine layout.
    solution = np.dot(np.linalg.inv(normal), np.transpose(rhs))
    affine = np.transpose(solution)
    bottom_row = np.array([0, 0, 0, 1]).reshape(1, 4)
    return np.concatenate((affine, bottom_row), axis=0)
def get_pose(image, objpoints, imgpoints, mtx, dist):
    """Estimate the camera pose via PnP-RANSAC and draw the axes on *image*.

    Args:
        image: BGR frame to annotate (modified in place by draw()).
        objpoints: (N, 3) model points of the marker pattern.
        imgpoints: (N, 2) matching detected image points.
        mtx, dist: camera intrinsic matrix and distortion coefficients.

    Returns:
        Tuple ``(rvecs, tvecs, R_matrix, image)``: the Rodrigues rotation
        vector, translation vector, the 3x3 rotation matrix and the
        annotated image.
    """
    # 5 cm axis endpoints in the model frame.
    axis = np.float32([[.05, 0, 0], [0, .05, 0], [0, 0, .05]]).reshape(-1, 3)
    # Find the rotation and translation vectors.
    _, rvecs, tvecs, _ = cv.solvePnPRansac(objpoints, imgpoints, mtx, dist)
    # project 3D points to image plane
    imgpts, _ = cv.projectPoints(axis, rvecs, tvecs, mtx, dist)
    # Fix: `np.int` was removed in NumPy 1.24; the builtin int is the
    # drop-in replacement (np.int was an alias for it).
    imgpts = imgpts.astype(int)
    image = draw(image, imgpoints, imgpts)
    R_matrix, _ = cv.Rodrigues(rvecs)
    return rvecs, tvecs, R_matrix, image
#Camera instance with thread
# WebcamVideoStream captures frames on a background thread, so cap.read()
# always returns the latest frame without blocking.
cap = WebcamVideoStream(src=0).start()
frame_id = 0  # frames processed so far (drives the FPS estimate)
#Image constants
# Last known centre (cX*, cY*) and radius r* of the three colour markers;
# 0 means "not detected yet".
cX1 = 0
cY1 = 0
r1 = 0
cX2 = 0
cY2 = 0
r2 = 0
cX3 = 0
cY3 = 0
r3 = 0
#Transformation frame constants
obj_frame = []  # buffered camera-frame positions (collected 10 at a time)
ground_frame = []  # buffered ground-frame positions paired with obj_frame
distances = []  # NOTE(review): never appended to or read below -- unused
T_flag = False  # True while a freshly fitted transform T awaits use
while True:
    img = cap.read()
    frame_id += 1
    # NOTE(review): `centers` is not used below; the marker coordinates
    # come from the cX*/cY*/r* module-level variables instead.
    centers = detect_contourn_binary(img)
    #Get the circle's contourn based on it color
    # cnts_red = detect_contourn(img, "Red")
    # cnts_green = detect_contourn(img, "Green")
    # cnts_blue = detect_contourn(img, "Blue")
    #Get the center point of the detected circle
    # for c in cnts_red:
    #     cX1, cY1, r1 = center_mass_calculate(img, c)
    # for c in cnts_green:
    #     cX2, cY2, r2 = center_mass_calculate(img, c)
    # for c in cnts_blue:
    #     cX3, cY3, r3 = center_mass_calculate(img, c)
    # for c in cnts:
    #     cX, cY, r = center_mass_calculate(img, c)
    #Set the image points
    # Five image points per marker (centre plus the four axis-aligned rim
    # points), matching the 15 rows of `objpoints` defined above.
    imgpoints = np.array([[cX1, cY1], [cX1+r1, cY1], [cX1-r1, cY1], [cX1, cY1+r1], [cX1, cY1-r1],
                [cX2, cY2], [cX2+r2, cY2], [cX2-r2, cY2], [cX2, cY2+r2], [cX2, cY2-r2],
                [cX3, cY3], [cX3+r3, cY3], [cX3-r3, cY3], [cX3, cY3+r3], [cX3, cY3-r3]], dtype = np.float32)
    # Set Frame Transformation parameters
    if cX1 != 0 and cY1 != 0:
        # `global` is a no-op at module level; kept as written.
        global R_matrix
        #Get the extrinsics parameters
        rvecs, tvecs, R_matrix, image = get_pose(img, objpoints, imgpoints, mtx, dist)
        # Homogeneous translation vector [x, y, z, 1]^T.
        tvec = np.concatenate((tvecs, np.ones((1,1))), axis=0)
        print("Tvec:", tvec)
        #List containing object frame points
        obj_pos = np.reshape(tvecs, (1,3))
        obj_pos = np.asarray(obj_pos, np.float32)
        obj_frame.append(obj_pos)
        #List containing ground frame points (SENSOR)
        # NOTE(review): the ground-truth position is hard-coded to the
        # origin here; presumably a real sensor reading should replace
        # np.zeros -- confirm against the commented IMU code below.
        ground_pos = np.zeros((1,3))
        ground_pos = np.asarray(ground_pos, np.float32)
        ground_frame.append(ground_pos)
        # #Euler angles from quadrotor (SENSOR - IMU)
        # quad_rot = quad_position.env.mat_rot
        # phi_quad = 180*math.atan2(-quad_rot[2,1], quad_rot[2,2])/math.pi
        # theta_quad = 180*math.asin(quad_rot[2,0])/math.pi
        # psi_quad = 180*math.atan2(-quad_rot[1,0], [quad_rot[0,0]])/math.pi
        # print("Quadrotor Angles")
        # print("Roll:",phi_quad," Pitch:",theta_quad," Yaw:",psi_quad)
        #Estimated angles from camera
        # Euler angles (degrees) extracted from the estimated rotation matrix.
        phi_est = 180*math.atan2(-R_matrix[2,1], R_matrix[2,2])/math.pi
        theta_est = 180*math.asin(R_matrix[2, 0])/math.pi
        psi_est = 180*math.atan2(-R_matrix[1,0], R_matrix[0,0])/math.pi
        print("Estimated Angles")
        print("Roll:",phi_est," Pitch:",theta_est," Yaw:",psi_est)
        # print("Matriz Rotação Original:", quad_rot)
        print("Matriz Rotação Estimada:", R_matrix)
        cv.putText(img, "Roll:"+str(round(phi_est, 2)), (50,440), font, 1, (255,255,255), 2)
        cv.putText(img, "Pitch:"+str(round(theta_est, 2)), (200,440), font, 1, (255,255,255), 2)
        cv.putText(img, "Yaw:"+str(round(psi_est, 2)), (350,440), font, 1, (255,255,255), 2)
        #Evaluate transformation matrix between object frame and ground frame
        # Refit T from each batch of 10 paired samples, then reset the buffers.
        if len(obj_frame)==10 and len(ground_frame)==10:
            global T
            obj_frame = np.asarray(obj_frame, np.float32).reshape(10,3)
            ground_frame = np.asarray(ground_frame, np.float32).reshape(10,3)
            T = get_transform_frame(obj_frame, ground_frame)
            T_flag = True
            obj_frame = []
            ground_frame = []
        if T_flag:
            # Map the newest homogeneous camera position into the ground frame.
            real_pos = np.dot(T, tvec)
            # erro_X = quad_position.env.state[0] - real_pos[0]
            # erro_Y = quad_position.env.state[2] - real_pos[1]
            # erro_Z = quad_position.env.state[4] - real_pos[2]
            print("Ground Frame:", ground_pos)
            print("Real Frame:", np.transpose(real_pos)[:,:3])
            # print("E_X:",erro_X," E_Y:",erro_Y," E_Y:", erro_Z)
            T_flag = False
    # Print the image coordinates on the screen
    cv.putText(img," Center:"+str(cX1)+','+str(cY1), (10, 10), font, 1, (255,0,0), 1)
    cv.putText(img," Center:"+str(cX2)+','+str(cY2), (10, 25), font, 1, (0,255,0), 1)
    cv.putText(img," Center:"+str(cX3)+','+str(cY3), (10, 40), font, 1, (0,0,255), 1)
    # cv.putText(image," Center:"+str(cX4)+','+str(cY4), (10, 55), font, 1, (0,255,255), 1)
    #Compute FPS
    elapsed_time = time.time() - start_time
    fps = int(frame_id / elapsed_time)
    #Print FPS on the screen
    cv.putText(img, "FPS:" + str(fps), (10, 80), font, 1, (255,255,255), 1)
    cv.imshow('img',img)
    key = cv.waitKey(10)
    if key == 27:  # Esc key exits the loop
        break
cap.stop()
cv.destroyAllWindows() | [
"numpy.load",
"numpy.sum",
"math.asin",
"cv2.bitwise_and",
"cv2.medianBlur",
"cv2.arcLength",
"math.atan2",
"numpy.ones",
"numpy.shape",
"cv2.imshow",
"cv2.inRange",
"cv2.line",
"cv2.cvtColor",
"numpy.transpose",
"cv2.solvePnPRansac",
"numpy.reshape",
"cv2.getTrackbarPos",
"cv2.des... | [((215, 228), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (222, 228), True, 'import numpy as np\n'), ((465, 476), 'time.time', 'time.time', ([], {}), '()\n', (474, 476), False, 'import time\n'), ((490, 785), 'numpy.array', 'np.array', (['[[0, 0, 0], [0.0255, 0, 0], [-0.0255, 0, 0], [0, 0.0255, 0], [0, -0.0255, 0\n ], [0, 0.0755, 0], [0.0255, 0.0755, 0], [-0.0255, 0.0755, 0], [0, 0.05,\n 0], [0, 0.101, 0], [0.1055, 0, 0], [0.131, 0, 0], [0.08, 0, 0], [0.1055,\n 0.0255, 0], [0.1055, -0.0255, 0]]'], {'dtype': 'np.float32'}), '([[0, 0, 0], [0.0255, 0, 0], [-0.0255, 0, 0], [0, 0.0255, 0], [0, -\n 0.0255, 0], [0, 0.0755, 0], [0.0255, 0.0755, 0], [-0.0255, 0.0755, 0],\n [0, 0.05, 0], [0, 0.101, 0], [0.1055, 0, 0], [0.131, 0, 0], [0.08, 0, 0\n ], [0.1055, 0.0255, 0], [0.1055, -0.0255, 0]], dtype=np.float32)\n', (498, 785), True, 'import numpy as np\n'), ((12926, 12948), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (12946, 12948), True, 'import cv2 as cv\n'), ((1553, 1589), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (1564, 1589), True, 'import cv2 as cv\n'), ((2200, 2239), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""L - H"""', '"""Trackbars"""'], {}), "('L - H', 'Trackbars')\n", (2217, 2239), True, 'import cv2 as cv\n'), ((2250, 2289), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""L - S"""', '"""Trackbars"""'], {}), "('L - S', 'Trackbars')\n", (2267, 2289), True, 'import cv2 as cv\n'), ((2300, 2339), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""L - V"""', '"""Trackbars"""'], {}), "('L - V', 'Trackbars')\n", (2317, 2339), True, 'import cv2 as cv\n'), ((2350, 2389), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""U - H"""', '"""Trackbars"""'], {}), "('U - H', 'Trackbars')\n", (2367, 2389), True, 'import cv2 as cv\n'), ((2400, 2439), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""U - S"""', '"""Trackbars"""'], {}), "('U - S', 'Trackbars')\n", (2417, 2439), True, 
'import cv2 as cv\n'), ((2450, 2489), 'cv2.getTrackbarPos', 'cv.getTrackbarPos', (['"""U - V"""', '"""Trackbars"""'], {}), "('U - V', 'Trackbars')\n", (2467, 2489), True, 'import cv2 as cv\n'), ((2505, 2530), 'numpy.array', 'np.array', (['[l_h, l_s, l_v]'], {}), '([l_h, l_s, l_v])\n', (2513, 2530), True, 'import numpy as np\n'), ((2543, 2568), 'numpy.array', 'np.array', (['[u_h, u_s, u_v]'], {}), '([u_h, u_s, u_v])\n', (2551, 2568), True, 'import numpy as np\n'), ((2617, 2646), 'cv2.inRange', 'cv.inRange', (['hsv', 'lower', 'upper'], {}), '(hsv, lower, upper)\n', (2627, 2646), True, 'import cv2 as cv\n'), ((2711, 2736), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (2718, 2736), True, 'import numpy as np\n'), ((2777, 2835), 'cv2.morphologyEx', 'cv.morphologyEx', (['mask', 'cv.MORPH_OPEN', 'kernel'], {'iterations': '(5)'}), '(mask, cv.MORPH_OPEN, kernel, iterations=5)\n', (2792, 2835), True, 'import cv2 as cv\n'), ((2851, 2893), 'cv2.bitwise_and', 'cv.bitwise_and', (['image', 'image'], {'mask': 'opening'}), '(image, image, mask=opening)\n', (2865, 2893), True, 'import cv2 as cv\n'), ((2898, 2926), 'cv2.imshow', 'cv.imshow', (['"""Opening"""', 'output'], {}), "('Opening', output)\n", (2907, 2926), True, 'import cv2 as cv\n'), ((3115, 3152), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2GRAY'], {}), '(image, cv.COLOR_BGR2GRAY)\n', (3126, 3152), True, 'import cv2 as cv\n'), ((3164, 3186), 'cv2.medianBlur', 'cv.medianBlur', (['gray', '(5)'], {}), '(gray, 5)\n', (3177, 3186), True, 'import cv2 as cv\n'), ((3198, 3218), 'cv2.bitwise_not', 'cv.bitwise_not', (['gray'], {}), '(gray)\n', (3212, 3218), True, 'import cv2 as cv\n'), ((3344, 3390), 'cv2.threshold', 'cv.threshold', (['gray', '(170)', '(255)', 'cv.THRESH_BINARY'], {}), '(gray, 170, 255, cv.THRESH_BINARY)\n', (3356, 3390), True, 'import cv2 as cv\n'), ((4291, 4314), 'cv2.imshow', 'cv.imshow', (['"""gray"""', 'gray'], {}), "('gray', gray)\n", (4300, 4314), True, 'import cv2 
as cv\n'), ((5448, 5461), 'cv2.moments', 'cv.moments', (['c'], {}), '(c)\n', (5458, 5461), True, 'import cv2 as cv\n'), ((5558, 5582), 'cv2.minEnclosingCircle', 'cv.minEnclosingCircle', (['c'], {}), '(c)\n', (5579, 5582), True, 'import cv2 as cv\n'), ((5680, 5701), 'cv2.arcLength', 'cv.arcLength', (['c', '(True)'], {}), '(c, True)\n', (5692, 5701), True, 'import cv2 as cv\n'), ((6664, 6675), 'numpy.sum', 'np.sum', (['vec'], {}), '(vec)\n', (6670, 6675), True, 'import numpy as np\n'), ((6821, 6830), 'numpy.sum', 'np.sum', (['A'], {}), '(A)\n', (6827, 6830), True, 'import numpy as np\n'), ((6971, 6983), 'numpy.sum', 'np.sum', (['last'], {}), '(last)\n', (6977, 6983), True, 'import numpy as np\n'), ((7031, 7047), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {}), '((3, 4))\n', (7039, 7047), True, 'import numpy as np\n'), ((7217, 7233), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (7225, 7233), True, 'import numpy as np\n'), ((7478, 7494), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (7491, 7494), True, 'import numpy as np\n'), ((7509, 7529), 'numpy.transpose', 'np.transpose', (['matrix'], {}), '(matrix)\n', (7521, 7529), True, 'import numpy as np\n'), ((7539, 7560), 'numpy.dot', 'np.dot', (['A_inv', 'matrix'], {}), '(A_inv, matrix)\n', (7545, 7560), True, 'import numpy as np\n'), ((7569, 7584), 'numpy.transpose', 'np.transpose', (['T'], {}), '(T)\n', (7581, 7584), True, 'import numpy as np\n'), ((7641, 7678), 'numpy.concatenate', 'np.concatenate', (['(T, last_row)'], {'axis': '(0)'}), '((T, last_row), axis=0)\n', (7655, 7678), True, 'import numpy as np\n'), ((7917, 7967), 'cv2.solvePnPRansac', 'cv.solvePnPRansac', (['objpoints', 'imgpoints', 'mtx', 'dist'], {}), '(objpoints, imgpoints, mtx, dist)\n', (7934, 7967), True, 'import cv2 as cv\n'), ((8025, 8072), 'cv2.projectPoints', 'cv.projectPoints', (['axis', 'rvecs', 'tvecs', 'mtx', 'dist'], {}), '(axis, rvecs, tvecs, mtx, dist)\n', (8041, 8072), True, 'import cv2 as cv\n'), ((8167, 8186), 
'cv2.Rodrigues', 'cv.Rodrigues', (['rvecs'], {}), '(rvecs)\n', (8179, 8186), True, 'import cv2 as cv\n'), ((9209, 9491), 'numpy.array', 'np.array', (['[[cX1, cY1], [cX1 + r1, cY1], [cX1 - r1, cY1], [cX1, cY1 + r1], [cX1, cY1 -\n r1], [cX2, cY2], [cX2 + r2, cY2], [cX2 - r2, cY2], [cX2, cY2 + r2], [\n cX2, cY2 - r2], [cX3, cY3], [cX3 + r3, cY3], [cX3 - r3, cY3], [cX3, cY3 +\n r3], [cX3, cY3 - r3]]'], {'dtype': 'np.float32'}), '([[cX1, cY1], [cX1 + r1, cY1], [cX1 - r1, cY1], [cX1, cY1 + r1], [\n cX1, cY1 - r1], [cX2, cY2], [cX2 + r2, cY2], [cX2 - r2, cY2], [cX2, cY2 +\n r2], [cX2, cY2 - r2], [cX3, cY3], [cX3 + r3, cY3], [cX3 - r3, cY3], [\n cX3, cY3 + r3], [cX3, cY3 - r3]], dtype=np.float32)\n', (9217, 9491), True, 'import numpy as np\n'), ((12827, 12848), 'cv2.imshow', 'cv.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (12836, 12848), True, 'import cv2 as cv\n'), ((12867, 12881), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (12877, 12881), True, 'import cv2 as cv\n'), ((1674, 1696), 'numpy.array', 'np.array', (['[0, 127, 62]'], {}), '([0, 127, 62])\n', (1682, 1696), True, 'import numpy as np\n'), ((1713, 1737), 'numpy.array', 'np.array', (['[20, 255, 255]'], {}), '([20, 255, 255])\n', (1721, 1737), True, 'import numpy as np\n'), ((1823, 1845), 'numpy.array', 'np.array', (['[30, 44, 67]'], {}), '([30, 44, 67])\n', (1831, 1845), True, 'import numpy as np\n'), ((1862, 1886), 'numpy.array', 'np.array', (['[91, 255, 255]'], {}), '([91, 255, 255])\n', (1870, 1886), True, 'import numpy as np\n'), ((1971, 1994), 'numpy.array', 'np.array', (['[65, 107, 86]'], {}), '([65, 107, 86])\n', (1979, 1994), True, 'import numpy as np\n'), ((2011, 2036), 'numpy.array', 'np.array', (['[148, 236, 255]'], {}), '([148, 236, 255])\n', (2019, 2036), True, 'import numpy as np\n'), ((2123, 2147), 'numpy.array', 'np.array', (['[20, 100, 100]'], {}), '([20, 100, 100])\n', (2131, 2147), True, 'import numpy as np\n'), ((2164, 2188), 'numpy.array', 'np.array', (['[32, 220, 255]'], 
{}), '([32, 220, 255])\n', (2172, 2188), True, 'import numpy as np\n'), ((3533, 3546), 'cv2.moments', 'cv.moments', (['c'], {}), '(c)\n', (3543, 3546), True, 'import cv2 as cv\n'), ((3569, 3590), 'cv2.arcLength', 'cv.arcLength', (['c', '(True)'], {}), '(c, True)\n', (3581, 3590), True, 'import cv2 as cv\n'), ((5178, 5227), 'cv2.line', 'cv.line', (['image', 'points[1]', 'points[0]', '(255, 0, 0)'], {}), '(image, points[1], points[0], (255, 0, 0))\n', (5185, 5227), True, 'import cv2 as cv\n'), ((5236, 5285), 'cv2.line', 'cv.line', (['image', 'points[2]', 'points[1]', '(0, 255, 0)'], {}), '(image, points[2], points[1], (0, 255, 0))\n', (5243, 5285), True, 'import cv2 as cv\n'), ((5294, 5343), 'cv2.line', 'cv.line', (['image', 'points[2]', 'points[0]', '(0, 0, 255)'], {}), '(image, points[2], points[0], (0, 0, 255))\n', (5301, 5343), True, 'import cv2 as cv\n'), ((5927, 5979), 'cv2.circle', 'cv.circle', (['image', 'center', 'radius', '(255, 255, 255)', '(1)'], {}), '(image, center, radius, (255, 255, 255), 1)\n', (5936, 5979), True, 'import cv2 as cv\n'), ((5988, 6036), 'cv2.circle', 'cv.circle', (['image', 'center', '(1)', '(255, 255, 255)', '(-1)'], {}), '(image, center, 1, (255, 255, 255), -1)\n', (5997, 6036), True, 'import cv2 as cv\n'), ((7450, 7462), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (7458, 7462), True, 'import numpy as np\n'), ((8270, 8294), 'imutils.video.WebcamVideoStream', 'WebcamVideoStream', ([], {'src': '(0)'}), '(src=0)\n', (8287, 8294), False, 'from imutils.video import WebcamVideoStream\n'), ((9886, 9911), 'numpy.reshape', 'np.reshape', (['tvecs', '(1, 3)'], {}), '(tvecs, (1, 3))\n', (9896, 9911), True, 'import numpy as np\n'), ((9929, 9960), 'numpy.asarray', 'np.asarray', (['obj_pos', 'np.float32'], {}), '(obj_pos, np.float32)\n', (9939, 9960), True, 'import numpy as np\n'), ((10071, 10087), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (10079, 10087), True, 'import numpy as np\n'), ((10108, 10142), 'numpy.asarray', 
'np.asarray', (['ground_pos', 'np.float32'], {}), '(ground_pos, np.float32)\n', (10118, 10142), True, 'import numpy as np\n'), ((12649, 12660), 'time.time', 'time.time', ([], {}), '()\n', (12658, 12660), False, 'import time\n'), ((3811, 3835), 'cv2.minEnclosingCircle', 'cv.minEnclosingCircle', (['c'], {}), '(c)\n', (3832, 3835), True, 'import cv2 as cv\n'), ((4519, 4606), 'math.sqrt', 'math.sqrt', (['((points[1][0] - points[0][0]) ** 2 + (points[1][1] - points[0][1]) ** 2)'], {}), '((points[1][0] - points[0][0]) ** 2 + (points[1][1] - points[0][1]\n ) ** 2)\n', (4528, 4606), False, 'import math\n'), ((4776, 4863), 'math.sqrt', 'math.sqrt', (['((points[2][0] - points[1][0]) ** 2 + (points[2][1] - points[1][1]) ** 2)'], {}), '((points[2][0] - points[1][0]) ** 2 + (points[2][1] - points[1][1]\n ) ** 2)\n', (4785, 4863), False, 'import math\n'), ((5033, 5120), 'math.sqrt', 'math.sqrt', (['((points[0][0] - points[2][0]) ** 2 + (points[0][1] - points[2][1]) ** 2)'], {}), '((points[0][0] - points[2][0]) ** 2 + (points[0][1] - points[2][1]\n ) ** 2)\n', (5042, 5120), False, 'import math\n'), ((6579, 6591), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (6587, 6591), True, 'import numpy as np\n'), ((6738, 6750), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (6746, 6750), True, 'import numpy as np\n'), ((6895, 6907), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (6903, 6907), True, 'import numpy as np\n'), ((7600, 7622), 'numpy.array', 'np.array', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (7608, 7622), True, 'import numpy as np\n'), ((7776, 7830), 'numpy.float32', 'np.float32', (['[[0.05, 0, 0], [0, 0.05, 0], [0, 0, 0.05]]'], {}), '([[0.05, 0, 0], [0, 0.05, 0], [0, 0, 0.05]])\n', (7786, 7830), True, 'import numpy as np\n'), ((11801, 11816), 'numpy.dot', 'np.dot', (['T', 'tvec'], {}), '(T, tvec)\n', (11807, 11816), True, 'import numpy as np\n'), ((4136, 4188), 'cv2.circle', 'cv.circle', (['image', 'center', '(radius + 1)', '(0, 255, 0)', '(1)'], {}), '(image, 
center, radius + 1, (0, 255, 0), 1)\n', (4145, 4188), True, 'import cv2 as cv\n'), ((4203, 4247), 'cv2.circle', 'cv.circle', (['image', 'center', '(1)', '(0, 255, 0)', '(-1)'], {}), '(image, center, 1, (0, 255, 0), -1)\n', (4212, 4247), True, 'import cv2 as cv\n'), ((4421, 4489), 'math.atan2', 'math.atan2', (['(points[0][1] - points[1][1])', '(points[0][0] - points[1][0])'], {}), '(points[0][1] - points[1][1], points[0][0] - points[1][0])\n', (4431, 4489), False, 'import math\n'), ((4678, 4746), 'math.atan2', 'math.atan2', (['(points[1][1] - points[2][1])', '(points[1][0] - points[2][0])'], {}), '(points[1][1] - points[2][1], points[1][0] - points[2][0])\n', (4688, 4746), False, 'import math\n'), ((4935, 5003), 'math.atan2', 'math.atan2', (['(points[0][1] - points[2][1])', '(points[0][0] - points[2][0])'], {}), '(points[0][1] - points[2][1], points[0][0] - points[2][0])\n', (4945, 5003), False, 'import math\n'), ((9768, 9783), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (9775, 9783), True, 'import numpy as np\n'), ((10665, 10708), 'math.atan2', 'math.atan2', (['(-R_matrix[2, 1])', 'R_matrix[2, 2]'], {}), '(-R_matrix[2, 1], R_matrix[2, 2])\n', (10675, 10708), False, 'import math\n'), ((10739, 10764), 'math.asin', 'math.asin', (['R_matrix[2, 0]'], {}), '(R_matrix[2, 0])\n', (10748, 10764), False, 'import math\n'), ((10795, 10838), 'math.atan2', 'math.atan2', (['(-R_matrix[1, 0])', 'R_matrix[0, 0]'], {}), '(-R_matrix[1, 0], R_matrix[0, 0])\n', (10805, 10838), False, 'import math\n'), ((11502, 11535), 'numpy.asarray', 'np.asarray', (['obj_frame', 'np.float32'], {}), '(obj_frame, np.float32)\n', (11512, 11535), True, 'import numpy as np\n'), ((11573, 11609), 'numpy.asarray', 'np.asarray', (['ground_frame', 'np.float32'], {}), '(ground_frame, np.float32)\n', (11583, 11609), True, 'import numpy as np\n'), ((12089, 12111), 'numpy.transpose', 'np.transpose', (['real_pos'], {}), '(real_pos)\n', (12101, 12111), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import time
import os
# Pin every numerical backend (OpenMP, OpenBLAS, MKL, vecLib, numexpr) to a
# single thread so the model's own loops are not oversubscribed by BLAS
# thread pools.
NUM_THREADS = "1"
# NOTE: these must be set BEFORE importing numpy/scipy below — the backends
# read the variables once at import time.
os.environ["OMP_NUM_THREADS"] = NUM_THREADS
os.environ["OPENBLAS_NUM_THREADS"] = NUM_THREADS
os.environ["MKL_NUM_THREADS"] = NUM_THREADS
os.environ["VECLIB_MAXIMUM_THREADS"] = NUM_THREADS
os.environ["NUMEXPR_NUM_THREADS"] = NUM_THREADS
import numpy as np
from scipy import special
from numba import njit
class MyCTMP:
    """Collaborative Topic Model with Poisson ratings (CTMP), fit by variational EM.

    Documents are modeled LDA-style through topics ``beta`` and per-document
    topic proportions ``theta``; ratings are modeled through per-document topic
    offsets ``mu``, per-user Gamma variational parameters (``shp``, ``rte``)
    and per-rating multinomial variational parameters ``phi``.  ``theta`` is
    inferred per document with BOPE (MAP estimation with Bernoulli randomness).
    """

    def __init__(self, rating_GroupForUser, rating_GroupForMovie,
                 num_docs, num_words, num_topics, user_size, lamb, e, f, alpha, iter_infer):
        """
        Arguments:
            rating_GroupForUser: mapping user id -> ids of the movies (documents) the user liked.
            rating_GroupForMovie: mapping movie (document) id -> ids of the users who liked it.
            num_docs: Number of documents in the corpus.
            num_words: Number of unique words in the corpus (length of the vocabulary).
            num_topics: Number of topics shared by the whole corpus.
            user_size: Number of users.
            lamb: Precision of the Gaussian prior linking mu to theta.
            e, f: Shape/rate hyperparameters of the Gamma prior on user preferences.
            alpha: Hyperparameter for prior on topic mixture theta.
            iter_infer: Number of iterations of the BOPE/FW inference algorithm.
        """
        self.rating_GroupForUser = rating_GroupForUser
        self.rating_GroupForMovie = rating_GroupForMovie
        self.num_docs = num_docs
        self.num_words = num_words
        self.num_topics = num_topics
        self.user_size = user_size
        self.lamb = lamb
        self.e = e
        self.f = f
        self.alpha = alpha
        self.iter_infer = iter_infer
        # Random row-normalised topics (a beta pretrained by LDA may be loaded instead).
        self.beta = np.random.rand(self.num_topics, self.num_words) + 1e-10
        beta_norm = self.beta.sum(axis=1)
        self.beta /= beta_norm[:, np.newaxis]
        # Random row-normalised topic proportions (a theta pretrained by LDA may be loaded instead).
        self.theta = np.random.rand(self.num_docs, self.num_topics) + 1e-10
        theta_norm = self.theta.sum(axis=1)
        self.theta /= theta_norm[:, np.newaxis]
        # Topic offsets start equal to the topic proportions.
        self.mu = np.copy(self.theta)
        # Rating variational parameter, stored as a list of per-1000-user chunks.
        self.phi = self.get_phi()
        # Gamma variational parameters of the user preference vectors.
        self.shp = np.ones((self.user_size, self.num_topics)) * self.e
        self.rte = np.ones((self.user_size, self.num_topics)) * self.f

    def get_phi(self):
        """Build phi as a list of independent zero-initialised 3-D chunks.

        Phi conceptually has shape (user_size, num_docs, num_topics); a single
        array of that size does not fit in memory for the full data set, so it
        is cut into chunks of up to 1000 users each, plus one final chunk for
        the remaining ``user_size % 1000`` users (appended even when empty, so
        chunk indexing by ``u // 1000`` stays valid).

        Bug fix: the original implementation appended the SAME ndarray object
        for every full 1000-user chunk, so all chunks aliased one buffer and
        updating one user's phi silently clobbered the same row in every other
        chunk.  Each chunk is now its own allocation.
        """
        phi_matrices = list()
        full_chunks = self.user_size // 1000
        for _ in range(full_chunks):
            # Fresh allocation per chunk — chunks must not share memory.
            phi_matrices.append(np.zeros(shape=(1000, self.num_docs, self.num_topics)))
        remaining_block_size = self.user_size % 1000
        phi_matrices.append(np.zeros(shape=(remaining_block_size, self.num_docs, self.num_topics)))
        return phi_matrices

    def run_EM(self, wordids, wordcts, GLOB_ITER):
        """Run one EM iteration on the given corpus.

        First does an E step on wordids/wordcts to update the local variables,
        then uses the result to update beta in the M step.
        """
        self.GLOB_ITER = GLOB_ITER
        # E - expectation step
        self.e_step(wordids, wordcts)
        # M - maximization step
        self.m_step(wordids, wordcts)

    def e_step(self, wordids, wordcts):
        """E step: update phi, shp, rte for all users, then theta and mu for all documents."""
        s = time.time()
        # mu summed over documents; reused by every rte update within this sweep.
        mu_sum = self.mu.sum(axis=0)
        for u in range(self.user_size):
            movies_for_u = self.rating_GroupForUser[u]  # movie ids liked by user u
            if len(movies_for_u) == 0:
                # User liked no movie: nothing to update for this user.
                continue
            phi_block = self.phi[u // 1000]  # chunk holding this user's phi
            usr = u % 1000                   # user's row inside the chunk
            # Multinomial update: phi_ujk ∝ mu_jk * exp(E[log user-preference_uk]).
            phi_uj = np.exp(np.log(self.mu[[movies_for_u], :]) + special.psi(self.shp[u, :]) - np.log(self.rte[u, :]))
            phi_uj_sum = phi_uj[0].sum(axis=1)
            phi_uj_norm = phi_uj / phi_uj_sum[:, np.newaxis]
            phi_block[usr, [movies_for_u], :] = phi_uj_norm
            # Gamma updates of the user preference vector.
            self.shp[u, :] = self.e + phi_uj_norm[0].sum(axis=0)
            self.rte[u, :] = self.f + mu_sum
        print("users time:", time.time() - s)
        d_s = time.time()
        # Expected user preferences summed over users; normaliser for mu updates.
        norm_mu = np.copy((self.shp / self.rte).sum(axis=0))
        for d in range(self.num_docs):
            self.theta[d, :] = self.update_theta(wordids[d], wordcts[d], d)
            self.mu[d, :] = self.update_mu(norm_mu, d)
        print("docs time:", time.time() - d_s)

    def update_mu(self, norm_mu, d):
        """Closed-form update of the topic-offset vector mu_d.

        Solves the per-topic quadratic lamb*mu^2 - temp*mu - rating_phi = 0
        and keeps the positive root.  When nobody rated document d, mu
        collapses to the document's topic proportions.
        """
        mu_users = self.rating_GroupForMovie[d]  # users who liked document d
        if len(mu_users) == 0:
            return np.copy(self.theta[d, :])

        def get_phi(x):
            phi_ = self.phi[x // 1000]
            usr = x % 1000
            return phi_[usr, d, :]

        rating_phi = sum(map(get_phi, mu_users))
        mu = np.empty(self.num_topics)
        for k in range(self.num_topics):
            temp = -1 * norm_mu[k] + self.lamb * self.theta[d, k]
            delta = temp ** 2 + 4 * self.lamb * rating_phi[k]
            # Positive root of the quadratic (delta >= 0 since rating_phi >= 0).
            mu[k] = (temp + np.sqrt(delta)) / (2 * self.lamb)
        return mu

    @staticmethod
    @njit
    def x_(cts, beta, alpha, lamb, mu, tt):
        """Objective (regularised log posterior) of candidate theta ``tt``."""
        return np.dot(cts, np.log(np.dot(tt, beta))) + (alpha - 1) * np.log(tt) \
               - 1 * (lamb / 2) * (np.linalg.norm((tt - mu), ord=2)) ** 2

    @staticmethod
    @njit
    def t_(cts, beta, theta, mu, x, p, T_lower, T_upper, alpha, lamb, t):
        """One BOPE step: stochastic Frank-Wolfe moves for the lower and upper trajectories."""
        # Stochastic gradient components: likelihood part (G_1) and prior part (G_2).
        G_1 = (np.dot(beta, cts / x) + (alpha - 1) / theta) / p
        G_2 = (-1 * lamb * (theta - mu)) / (1 - p)
        # Lower trajectory: pick the likelihood with probability p.
        if np.random.rand() < p:
            T_lower[0] += 1
        else:
            T_lower[1] += 1
        ft_lower = T_lower[0] * G_1 + T_lower[1] * G_2
        index_lower = np.argmax(ft_lower)
        alpha = 1.0 / (t + 1)
        theta_lower = np.copy(theta)
        theta_lower *= 1 - alpha
        theta_lower[index_lower] += alpha
        # Upper trajectory: same scheme with complementary initial counts.
        if np.random.rand() < p:
            T_upper[0] += 1
        else:
            T_upper[1] += 1
        ft_upper = T_upper[0] * G_1 + T_upper[1] * G_2
        index_upper = np.argmax(ft_upper)
        alpha = 1.0 / (t + 1)
        theta_upper = np.copy(theta)
        theta_upper *= 1 - alpha
        theta_upper[index_upper] += alpha
        return theta_lower, theta_upper, index_lower, index_upper, alpha

    def update_theta(self, ids, cts, d):
        """Update theta for one document with the BOPE algorithm.

        Arguments:
            ids: word ids occurring in the document (an element of wordids).
            cts: matching word counts (an element of wordcts).
            d: document index.
        Returns the updated theta vector.
        """
        cts = cts.astype("float64")
        beta = self.beta[:, ids]   # restrict the topics to this document's vocabulary
        theta = self.theta[d, :]
        mu = self.mu[d, :]
        # x_j = sum_k theta_k * beta_kj
        x = np.dot(theta, beta)
        # Bernoulli parameter: likelihood vs prior.
        p = 0.9
        # Number of times the likelihood and the prior have been chosen.
        T_lower = [1, 0]
        T_upper = [0, 1]
        for t in range(1, self.iter_infer):
            theta_lower, theta_upper, index_lower, index_upper, alpha = self.t_(
                cts, beta, theta, mu, x, p, T_lower, T_upper, self.alpha, self.lamb, t)
            # Keep whichever trajectory currently scores better.
            compare = np.array([self.x_(cts, beta, self.alpha, self.lamb, mu, theta_lower)[0],
                                self.x_(cts, beta, self.alpha, self.lamb, mu, theta_upper)[0]])
            best = np.argmax(compare)
            if best == 0:
                theta = np.copy(theta_lower)
                x = x + alpha * (beta[index_lower, :] - x)
            else:
                theta = np.copy(theta_upper)
                x = x + alpha * (beta[index_upper, :] - x)
        return theta

    def m_step(self, wordids, wordcts):
        """M step: re-estimate the global topics beta from the current theta."""
        # Accumulate expected word counts per topic ("unit beta").
        beta = np.zeros((self.num_topics, self.num_words))
        for d in range(self.num_docs):
            beta[:, wordids[d]] += np.outer(self.theta[d], wordcts[d])
        # Normalise only columns with non-zero mass to avoid division by zero.
        beta_sum = beta.sum(axis=0)
        ids = np.where(beta_sum != 0)[0]
        unit_beta = beta[:, ids]
        unit_beta_norm = unit_beta.sum(axis=1)
        unit_beta /= unit_beta_norm[:, np.newaxis]
        # Install the new beta; untouched columns stay zero.
        self.beta = np.zeros((self.num_topics, self.num_words))
        self.beta[:, ids] += unit_beta
| [
"numpy.outer",
"numpy.log",
"numpy.copy",
"numpy.argmax",
"numpy.empty",
"numpy.zeros",
"numpy.ones",
"scipy.special.psi",
"time.time",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.dot",
"numpy.sqrt"
] | [((1892, 1911), 'numpy.copy', 'np.copy', (['self.theta'], {}), '(self.theta)\n', (1899, 1911), True, 'import numpy as np\n'), ((2792, 2840), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.num_docs, self.num_topics)'}), '(shape=(self.num_docs, self.num_topics))\n', (2800, 2840), True, 'import numpy as np\n'), ((3024, 3078), 'numpy.empty', 'np.empty', ([], {'shape': '(1000, self.num_docs, self.num_topics)'}), '(shape=(1000, self.num_docs, self.num_topics))\n', (3032, 3078), True, 'import numpy as np\n'), ((3358, 3428), 'numpy.empty', 'np.empty', ([], {'shape': '(remaining_block_size, self.num_docs, self.num_topics)'}), '(shape=(remaining_block_size, self.num_docs, self.num_topics))\n', (3366, 3428), True, 'import numpy as np\n'), ((4261, 4272), 'time.time', 'time.time', ([], {}), '()\n', (4270, 4272), False, 'import time\n'), ((5778, 5789), 'time.time', 'time.time', ([], {}), '()\n', (5787, 5789), False, 'import time\n'), ((5960, 5971), 'time.time', 'time.time', ([], {}), '()\n', (5969, 5971), False, 'import time\n'), ((6563, 6574), 'time.time', 'time.time', ([], {}), '()\n', (6572, 6574), False, 'import time\n'), ((6761, 6786), 'numpy.empty', 'np.empty', (['self.num_topics'], {}), '(self.num_topics)\n', (6769, 6786), True, 'import numpy as np\n'), ((8127, 8146), 'numpy.argmax', 'np.argmax', (['ft_lower'], {}), '(ft_lower)\n', (8136, 8146), True, 'import numpy as np\n'), ((8199, 8213), 'numpy.copy', 'np.copy', (['theta'], {}), '(theta)\n', (8206, 8213), True, 'import numpy as np\n'), ((8511, 8530), 'numpy.argmax', 'np.argmax', (['ft_upper'], {}), '(ft_upper)\n', (8520, 8530), True, 'import numpy as np\n'), ((8583, 8597), 'numpy.copy', 'np.copy', (['theta'], {}), '(theta)\n', (8590, 8597), True, 'import numpy as np\n'), ((9387, 9406), 'numpy.dot', 'np.dot', (['theta', 'beta'], {}), '(theta, beta)\n', (9393, 9406), True, 'import numpy as np\n'), ((9623, 9634), 'time.time', 'time.time', ([], {}), '()\n', (9632, 9634), False, 'import time\n'), ((10900, 10911), 
'time.time', 'time.time', ([], {}), '()\n', (10909, 10911), False, 'import time\n'), ((11257, 11300), 'numpy.zeros', 'np.zeros', (['(self.num_topics, self.num_words)'], {}), '((self.num_topics, self.num_words))\n', (11265, 11300), True, 'import numpy as np\n'), ((11731, 11774), 'numpy.zeros', 'np.zeros', (['(self.num_topics, self.num_words)'], {}), '((self.num_topics, self.num_words))\n', (11739, 11774), True, 'import numpy as np\n'), ((1390, 1437), 'numpy.random.rand', 'np.random.rand', (['self.num_topics', 'self.num_words'], {}), '(self.num_topics, self.num_words)\n', (1404, 1437), True, 'import numpy as np\n'), ((1686, 1732), 'numpy.random.rand', 'np.random.rand', (['self.num_docs', 'self.num_topics'], {}), '(self.num_docs, self.num_topics)\n', (1700, 1732), True, 'import numpy as np\n'), ((2141, 2183), 'numpy.ones', 'np.ones', (['(self.user_size, self.num_topics)'], {}), '((self.user_size, self.num_topics))\n', (2148, 2183), True, 'import numpy as np\n'), ((2212, 2254), 'numpy.ones', 'np.ones', (['(self.user_size, self.num_topics)'], {}), '((self.user_size, self.num_topics))\n', (2219, 2254), True, 'import numpy as np\n'), ((4396, 4407), 'time.time', 'time.time', ([], {}), '()\n', (4405, 4407), False, 'import time\n'), ((4840, 4851), 'time.time', 'time.time', ([], {}), '()\n', (4849, 4851), False, 'import time\n'), ((4898, 4909), 'time.time', 'time.time', ([], {}), '()\n', (4907, 4909), False, 'import time\n'), ((5378, 5389), 'time.time', 'time.time', ([], {}), '()\n', (5387, 5389), False, 'import time\n'), ((5436, 5447), 'time.time', 'time.time', ([], {}), '()\n', (5445, 5447), False, 'import time\n'), ((5615, 5626), 'time.time', 'time.time', ([], {}), '()\n', (5624, 5626), False, 'import time\n'), ((6103, 6114), 'time.time', 'time.time', ([], {}), '()\n', (6112, 6114), False, 'import time\n'), ((6236, 6247), 'time.time', 'time.time', ([], {}), '()\n', (6245, 6247), False, 'import time\n'), ((6266, 6277), 'time.time', 'time.time', ([], {}), '()\n', (6275, 
6277), False, 'import time\n'), ((6484, 6495), 'time.time', 'time.time', ([], {}), '()\n', (6493, 6495), False, 'import time\n'), ((7087, 7112), 'numpy.copy', 'np.copy', (['self.theta[d, :]'], {}), '(self.theta[d, :])\n', (7094, 7112), True, 'import numpy as np\n'), ((7957, 7973), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7971, 7973), True, 'import numpy as np\n'), ((8341, 8357), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8355, 8357), True, 'import numpy as np\n'), ((9815, 9826), 'time.time', 'time.time', ([], {}), '()\n', (9824, 9826), False, 'import time\n'), ((10076, 10087), 'time.time', 'time.time', ([], {}), '()\n', (10085, 10087), False, 'import time\n'), ((10197, 10208), 'time.time', 'time.time', ([], {}), '()\n', (10206, 10208), False, 'import time\n'), ((10386, 10412), 'numpy.array', 'np.array', (['[x_l[0], x_u[0]]'], {}), '([x_l[0], x_u[0]])\n', (10394, 10412), True, 'import numpy as np\n'), ((10432, 10450), 'numpy.argmax', 'np.argmax', (['compare'], {}), '(compare)\n', (10441, 10450), True, 'import numpy as np\n'), ((10468, 10479), 'time.time', 'time.time', ([], {}), '()\n', (10477, 10479), False, 'import time\n'), ((10567, 10578), 'time.time', 'time.time', ([], {}), '()\n', (10576, 10578), False, 'import time\n'), ((10848, 10859), 'time.time', 'time.time', ([], {}), '()\n', (10857, 10859), False, 'import time\n'), ((11375, 11410), 'numpy.outer', 'np.outer', (['self.theta[d]', 'wordcts[d]'], {}), '(self.theta[d], wordcts[d])\n', (11383, 11410), True, 'import numpy as np\n'), ((11489, 11512), 'numpy.where', 'np.where', (['(beta_sum != 0)'], {}), '(beta_sum != 0)\n', (11497, 11512), True, 'import numpy as np\n'), ((5153, 5168), 'numpy.copy', 'np.copy', (['phi_uj'], {}), '(phi_uj)\n', (5160, 5168), True, 'import numpy as np\n'), ((7805, 7826), 'numpy.dot', 'np.dot', (['beta', '(cts / x)'], {}), '(beta, cts / x)\n', (7811, 7826), True, 'import numpy as np\n'), ((10629, 10649), 'numpy.copy', 'np.copy', (['theta_lower'], {}), 
'(theta_lower)\n', (10636, 10649), True, 'import numpy as np\n'), ((10751, 10771), 'numpy.copy', 'np.copy', (['theta_upper'], {}), '(theta_upper)\n', (10758, 10771), True, 'import numpy as np\n'), ((5005, 5027), 'numpy.log', 'np.log', (['self.rte[u, :]'], {}), '(self.rte[u, :])\n', (5011, 5027), True, 'import numpy as np\n'), ((7562, 7572), 'numpy.log', 'np.log', (['tt'], {}), '(tt)\n', (7568, 7572), True, 'import numpy as np\n'), ((7610, 7640), 'numpy.linalg.norm', 'np.linalg.norm', (['(tt - mu)'], {'ord': '(2)'}), '(tt - mu, ord=2)\n', (7624, 7640), True, 'import numpy as np\n'), ((4938, 4972), 'numpy.log', 'np.log', (['self.mu[[movies_for_u], :]'], {}), '(self.mu[[movies_for_u], :])\n', (4944, 4972), True, 'import numpy as np\n'), ((4975, 5002), 'scipy.special.psi', 'special.psi', (['self.shp[u, :]'], {}), '(self.shp[u, :])\n', (4986, 5002), False, 'from scipy import special\n'), ((5054, 5069), 'numpy.copy', 'np.copy', (['phi_uj'], {}), '(phi_uj)\n', (5061, 5069), True, 'import numpy as np\n'), ((7368, 7382), 'numpy.sqrt', 'np.sqrt', (['delta'], {}), '(delta)\n', (7375, 7382), True, 'import numpy as np\n'), ((7527, 7543), 'numpy.dot', 'np.dot', (['tt', 'beta'], {}), '(tt, beta)\n', (7533, 7543), True, 'import numpy as np\n')] |
import os
import time
import numpy as np
import tensorflow as tf
from face_py import facenet
from face_py import detect_face
from face_py import align_dataset_mtcnn
import cv2
import math
from util import Logging
class FeatureExtractor():
    """Wraps an MTCNN face detector and a frozen FaceNet model to turn an
    image into one embedding vector per detected face."""

    def __init__(self) :
        """Build the TF graph: load the frozen FaceNet model and create the
        MTCNN detection networks (P-Net, R-Net, O-Net)."""
        start_time = time.time()
        with tf.Graph().as_default():
            config = tf.ConfigProto()
            # cap GPU memory so the detection and embedding sessions can coexist
            config.gpu_options.per_process_gpu_memory_fraction = 0.25
            self.sess_detect = tf.Session(config = config)
            # frozen FaceNet graph shipped next to this file
            model = os.path.join(os.path.dirname(os.path.realpath(__file__)), '20180402-114759.pb')
            facenet.load_model(model)
            self.images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            self.embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            self.phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
            self.embedding_size = self.embeddings.get_shape()[1]
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.25)
            sess1 = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
            with sess1.as_default():
                self.pnet, self.rnet, self.onet = detect_face.create_mtcnn(sess1, None)
        end_time = time.time()
        print(Logging.i("feature extraction model is loaded(time: {})".format(end_time - start_time)))

    def extract_feature(self, image_path):
        """Detect and align faces in ``image_path``, then embed each one.

        Returns
        -------
        (emb_array, bounding_boxes): embeddings of shape
        (n_faces, embedding_size) plus the detector's bounding boxes.
        When no face is detected, both returned values are empty.
        """
        result = []
        (bounding_boxes, images) = align_dataset_mtcnn.main(self.pnet, self.rnet, self.onet, image_path)
        if not bounding_boxes:
            # BUG FIX: the original returned a bare empty list here while the
            # success path returns a 2-tuple, which broke callers that unpack
            # two values; keep the two-value contract in both cases.
            return result, bounding_boxes
        batch_size = 20
        face_images = []
        for img in images:
            # FaceNet expects 160x160 inputs
            face_images.append(cv2.resize(img, (160, 160), interpolation=cv2.INTER_LINEAR))
        nrof_images = len(face_images)
        faces = np.zeros((nrof_images, 160, 160, 3))
        for i in range(nrof_images):
            # BUG FIX: the original tested faces[i].ndim, but faces is a
            # zero-initialised 3-D buffer so that branch was dead; test the
            # actual crop so grayscale crops are expanded to RGB first.
            if face_images[i].ndim == 2:
                face_images[i] = facenet.to_rgb(face_images[i])
            # per-image whitening before feeding the network
            faces[i, :, :, :] = facenet.prewhiten(face_images[i])
        # embed in mini-batches to bound memory use
        nrof_batches_per_epoch = int(math.ceil(1.0 * nrof_images / batch_size))
        emb_array = np.zeros((nrof_images, self.embedding_size))
        for i in range(nrof_batches_per_epoch):
            start_index = i * batch_size
            end_index = min((i + 1) * batch_size, nrof_images)
            face_batch = faces[start_index:end_index, :, :, :]
            feed_dict = {self.images_placeholder: face_batch, self.phase_train_placeholder: False}
            emb_array[start_index:end_index, :] = self.sess_detect.run(self.embeddings, feed_dict=feed_dict)
        return emb_array, bounding_boxes
if __name__ == '__main__':
    # demo: extract FaceNet embeddings for every face in a sample image,
    # then persist the embeddings (.npy) and the detected bounding boxes (.txt)
    image_path = "/workspace/inference/sample.jpg"
    feature_file_name = "feature"
    bbox_file_name = "bbox.txt"
    face = FeatureExtractor()
    feature, bounding_boxes = face.extract_feature(image_path)
    print(Logging.i("Feature is successfully extracted"))
    # BUG FIX: save under the configured name; the original hard-coded
    # np.save("feature", ...) and ignored feature_file_name
    np.save(feature_file_name, feature)
    print(Logging.i("Feature is successfully saved({}.npy)".format(feature_file_name)))
    with open(bbox_file_name, "w") as bbox_file:
        bbox_file.write(str(bounding_boxes))
    print(Logging.i("Bounding box list file is successfully saved({})".format(bbox_file_name)))
"util.Logging.i",
"numpy.save",
"face_py.align_dataset_mtcnn.main",
"face_py.detect_face.create_mtcnn",
"math.ceil",
"face_py.facenet.prewhiten",
"os.path.realpath",
"numpy.zeros",
"tensorflow.Session",
"time.time",
"tensorflow.ConfigProto",
"face_py.facenet.load_model",
"face_py.facenet.to_... | [((3111, 3138), 'numpy.save', 'np.save', (['"""feature"""', 'feature'], {}), "('feature', feature)\n", (3118, 3138), True, 'import numpy as np\n'), ((289, 300), 'time.time', 'time.time', ([], {}), '()\n', (298, 300), False, 'import time\n'), ((1317, 1328), 'time.time', 'time.time', ([], {}), '()\n', (1326, 1328), False, 'import time\n'), ((1563, 1632), 'face_py.align_dataset_mtcnn.main', 'align_dataset_mtcnn.main', (['self.pnet', 'self.rnet', 'self.onet', 'input_path'], {}), '(self.pnet, self.rnet, self.onet, input_path)\n', (1587, 1632), False, 'from face_py import align_dataset_mtcnn\n'), ((1958, 1994), 'numpy.zeros', 'np.zeros', (['(nrof_images, 160, 160, 3)'], {}), '((nrof_images, 160, 160, 3))\n', (1966, 1994), True, 'import numpy as np\n'), ((2302, 2346), 'numpy.zeros', 'np.zeros', (['(nrof_images, self.embedding_size)'], {}), '((nrof_images, self.embedding_size))\n', (2310, 2346), True, 'import numpy as np\n'), ((3058, 3104), 'util.Logging.i', 'Logging.i', (['"""Feature is successfully extracted"""'], {}), "('Feature is successfully extracted')\n", (3067, 3104), False, 'from util import Logging\n'), ((360, 376), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (374, 376), True, 'import tensorflow as tf\n'), ((478, 503), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (488, 503), True, 'import tensorflow as tf\n'), ((616, 641), 'face_py.facenet.load_model', 'facenet.load_model', (['model'], {}), '(model)\n', (634, 641), False, 'from face_py import facenet\n'), ((1014, 1065), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.25)'}), '(per_process_gpu_memory_fraction=0.25)\n', (1027, 1065), True, 'import tensorflow as tf\n'), ((1798, 1863), 'cv2.resize', 'cv2.resize', (['images[i]', '(160, 160)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(images[i], (160, 160), interpolation=cv2.INTER_LINEAR)\n', (1808, 1863), False, 'import cv2\n'), ((2135, 2168), 
'face_py.facenet.prewhiten', 'facenet.prewhiten', (['face_images[i]'], {}), '(face_images[i])\n', (2152, 2168), False, 'from face_py import facenet\n'), ((2239, 2280), 'math.ceil', 'math.ceil', (['(1.0 * nrof_images / batch_size)'], {}), '(1.0 * nrof_images / batch_size)\n', (2248, 2280), False, 'import math\n'), ((1260, 1297), 'face_py.detect_face.create_mtcnn', 'detect_face.create_mtcnn', (['sess1', 'None'], {}), '(sess1, None)\n', (1284, 1297), False, 'from face_py import detect_face\n'), ((2092, 2116), 'face_py.facenet.to_rgb', 'facenet.to_rgb', (['faces[i]'], {}), '(faces[i])\n', (2106, 2116), False, 'from face_py import facenet\n'), ((314, 324), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (322, 324), True, 'import tensorflow as tf\n'), ((554, 580), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (570, 580), False, 'import os\n'), ((680, 702), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (700, 702), True, 'import tensorflow as tf\n'), ((763, 785), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (783, 785), True, 'import tensorflow as tf\n'), ((864, 886), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (884, 886), True, 'import tensorflow as tf\n'), ((1104, 1171), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options', 'log_device_placement': '(False)'}), '(gpu_options=gpu_options, log_device_placement=False)\n', (1118, 1171), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python
# coding: utf-8
# # **<NAME> - Tracking Data Assignment**
#
# Sunday 11th October 2020
#
# ---
# In[1]:
import pandas as pd
import numpy as np
import datetime
# imports required by data prep functions
import json
# Laurie's libraries
import scipy.signal as signal
import matplotlib.animation as animation
# removing annoying matplotlib warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
import re
import os
from collections import Counter, defaultdict
# plotting
import matplotlib.pyplot as plt
pd.options.display.max_rows = 500
pd.options.display.max_columns = 500
signalityRepo = r'2019/Tracking Data/'
movieRepo = r'Movies/'
# # **1)** Data Preparation Functions
# In[2]:
def initialise_dic_tracks(df_homePlayers, df_awayPlayers):
    """
    Build empty per-player track dictionaries for both teams.

    Keys follow the '<Team>_<playerIndex>_<axis>' convention (e.g. 'Home_3_x'),
    one x and one y key per player, each mapping to an empty list that will be
    filled frame by frame.
    """
    dic_home_tracks = {f'Home_{idx}_{axis}': []
                       for idx in df_homePlayers.playerIndex
                       for axis in ('x', 'y')}
    dic_away_tracks = {f'Away_{idx}_{axis}': []
                       for idx in df_awayPlayers.playerIndex
                       for axis in ('x', 'y')}
    return dic_home_tracks, dic_away_tracks
# In[3]:
def populate_df_tracks(homeAway, homeAway_tracks, playersJerseyMapping, dic_tracks, df_players):
    """
    Convert one team's raw JSON frame tracks into a Laurie-style tracking DataFrame.

    For every frame, each tracked player's (x, y) is appended to the matching
    '<Team>_<playerIndex>_<axis>' list; players without a track in that frame
    receive (NaN, NaN) so all columns stay the same length. The y axis is
    flipped (multiplied by -1) to match Laurie's plotting convention.
    """
    all_jerseys = set(df_players.jersey_number.values)
    for frame in homeAway_tracks:
        jerseys_seen = set()
        for player in frame:
            jersey = player.get('jersey_number')
            player_index = playersJerseyMapping[jersey]
            x, y = player.get('position', [np.nan, np.nan])
            jerseys_seen.add(jersey)
            dic_tracks[f'{homeAway}_{player_index}_x'].append(x)
            # y flipped so the data syncs with Laurie's plotting methods
            dic_tracks[f'{homeAway}_{player_index}_y'].append(-1 * y)
        # pad every jersey missing from this frame with NaN coordinates
        for jersey in all_jerseys - jerseys_seen:
            player_index = playersJerseyMapping[jersey]
            dic_tracks[f'{homeAway}_{player_index}_x'].append(np.nan)
            dic_tracks[f'{homeAway}_{player_index}_y'].append(np.nan)
    return pd.DataFrame(dic_tracks)
# In[4]:
def to_single_playing_direction(home,away):
    """
    Force both halves of the match to share one attacking direction.

    Negates every positional column (any column whose name ends in 'x' or 'y')
    from the first frame of the second half onwards, for both the home and
    away tracking DataFrames. The frames are modified in place and returned.

    Requires the co-ords to be symmetric about 0,0 (i.e. going from roughly
    -60 to +60 in the x direction and -34 to +34 in the y direction).
    """
    for team in [home,away]:
        # idxmax() returns the index of the FIRST occurrence of the maximum
        # Period value (2), i.e. the first frame of the second half.
        # BUG FIX: the original called idxmax(2), passing 2 as the positional
        # `axis` argument, which recent pandas versions reject as invalid.
        second_half_idx = team.Period.idxmax()
        columns = [c for c in team.columns if c[-1].lower() in ['x','y']]
        team.loc[second_half_idx:,columns] *= -1
    return home,away
# In[5]:
def shoot_direction(gk_x_position):
    """
    Infer a team's attacking direction from its goalkeeper's x position.

    Returns -1 (shooting right-to-left) when the keeper sits on the positive
    x side of the pitch, otherwise 1 (shooting left-to-right).
    """
    return -1 if gk_x_position > 0 else 1
# In[6]:
def parse_raw_to_df(signalityRepo, rootFileName, interpolate=True):
    """
    Parse the per-half Signality JSON files for one match into DataFrames.

    Takes the raw root of a match string, e.g. '20190930.Hammarby-Örebrö', and
    reads '<root>.1/.2-info_live.json' and '<root>.1/.2-tracks.json' from
    signalityRepo.

    Parameters
    -----------
    signalityRepo: directory holding the Signality JSON files
    rootFileName: match file root, e.g. '20190930.Hammarby-Örebrö'
    interpolate: if True, linearly interpolate enclosed NaN gaps in the
                 player/ball positions (limit_area='inside')

    Returns
    -----------
    df_homePlayers, df_awayPlayers: player metadata DataFrames
    df_homeTracks, df_awayTracks: full-match tracking DataFrames (both halves
        concatenated, forced to a single playing direction)
    pitchLength, pitchWidth: pitch dimensions
    homePlayersJerseyMapping, awayPlayersJerseyMapping: jersey number ->
        playerIndex dicts
    """
    lst_df_home = []
    lst_df_away = []
    # one pass per half: files are suffixed '.1' (first half) and '.2' (second half)
    for half in ['.1','.2']:
        # producing filename prefix (just need to add either "-info_live.json" or "-tracks.json")
        fileNamePrefix = rootFileName + half
        # load info
        ## looks like the info JSON is duplicated between the two halves
        with open(os.path.join(signalityRepo, f'{fileNamePrefix}-info_live.json')) as f:
            info = json.load(f)
        # load tracks
        with open(os.path.join(signalityRepo, f'{fileNamePrefix}-tracks.json')) as f:
            tracks = json.load(f)
        # unpacking info
        ## looks like .1 and .2 files are duplicated, so just looking at the .1 (first half file)
        if half == '.1':
            matchId = info.get('id')
            venueId = info.get('venueId')
            timeStart = info.get('time_start')
            pitchLength, pitchWidth = info.get('calibration').get('pitch_size')
            homeTeam = info.get('team_home_name')
            awayTeam = info.get('team_away_name')
            # unpacking players
            homePlayers = info.get('team_home_players')
            awayPlayers = info.get('team_away_players')
            homeLineup = info.get('team_home_lineup')
            awayLineup = info.get('team_away_lineup')
            # NOTE(review): the lineup dicts appear to map playerIndex -> jersey
            # number; the 'Switch' dicts invert them to jersey -> playerIndex —
            # confirm against the Signality schema
            homeLineupSwitch = {homeLineup[i]:i for i in homeLineup}
            awayLineupSwitch = {awayLineup[i]:i for i in awayLineup}
            # putting player metadata in dataframe
            df_homePlayers = pd.DataFrame(homePlayers)
            df_awayPlayers = pd.DataFrame(awayPlayers)
            df_homePlayers['teamName'] = homeTeam
            df_awayPlayers['teamName'] = awayTeam
            # adding matchId to the player dataframes
            df_homePlayers['matchId'] = matchId
            df_awayPlayers['matchId'] = matchId
            df_homePlayers['matchName'] = rootFileName
            df_awayPlayers['matchName'] = rootFileName
            # adding 1-11 + sub player indices (will probably use these for the final column names like Laurie in the tracks df)
            df_homePlayers['playerIndex'] = [int(homeLineupSwitch[i]) if i in homeLineupSwitch else np.nan for i in df_homePlayers.jersey_number.values]
            df_awayPlayers['playerIndex'] = [int(awayLineupSwitch[i]) if i in awayLineupSwitch else np.nan for i in df_awayPlayers.jersey_number.values]
            # substitutes are not in the lineup: give them the next indices after the starters
            df_homePlayers.loc[pd.isna(df_homePlayers['playerIndex']) == True, 'playerIndex'] = np.arange(int(np.nanmax(df_homePlayers.playerIndex))+1, len(df_homePlayers)+1)
            df_awayPlayers.loc[pd.isna(df_awayPlayers['playerIndex']) == True, 'playerIndex'] = np.arange(int(np.nanmax(df_awayPlayers.playerIndex))+1, len(df_awayPlayers)+1)
            df_homePlayers['playerIndex'] = df_homePlayers.playerIndex.apply(lambda x: int(x))
            df_awayPlayers['playerIndex'] = df_awayPlayers.playerIndex.apply(lambda x: int(x))
            # re-jigging cols and re-ordering rows
            df_homePlayers = df_homePlayers[['matchId','matchName','teamName','playerIndex','jersey_number','name']].sort_values('playerIndex')
            df_awayPlayers = df_awayPlayers[['matchId','matchName','teamName','playerIndex','jersey_number','name']].sort_values('playerIndex')
            homePlayersJerseyMapping = {i:j for i, j in zip(df_homePlayers.jersey_number, df_homePlayers.playerIndex)}
            awayPlayersJerseyMapping = {i:j for i, j in zip(df_awayPlayers.jersey_number, df_awayPlayers.playerIndex)}
        ## parsing the track data
        phase = int(half[-1])
        # extracting home and away tracks
        home_tracks = [i.get('home_team') for i in tracks]
        away_tracks = [i.get('away_team') for i in tracks]
        # ball tracks
        ball_tracks = [i.get('ball') for i in tracks]
        # ball position is a 3-vector (x, y, z); missing positions become NaNs
        ball_tracks_position = [(i.get('position')[0],i.get('position')[1],i.get('position')[2]) if i.get('position') != None else (np.nan, np.nan, np.nan) for i in ball_tracks]
        ball_x = [i[0] for i in ball_tracks_position]
        # flipping the y-coordinate
        ball_y = [-1*i[1] for i in ball_tracks_position]
        ball_z = [i[2] for i in ball_tracks_position]
        # jersey number of the player in possession (NaN when unknown)
        ball_jerseyPossession = [i.get('player') for i in ball_tracks]
        ball_jerseyPossession = [int(i) if pd.isna(i) == False else np.nan for i in ball_jerseyPossession]
        # match timestamps
        match_time = [i.get('match_time') for i in tracks]
        period = [i.get('phase') for i in tracks]
        timeStamp = pd.to_datetime([datetime.datetime.utcfromtimestamp(i.get('utc_time')/1000) for i in tracks])
        # unpacking tracks
        ## 1) initialising dictionaries for home and away teams
        dic_home_tracks, dic_away_tracks = initialise_dic_tracks(df_homePlayers, df_awayPlayers)
        ## 2) producing home tracking dataframe
        df_home_tracks = populate_df_tracks('Home', home_tracks, homePlayersJerseyMapping, dic_home_tracks, df_homePlayers)
        ## 3) producing away tracking dataframe
        df_away_tracks = populate_df_tracks('Away', away_tracks, awayPlayersJerseyMapping, dic_away_tracks, df_awayPlayers)
        # putting things together
        ## 1) home
        df_home_tracks['ball_x'] = ball_x
        df_home_tracks['ball_y'] = ball_y
        df_home_tracks['ball_z'] = ball_z
        # at this point we just have player and ball positions in the dataframe, so providing option now to interpolate
        # linearly interpolating (inside only) when there are enclosed NaNs - this is the shortest path, and will thus be the slowest in a set amount of time, so won't overestimate speed / acceleration when we're missing data
        if interpolate:
            df_home_tracks = df_home_tracks.interpolate(method='linear', limit_area='inside')
        # and now adding things where we wouldn't want there to be any interpolation (like the ball_jerseyPossession)
        df_home_tracks['halfIndex'] = df_home_tracks.index
        df_home_tracks['matchId'] = matchId
        df_home_tracks['matchName'] = rootFileName
        df_home_tracks['Period'] = period
        df_home_tracks['Time [s]'] = np.array(match_time) / 1000
        df_home_tracks['TimeStamp'] = timeStamp
        df_home_tracks['ball_jerseyPossession'] = ball_jerseyPossession
        ## 2) away
        df_away_tracks['ball_x'] = ball_x
        df_away_tracks['ball_y'] = ball_y
        df_away_tracks['ball_z'] = ball_z
        # option to interpolate, like with the home team
        if interpolate:
            df_away_tracks = df_away_tracks.interpolate(method='linear', limit_area='inside')
        df_away_tracks['halfIndex'] = df_away_tracks.index
        df_away_tracks['matchId'] = matchId
        df_away_tracks['matchName'] = rootFileName
        df_away_tracks['Period'] = period
        df_away_tracks['Time [s]'] = np.array(match_time) / 1000
        df_away_tracks['TimeStamp'] = timeStamp
        df_away_tracks['ball_jerseyPossession'] = ball_jerseyPossession
        lst_df_home.append(df_home_tracks)
        lst_df_away.append(df_away_tracks)
    # combining the first and second half data
    df_homeTracks = pd.concat(lst_df_home, ignore_index=True)
    df_awayTracks = pd.concat(lst_df_away, ignore_index=True)
    # getting a master index for the full game
    df_homeTracks['index'] = df_homeTracks.index
    df_awayTracks['index'] = df_awayTracks.index
    # forcing the second half of the match to follow the same direction as the first
    df_homeTracks, df_awayTracks = to_single_playing_direction(df_homeTracks, df_awayTracks)
    # use GK x position to know whether team is shooting right-to-left or left-to-right.
    avHomeGKxTrack = df_homeTracks.Home_1_x.mean()
    avAwayGKxTrack = df_awayTracks.Away_1_x.mean()
    # apply shooting direction to both home and away dataframes
    df_homeTracks['shootingDirection'] = shoot_direction(avHomeGKxTrack)
    df_awayTracks['shootingDirection'] = shoot_direction(avAwayGKxTrack)
    return df_homePlayers, df_awayPlayers, df_homeTracks, df_awayTracks, pitchLength, pitchWidth, homePlayersJerseyMapping, awayPlayersJerseyMapping
# # **2)** Calculate first and second derivatives of position: velocity and acceleration
# In[7]:
def calc_opp_goal_position(shootingDirection, pitchLength):
    """
    Locate the goal a team is attacking.

    shootingDirection is +1 for left-to-right and -1 for right-to-left; the
    goal sits on the corresponding end line at y = 0.
    """
    half_length = pitchLength / 2
    if shootingDirection == 1:
        # attacking the right-hand goal
        return (half_length, 0)
    # attacking the left-hand goal
    return (-1 * half_length, 0)
# In[8]:
def calc_player_velocities(team, pitchLength = 105, smoothing = True, filter_ = 'Savitzky-Golay', window = 7, polyorder = 1, maxspeed = 12):
    """
    Calculate player x,y components of velocities and acceleration.
    Also calculates scalar quantities for velocity (i.e. speed), acceleration,
    and player distance to the goal being attacked.

    Parameters
    -----------
    team: the tracking DataFrame for home or away team
    pitchLength: pitch length in meters, used to locate the opposition goal. Default is 105
    smoothing: boolean variable that determines whether velocity measures are smoothed. Default is True.
    filter_: type of filter to use when smoothing the velocities. Default is Savitzky-Golay, which fits a polynomial of order 'polyorder' to the data within each window
    window: smoothing window size in # of frames
    polyorder: order of the polynomial for the Savitzky-Golay filter. Default is 1 - a linear fit to the velocity, so gradient is the acceleration
    maxspeed: the maximum speed that a player can realisitically achieve (in meters/second). Speed measures that exceed maxspeed are tagged as outliers and set to NaN.

    Returns
    -----------
    team: the input DataFrame with _vx, _vy, _speed, _ax, _ay, _acceleration
          and _D (distance to goal) columns added for every player
    """
    # remove any velocity data already in the dataframe
    team = remove_player_velocity_acceleration_distance(team)
    # extract the shooting direction (+1 L2R, -1 R2L)
    shootingDirection = team.shootingDirection.values[0]
    # getting the opposite goal position
    goal_x, goal_y = calc_opp_goal_position(shootingDirection, pitchLength)
    # Get the player ids
    player_ids = np.unique( [ c[:-2] for c in team.columns if c[:4] in ['Home','Away'] ] )
    # Calculate the timestep from one frame to the next. Should always be 0.04 within the same half
    dt = team['Time [s]'].diff()
    # index of first frame in second half
    # BUG FIX: idxmax() (no argument) returns the first index of the maximum
    # Period value; the original's idxmax(2) passed 2 as the (invalid) axis
    # argument, which recent pandas versions reject.
    second_half_idx = team.Period.idxmax()
    # estimate velocities for players in team
    # cycle through players individually
    for player in player_ids:
        # difference player positions in timestep dt to get unsmoothed estimate of velicity
        vx = team[f'{player}_x'].diff() / dt
        vy = team[f'{player}_y'].diff() / dt
        # calculating distance to goal
        # dy will always just be the y position as goal_y is always 0 by definition using current co-ord system, but leaving in this redundancy for now
        # just incase the co-ord system changes for different applications
        dx = team[f'{player}_x'] - goal_x
        dy = team[f'{player}_y'] - goal_y
        D = np.sqrt(dx**2 + dy**2)
        if maxspeed > 0:
            # remove unsmoothed data points that exceed the maximum speed (these are most likely position errors)
            raw_speed = np.sqrt(vx**2 + vy**2)
            vx[raw_speed>maxspeed] = np.nan
            vy[raw_speed>maxspeed] = np.nan
        if smoothing:
            if filter_=='Savitzky-Golay':
                # calculate first half velocity
                vx.loc[:second_half_idx] = signal.savgol_filter(vx.loc[:second_half_idx],window_length=window,polyorder=polyorder)
                vy.loc[:second_half_idx] = signal.savgol_filter(vy.loc[:second_half_idx],window_length=window,polyorder=polyorder)
                # calculate second half velocity
                vx.loc[second_half_idx:] = signal.savgol_filter(vx.loc[second_half_idx:],window_length=window,polyorder=polyorder)
                vy.loc[second_half_idx:] = signal.savgol_filter(vy.loc[second_half_idx:],window_length=window,polyorder=polyorder)
            elif filter_=='moving average':
                ma_window = np.ones( window ) / window
                # calculate first half velocity
                vx.loc[:second_half_idx] = np.convolve( vx.loc[:second_half_idx] , ma_window, mode='same' )
                vy.loc[:second_half_idx] = np.convolve( vy.loc[:second_half_idx] , ma_window, mode='same' )
                # calculate second half velocity
                vx.loc[second_half_idx:] = np.convolve( vx.loc[second_half_idx:] , ma_window, mode='same' )
                vy.loc[second_half_idx:] = np.convolve( vy.loc[second_half_idx:] , ma_window, mode='same' )
        # acceleration components: second derivative of position
        ax = vx.diff() / dt
        ay = vy.diff() / dt
        # acceleration smoothing (same treatment as velocities, per half so the
        # filter never straddles the half-time break)
        if smoothing:
            if filter_=='Savitzky-Golay':
                # calculate first half acceleration
                ax.loc[:second_half_idx] = signal.savgol_filter(ax.loc[:second_half_idx],window_length=window,polyorder=polyorder)
                ay.loc[:second_half_idx] = signal.savgol_filter(ay.loc[:second_half_idx],window_length=window,polyorder=polyorder)
                # calculate second half acceleration
                ax.loc[second_half_idx:] = signal.savgol_filter(ax.loc[second_half_idx:],window_length=window,polyorder=polyorder)
                ay.loc[second_half_idx:] = signal.savgol_filter(ay.loc[second_half_idx:],window_length=window,polyorder=polyorder)
            elif filter_=='moving average':
                ma_window = np.ones( window ) / window
                # calculate first half acceleration
                ax.loc[:second_half_idx] = np.convolve( ax.loc[:second_half_idx] , ma_window, mode='same' )
                ay.loc[:second_half_idx] = np.convolve( ay.loc[:second_half_idx] , ma_window, mode='same' )
                # calculate second half acceleration
                ax.loc[second_half_idx:] = np.convolve( ax.loc[second_half_idx:] , ma_window, mode='same' )
                ay.loc[second_half_idx:] = np.convolve( ay.loc[second_half_idx:] , ma_window, mode='same' )
        # put player speed in x,y direction, and total speed back in the data frame
        team[f'{player}_vx'] = vx
        team[f'{player}_vy'] = vy
        team[f'{player}_speed'] = np.sqrt( vx**2 + vy**2 )
        team[f'{player}_ax'] = ax
        team[f'{player}_ay'] = ay
        team[f'{player}_acceleration'] = np.sqrt( ax**2 + ay**2 )
        team[f'{player}_D'] = D
    return team
def remove_player_velocity_acceleration_distance(team):
    """
    Drop all derived kinematic columns from a tracking DataFrame.

    Removes every column whose final underscore-separated token is one of the
    derived measures (vx, vy, ax, ay, speed, acceleration, D) so they can be
    cleanly re-calculated; returns a new DataFrame.
    """
    derived_suffixes = ('vx', 'vy', 'ax', 'ay', 'speed', 'acceleration', 'D')
    to_drop = [c for c in team.columns if c.split('_')[-1] in derived_suffixes]
    return team.drop(columns=to_drop)
# # **3)** Functions to create plots and videos
# In[9]:
def plot_pitch( field_dimen = (106.0,68.0), field_color ='green', linewidth=2, markersize=20):
    """ plot_pitch
    Plots a soccer pitch. All distance units converted to meters.
    Parameters
    -----------
    field_dimen: (length, width) of field in meters. Default is (106,68)
    field_color: color of field. options are {'green','white'}
    linewidth : width of lines. default = 2
    markersize : size of markers (e.g. penalty spot, centre spot, posts). default = 20
    Returns
    -----------
    fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
    """
    fig,ax = plt.subplots(figsize=(12,8)) # create a figure
    # decide what color we want the field to be. Default is green, but can also choose white
    # NOTE(review): lc/pc are only assigned for 'green' and 'white'; any other
    # field_color value raises NameError further down — confirm intended inputs
    if field_color=='green':
        ax.set_facecolor('mediumseagreen')
        lc = 'whitesmoke' # line color
        pc = 'w' # 'spot' colors
    elif field_color=='white':
        lc = 'k'
        pc = 'k'
    # ALL DIMENSIONS IN m
    border_dimen = (3,3) # include a border around the field of width 3m
    meters_per_yard = 0.9144 # unit conversion from yards to meters
    half_pitch_length = field_dimen[0]/2. # length of half pitch
    half_pitch_width = field_dimen[1]/2. # width of half pitch
    signs = [-1,1]
    # Soccer field dimensions typically defined in yards, so we need to convert to meters
    goal_line_width = 8*meters_per_yard
    box_width = 20*meters_per_yard
    box_length = 6*meters_per_yard
    area_width = 44*meters_per_yard
    area_length = 18*meters_per_yard
    penalty_spot = 12*meters_per_yard
    corner_radius = 1*meters_per_yard
    D_length = 8*meters_per_yard
    D_radius = 10*meters_per_yard
    D_pos = 12*meters_per_yard
    centre_circle_radius = 10*meters_per_yard
    # plot half way line # center circle
    ax.plot([0,0],[-half_pitch_width,half_pitch_width],lc,linewidth=linewidth)
    ax.scatter(0.0,0.0,marker='o',facecolor=lc,linewidth=0,s=markersize)
    y = np.linspace(-1,1,50)*centre_circle_radius
    x = np.sqrt(centre_circle_radius**2-y**2)
    ax.plot(x,y,lc,linewidth=linewidth)
    ax.plot(-x,y,lc,linewidth=linewidth)
    for s in signs: # plots each line seperately, mirrored for the two pitch ends
        # plot pitch boundary
        ax.plot([-half_pitch_length,half_pitch_length],[s*half_pitch_width,s*half_pitch_width],lc,linewidth=linewidth)
        ax.plot([s*half_pitch_length,s*half_pitch_length],[-half_pitch_width,half_pitch_width],lc,linewidth=linewidth)
        # goal posts & line
        ax.plot( [s*half_pitch_length,s*half_pitch_length],[-goal_line_width/2.,goal_line_width/2.],pc+'s',markersize=6*markersize/20.,linewidth=linewidth)
        # 6 yard box
        ax.plot([s*half_pitch_length,s*half_pitch_length-s*box_length],[box_width/2.,box_width/2.],lc,linewidth=linewidth)
        ax.plot([s*half_pitch_length,s*half_pitch_length-s*box_length],[-box_width/2.,-box_width/2.],lc,linewidth=linewidth)
        ax.plot([s*half_pitch_length-s*box_length,s*half_pitch_length-s*box_length],[-box_width/2.,box_width/2.],lc,linewidth=linewidth)
        # penalty area
        ax.plot([s*half_pitch_length,s*half_pitch_length-s*area_length],[area_width/2.,area_width/2.],lc,linewidth=linewidth)
        ax.plot([s*half_pitch_length,s*half_pitch_length-s*area_length],[-area_width/2.,-area_width/2.],lc,linewidth=linewidth)
        ax.plot([s*half_pitch_length-s*area_length,s*half_pitch_length-s*area_length],[-area_width/2.,area_width/2.],lc,linewidth=linewidth)
        # penalty spot
        ax.scatter(s*half_pitch_length-s*penalty_spot,0.0,marker='o',facecolor=lc,linewidth=0,s=markersize)
        # corner flags (quarter-circle arcs in each corner)
        y = np.linspace(0,1,50)*corner_radius
        x = np.sqrt(corner_radius**2-y**2)
        ax.plot(s*half_pitch_length-s*x,-half_pitch_width+y,lc,linewidth=linewidth)
        ax.plot(s*half_pitch_length-s*x,half_pitch_width-y,lc,linewidth=linewidth)
        # draw the D
        y = np.linspace(-1,1,50)*D_length # D_length is the chord of the circle that defines the D
        x = np.sqrt(D_radius**2-y**2)+D_pos
        ax.plot(s*half_pitch_length-s*x,y,lc,linewidth=linewidth)
    # remove axis labels and ticks
    ax.set_xticklabels([])
    ax.set_yticklabels([])
    ax.set_xticks([])
    ax.set_yticks([])
    # set axis limits
    xmax = field_dimen[0]/2. + border_dimen[0]
    ymax = field_dimen[1]/2. + border_dimen[1]
    ax.set_xlim([-xmax,xmax])
    ax.set_ylim([-ymax,ymax])
    ax.set_axisbelow(True)
    return fig,ax
# In[10]:
def plot_frame(hometeam, awayteam, homeplayers, awayplayers, homemapping, awaymapping, figax=None, team_colors=('r','b'), field_colour='green', field_dimen = (106.0,68.0), include_player_velocities=False, PlayerMarkerSize=10, PlayerAlpha=0.7, annotate=False):
    """ plot_frame( hometeam, awayteam )
    Have re-written Laurie's plotting function to plot Signality data and to also deal with the numpy typing error that was occuring.
    Have also added team names, and the mapping from playerIndex number to the jersey number to allow for consistency of comparison of players between games.
    Parameters
    -----------
    hometeam: row (i.e. instant) of the home team tracking data frame
    awayteam: row of the away team tracking data frame
    homeplayers/awayplayers: player metadata DataFrames (used for team names)
    homemapping/awaymapping: jersey number -> playerIndex dicts (inverted below for annotation)
    fig,ax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an existing figure, or None (the default) to generate a new pitch plot,
    team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b' (blue away team)
    field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
    include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
    PlayerMarkerSize: size of the individual player markers. Default is 10
    PlayerAlpha: alpha (transparency) of player markers. Default is 0.7
    annotate: Boolean variable that determines with player jersey numbers are added to the plot (default is False)
    Returns
    -----------
    fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
    """
    # NOTE(review): the capitalised kwargs below (MarkerSize, LineWidth) rely on
    # matplotlib's legacy case-insensitive property handling; newer matplotlib
    # versions may reject them — confirm against the pinned matplotlib version
    teamnames = [homeplayers.teamName[0], awayplayers.teamName[0]]
    # invert jersey->playerIndex mappings so annotations show jersey numbers
    homeIndexMapping = {homemapping[i]:i for i in homemapping}
    awayIndexMapping = {awaymapping[i]:i for i in awaymapping}
    homeAwayMappings = [homeIndexMapping, awayIndexMapping]
    if figax is None: # create new pitch
        fig,ax = plot_pitch(field_dimen = field_dimen, field_color=field_colour)
    else: # overlay on a previously generated pitch
        fig,ax = figax # unpack tuple
    # plot home & away teams in order
    for team, color, name, mapping in zip( [hometeam, awayteam], team_colors, teamnames, homeAwayMappings) :
        x_columns = [c for c in team.keys() if c[-2:].lower()=='_x' and c!='ball_x'] # column header for player x positions
        y_columns = [c for c in team.keys() if c[-2:].lower()=='_y' and c!='ball_y'] # column header for player y positions
        # explicit float64 cast avoids the numpy typing error mentioned above
        x = np.array(team[x_columns], dtype=np.float64)
        y = np.array(team[y_columns], dtype=np.float64)
        ax.plot( x, y, color+'o', MarkerSize=PlayerMarkerSize, alpha=PlayerAlpha, label=name ) # plot player positions
        if include_player_velocities:
            vx_columns = np.array(['{}_vx'.format(c[:-2]) for c in x_columns]) # column header for player x positions
            vy_columns = np.array(['{}_vy'.format(c[:-2]) for c in y_columns]) # column header for player y positions
            vx = np.array(team[vx_columns], dtype=np.float64)
            vy = np.array(team[vy_columns], dtype=np.float64)
            ax.quiver( x, y, vx, vy, color=color, scale_units='inches', scale=10.,width=0.0015,headlength=5,headwidth=3,alpha=PlayerAlpha)
        if annotate:
            # label each on-pitch player with their jersey number
            [ ax.text( team[x]+0.5, team[y]+0.5, mapping[int(x.split('_')[1])], fontsize=10, color=color ) for x,y in zip(x_columns,y_columns) if not ( np.isnan(team[x]) or np.isnan(team[y]) ) ]
    # plot ball
    ax.plot( hometeam['ball_x'], hometeam['ball_y'], 'ko', MarkerSize=6, alpha=1.0, LineWidth=0)
    return fig,ax
# In[11]:
def save_match_clip(hometeam, awayteam, fpath, fname='clip_test', figax=None, frames_per_second=25, team_colors=('r','b'), field_dimen = (106.0,68.0), include_player_velocities=False, PlayerMarkerSize=10, PlayerAlpha=0.7):
    """ save_match_clip( hometeam, awayteam, fpath )
    Re-written Laurie's function to deal with bug caused by numpy typing of the (x,y) co-ords as it was fed into the quiver to plot the velocities
    Generates a movie from Signality tracking data, saving it in the 'fpath' directory with name 'fname'
    Parameters
    -----------
        hometeam: home team tracking data DataFrame. Movie will be created from all rows in the DataFrame
        awayteam: away team tracking data DataFrame. The indices *must* match those of the hometeam DataFrame
        fpath: directory to save the movie
        fname: movie filename. Default is 'clip_test.mp4'
        figax: Can be used to pass in the (fig,ax) objects of a previously generated pitch. Set to (fig,ax) to use an existing figure, or None (the default) to generate a new pitch plot
        frames_per_second: frames per second to assume when generating the movie. Default is 25.
        team_colors: Tuple containing the team colors of the home & away team. Default is 'r' (red, home team) and 'b' (blue away team)
        field_dimen: tuple containing the length and width of the pitch in meters. Default is (106,68)
        include_player_velocities: Boolean variable that determines whether player velocities are also plotted (as quivers). Default is False
        PlayerMarkerSize: size of the individual player markers. Default is 10
        PlayerAlpha: alpha (transparency) of player markers. Default is 0.7
    Returns
    -----------
        fig,ax : figure and axis objects (so that other data can be plotted onto the pitch)
    """
    # check that indices match first -- frames are paired between the two DataFrames by index
    assert np.all( hometeam.index==awayteam.index ), "Home and away team Dataframe indices must be the same"
    # in which case use home team index
    index = hometeam.index
    # Set figure and movie settings (requires the ffmpeg binary to be available to matplotlib)
    FFMpegWriter = animation.writers['ffmpeg']
    metadata = dict(title='Tracking Data', artist='Matplotlib', comment='Metrica tracking data clip')
    writer = FFMpegWriter(fps=frames_per_second, metadata=metadata)
    fname = fpath + '/' + fname + '.mp4' # path and filename
    # create football pitch (or reuse a previously generated one)
    if figax is None:
        fig,ax = plot_pitch(field_dimen=field_dimen)
    else:
        fig,ax = figax
    fig.set_tight_layout(True)
    # Generate movie: one grabbed frame per tracking-data row
    print("Generating movie...",end='')
    with writer.saving(fig, fname, 100):
        for i in index:
            figobjs = [] # this is used to collect up all the axis objects so that they can be deleted after each iteration
            for team,color in zip( [hometeam.loc[i],awayteam.loc[i]], team_colors) :
                x_columns = [c for c in team.keys() if c[-2:].lower()=='_x' and c!='ball_x'] # column header for player x positions
                y_columns = [c for c in team.keys() if c[-2:].lower()=='_y' and c!='ball_y'] # column header for player y positions
                # explicit float64 cast -- this is the fix for the quiver typing bug mentioned in the docstring
                x = np.array(team[x_columns], dtype=np.float64)
                y = np.array(team[y_columns], dtype=np.float64)
                objs, = ax.plot( x, y, color+'o', MarkerSize=PlayerMarkerSize, alpha=PlayerAlpha ) # plot player positions
                figobjs.append(objs)
                if include_player_velocities:
                    vx_columns = ['{}_vx'.format(c[:-2]) for c in x_columns] # column header for player x velocities
                    vy_columns = ['{}_vy'.format(c[:-2]) for c in y_columns] # column header for player y velocities
                    vx = np.array(team[vx_columns], dtype=np.float64)
                    vy = np.array(team[vy_columns], dtype=np.float64)
                    objs = ax.quiver( x, y, vx, vy, color=color, scale_units='inches', scale=10.,width=0.0015,headlength=5,headwidth=3,alpha=PlayerAlpha)
                    figobjs.append(objs)
            # plot ball (note: 'team' here is the away-team row left over from the loop above; both rows share the ball columns)
            objs, = ax.plot( team['ball_x'], team['ball_y'], 'ko', MarkerSize=6, alpha=1.0, LineWidth=0)
            figobjs.append(objs)
            # include match time at the top
            frame_minute = int( team['Time [s]']/60. )
            frame_second = ( team['Time [s]']/60. - frame_minute ) * 60.
            timestring = "%d:%1.2f" % ( frame_minute, frame_second )
            objs = ax.text(-2.5,field_dimen[1]/2.+1., timestring, fontsize=14 )
            figobjs.append(objs)
            writer.grab_frame()
            # Delete all axis objects (other than pitch lines) in preparation for next frame
            for figobj in figobjs:
                figobj.remove()
    print("\nMovie Completed.")
    plt.clf()
    plt.close(fig)
# # **4) Functions to Calculate Running Metrics**
# In[74]:
def summarise_match_running(df_players, df_tracks, playerJerseyMapping, pitchLength = 104.6, jogThreshold=2, runThreshold=4, sprintThreshold=7, plotDistances=True):
    """
    Builds on Laurie's code and uses his neat convolution trick to extract frame indices

    Summarises each home player's physical output for one match (minutes played,
    distance covered per speed band, run/sprint counts and directions) and extracts
    "shearing" run combinations in the opponent's half.

    Parameters
    -----------
        df_players: DataFrame of home players; must contain 'playerIndex', 'jersey_number' and 'name'
        df_tracks: home tracking DataFrame sampled at 25 Hz with per-player
            'Home_<idx>_x', 'Home_<idx>_y' and 'Home_<idx>_speed' columns
        playerJerseyMapping: dict mapping jersey number -> playerIndex (it is inverted below)
        pitchLength: pitch length in metres (default 104.6).
            NOTE(review): currently unused because final_third_threshold is hard-coded to 0 (half-way line)
        jogThreshold, runThreshold, sprintThreshold: speed band boundaries in m/s separating
            walking / jogging / running / sprinting
        plotDistances: if True, plot a stacked bar chart of distance covered per player
    Returns
    -----------
        df_summary: per-player summary (minutes, distances, run/sprint counts, run directions B/F/L/R)
        df_shear: paired opposite-direction (L/R) runs that overlap in time, per jersey pair
        df_forward_shear: shearing pairs that also overlap with a third player's forward run
    """
    # invert so the dict maps playerIndex -> jersey number (this is how it is used below)
    playerJerseyMapping = {playerJerseyMapping[i]:i for i in playerJerseyMapping}
    df_summary = df_players.copy()
    # (1) Minutes played per player
    minutes = []
    for player in df_summary.playerIndex:
        # count the frames for which we have a position observation for each player (when a player is not on the pitch positions are NaN)
        column = f'Home_{player}_x' # use player x-position coordinate
        player_minutes = len(df_tracks.loc[pd.isna(df_tracks[column]) == False]) / (25*60)
        minutes.append( player_minutes )
    df_summary['Minutes Played'] = np.round(minutes, 2)
    # (2) Calculating total distance covered (essentially integrating speed over the constant 1/25 s timesteps)
    distance = []
    dt = 1/25
    for player in df_summary.playerIndex:
        column = f'Home_{player}_speed'
        player_distance = df_tracks[column].sum()*dt/1000  # metres -> km
        distance.append(player_distance)
    df_summary['Distance [km]'] = np.round(distance, 2)
    # (3) Calculate distance covered while: walking, jogging, running, sprinting
    walking = []
    jogging = []
    running = []
    sprinting = []
    for player in df_summary.playerIndex:
        column = f'Home_{player}_speed'
        # walking (less than jogThreshold m/s)
        player_distance = df_tracks.loc[df_tracks[column] < jogThreshold, column].sum()/25./1000
        walking.append(player_distance)
        # jogging (between jogThreshold and runThreshold m/s)
        player_distance = df_tracks.loc[(df_tracks[column] >= jogThreshold) & (df_tracks[column] < runThreshold), column].sum()/25./1000
        jogging.append(player_distance)
        # running (between runThreshold and sprintThreshold m/s)
        player_distance = df_tracks.loc[(df_tracks[column] >= runThreshold) & (df_tracks[column] < sprintThreshold), column].sum()/25./1000
        running.append(player_distance)
        # sprinting (greater than sprintThreshold m/s)
        player_distance = df_tracks.loc[df_tracks[column] >= sprintThreshold, column].sum()/25./1000
        sprinting.append(player_distance)
    df_summary['Walking [km]'] = np.round(np.array(walking), 2)
    df_summary['Jogging [km]'] = np.round(np.array(jogging), 2)
    df_summary['Running [km]'] = np.round(np.array(running), 2)
    df_summary['Sprinting [km]'] = np.round(np.array(sprinting), 2)
    if plotDistances:
        ax = df_summary.sort_values('Distance [km]', ascending=False).plot.bar(x='jersey_number', y=['Walking [km]','Jogging [km]','Running [km]','Sprinting [km]'], colormap='coolwarm', figsize=(16,8))
        ax.set_xlabel('Jersey Number', fontsize=20)
        ax.set_ylabel('Distance covered [m]', fontsize=20)
        ax.legend(fontsize=20)
    ########################################################################################################
    ######       NOW WE CALCULATE THE SPECIFIC METRICS FOR THE FINAL PART OF THE ASSIGNMENT          ######
    ########################################################################################################
    # sustained sprints: how many sustained sprints per match did each player complete? Defined as maintaining a speed > 7 m/s for at least 1 second
    nsprints = []
    nruns = []
    dic_runs = {}
    # minimum duration a sprint should be sustained (in this case, 1 second = 25 consecutive frames)
    sprint_window = 25
    for player in df_players.playerIndex:
        column = f'Home_{player}_speed'
        column_x = f'Home_{player}_x'
        column_y = f'Home_{player}_y'
        ### LAURIE'S CONVOLUTION TRICK ###
        # trick here is to convolve speed with a window of size 'sprint_window', and find number of occasions that sprint was sustained for at least one window length
        # diff helps us to identify when the window starts (+1) and ends (-1)
        player_sprints = np.diff( 1*( np.convolve( 1*(df_tracks[column]>=sprintThreshold), np.ones(sprint_window), mode='same' ) >= sprint_window ) )
        # to make sure runs and sprints are disjoint, let's handle them both with a lower limit, then take one from t'other at the end
        player_runs = np.diff( 1*( np.convolve( 1*(df_tracks[column]>=runThreshold), np.ones(sprint_window), mode='same' ) >= sprint_window ) )
        # counting the runs / sprints
        nsprints.append(np.sum(player_sprints == 1))
        nruns.append(np.sum(player_runs == 1))
        # getting the frame indices of the runs and sprints
        player_sprints_start = np.where( player_sprints == 1 )[0] - int(sprint_window/2) + 1 # adding sprint_window/2 because of the way that the convolution is centred
        player_sprints_end = np.where( player_sprints == -1 )[0] + int(sprint_window/2) + 1
        player_runs_start = np.where( player_runs == 1 )[0] - int(sprint_window/2) + 1 # adding sprint_window/2 because of the way that the convolution is centred
        player_runs_end = np.where( player_runs == -1 )[0] + int(sprint_window/2) + 1
        ## will now loop through the runs and figure out whether they were forward / backward / right / left, and whether they occurred in the final third
        for n, (r_start, r_end) in enumerate(zip(player_runs_start, player_runs_end)):
            # getting the run delta y and delta x
            dy = df_tracks.loc[r_end, column_y] - df_tracks.loc[r_start, column_y]
            dx = df_tracks.loc[r_end, column_x] - df_tracks.loc[r_start, column_x]
            # getting the list of x coords so that we can see whether the run occurred in the final third
            list_x = df_tracks.loc[r_start:r_end, column_x].values
            # initially started just looking at final third runs
            # but more interesting data when you look just in the opponents half (threshold 0 = half-way line)
            final_third_threshold = 0#(pitchLength/3) - (pitchLength/2)
            final_third_flag = 1 if sum([1 if i < final_third_threshold else 0 for i in list_x]) > 0 else 0
            # classifying whether a run was left, right, back or forward from the dominant displacement axis
            if abs(dy) > abs(dx):
                if dy > 0:
                    run_direction = 'L'
                elif dy <= 0:
                    run_direction = 'R'
            elif abs(dx) >= abs(dy):
                if dx > 0:
                    run_direction = 'B'
                elif dx <= 0:
                    run_direction = 'F'
            dic_runs[f'{player}-{n+1}'] = [player, playerJerseyMapping[player], n+1, dy, dx, run_direction, final_third_flag, np.arange(r_start, r_end+1)]
    # transforming dic_runs dictionary -> dataframe
    dic_cols = ['playerIndex','jersey_number','runIndex','dy','dx','runDirection','finalThirdFlag','timeIndexArray']
    df_final_third = pd.DataFrame.from_dict(dic_runs, orient='index', columns=dic_cols)
    # just looking at runs in opponents half / final third
    df_final_third = df_final_third.loc[df_final_third['finalThirdFlag'] == 1]
    # summarising directional runs, pivoting to produce directional run counts per player per match
    df_directional_runs = df_final_third.groupby(['jersey_number','runDirection'])\
                                        .agg({'runIndex':'nunique'})\
                                        .reset_index()\
                                        .rename(columns={'runIndex':'numDirectionRuns'})\
                                        .pivot(index='jersey_number', columns='runDirection', values='numDirectionRuns')\
                                        .reset_index()
    df_summary['numSprints'] = nsprints
    df_summary['numRuns'] = nruns
    # runs were counted with the lower threshold, so subtract sprints to make the two counts disjoint
    df_summary['numRuns'] = df_summary['numRuns'] - df_summary['numSprints']
    df_summary = df_summary.merge(df_directional_runs, on='jersey_number', how='inner')
    df_summary = df_summary.sort_values('Distance [km]', ascending=False)
    df_summary = df_summary[['jersey_number','name','Minutes Played','Distance [km]'\
                             ,'Walking [km]','Jogging [km]','Running [km]','Sprinting [km]'\
                             ,'numRuns','numSprints','B','F','L','R']].reset_index(drop=True)
    ########################################################################################################
    ######       SHEARING RUN COMBINATIONS                                                           ######
    ########################################################################################################
    # producing a df of paired runs (self-join on the constant flag = cartesian product of qualifying runs)
    df_join = df_final_third.merge(df_final_third, on='finalThirdFlag', how='inner', suffixes=('_main','_other'))
    # only looking at paired runs in the final third
    df_join = df_join.loc[df_join['finalThirdFlag'] == 1]
    # getting rid of self runs, and removing dupes (each unordered pair kept once)
    df_join = df_join.loc[(df_join['jersey_number_main'] < df_join['jersey_number_other'])]
    # looking at R/L or L/R runs
    df_join = df_join.loc[((df_join['runDirection_main'] == 'R') & (df_join['runDirection_other'] == 'L')) | ((df_join['runDirection_main'] == 'L') & (df_join['runDirection_other'] == 'R'))]
    # now getting the frames where the paired runs overlapped with each other
    df_join['shearFrames'] = df_join.apply(lambda x: [i for i in x.timeIndexArray_main if i in x.timeIndexArray_other], axis=1)
    # getting the sync fraction of the shear run (overlap relative to the mean run length)
    df_join['shearOverlapFraction'] = df_join.apply(lambda x: 2*len(x.shearFrames) / (len(x.timeIndexArray_main) + len(x.timeIndexArray_other)), axis=1)
    df_join['shearTime [s]'] = df_join.shearFrames.apply(lambda x: len(x)/25)
    df_join = df_join.loc[df_join['shearTime [s]'] > 0]
    df_shear = df_join.groupby(['jersey_number_main','jersey_number_other'])\
                      .agg({'finalThirdFlag':np.sum, 'shearTime [s]':np.sum, 'shearOverlapFraction':lambda x: np.round(np.mean(x), 2)})\
                      .reset_index()\
                      .rename(columns={'finalThirdFlag':'numberShearCombos'})
    ########################################################################################################
    ######       SHEARING RUN COMBINATIONS THAT HAVE A THIRD FORWARD RUNNING PLAYER                  ######
    ########################################################################################################
    # getting forward runs
    df_forward = df_final_third.loc[df_final_third['runDirection'] == 'F']
    # joining that forward run into the paired data frame
    df_forward = df_join.merge(df_forward, on='finalThirdFlag')
    # getting overlapping frames between forward runner and the shearing runs
    df_forward['forwardShearFrames'] = df_forward.apply(lambda x: [i for i in x.shearFrames if i in x.timeIndexArray], axis=1)
    # getting the time that the forward run overlaps with the shearing runs
    df_forward['shearTimeForward [s]'] = df_forward.forwardShearFrames.apply(lambda x: len(x)/25)
    # filtering out cases where there's no forward run overlapping with the shears
    df_forward = df_forward.loc[df_forward['shearTimeForward [s]'] > 0]
    # summarising
    df_forward_shear = df_forward.groupby(['jersey_number_main','jersey_number_other','jersey_number'])\
                                 .agg({'finalThirdFlag':np.sum, 'shearTimeForward [s]':np.sum})\
                                 .reset_index()\
                                 .rename(columns={'finalThirdFlag':'numberShearCombos','jersey_number':'jersey_number_forward_runner'})
    return df_summary, df_shear, df_forward_shear
# ---
#
#
#
#
#
#
#
# # Will organise the code such that it goes through the assignment top to bottom.
#
# ## 1) **4 frame plots**
# ### 1.1) **Vs IF Elfsborg**
# In[14]:
# load tracking data for a given match
# Load tracking data and plot key goal frames for the three matches analysed in the report.
# load tracking data for a given match (Hammarby vs IF Elfsborg, 2019-07-22)
df_homePlayersElf, df_awayPlayersElf, df_homeTracksElf, df_awayTracksElf, pitchLengthElf, pitchWidthElf, homeJerseyMappingElf, awayJerseyMappingElf = \
    parse_raw_to_df(signalityRepo, '20190722.Hammarby-IFElfsborg', interpolate=True)
# calculating velocities, accelerations, distances to goal and other metrics
# (Savitzky-Golay smoothing; speeds above 12 m/s treated as tracking errors)
df_homeTracksElf = calc_player_velocities(df_homeTracksElf, pitchLengthElf, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
df_awayTracksElf = calc_player_velocities(df_awayTracksElf, pitchLengthElf, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
# ### Elfsborg teamsheets
# In[15]:
# bare expression: displays the home teamsheet when run as a notebook cell
df_homePlayersElf
# In[16]:
df_awayPlayersElf
# ### FIG 1
# In[17]:
# snapshot of the frame for the first goal vs Elfsborg
frameIdx = 26828
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
fig.legend(loc='upper center', fontsize=17)
plt.savefig('Elfsborg_Goal_1.pdf', dpi=300, format='pdf', transparent=False, bbox_inches='tight')
# ### 1.2) **Vs Malmo FF**
# In[18]:
# load tracking data for a given match (Hammarby vs Malmö FF, 2019-10-20)
df_homePlayersMal, df_awayPlayersMal, df_homeTracksMal, df_awayTracksMal, pitchLengthMal, pitchWidthMal, homeJerseyMappingMal, awayJerseyMappingMal = \
    parse_raw_to_df(signalityRepo, '20191020.Hammarby-MalmöFF', interpolate=True)
df_homeTracksMal = calc_player_velocities(df_homeTracksMal, pitchLengthMal, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
df_awayTracksMal = calc_player_velocities(df_awayTracksMal, pitchLengthMal, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
# ### Malmo teamsheets
# In[19]:
df_homePlayersMal
# In[20]:
df_awayPlayersMal
# ### FIG 2
# In[21]:
frameIdx = 21032
fig, ax = plot_frame(df_homeTracksMal.loc[frameIdx], df_awayTracksMal.loc[frameIdx], df_homePlayersMal, df_awayPlayersMal, homeJerseyMappingMal, awayJerseyMappingMal,include_player_velocities=True, annotate=True, team_colors=('r','b'), field_colour='green', PlayerAlpha=0.8)
fig.legend(loc='upper center', fontsize=17)
plt.savefig('Malmo_Goal_1.pdf', dpi=300, format='pdf', transparent=False, bbox_inches='tight')
# ### 1.3) **Vs Orebro**
# In[22]:
# load tracking data for a given match (Hammarby vs Örebro, 2019-09-30)
df_homePlayersOre, df_awayPlayersOre, df_homeTracksOre, df_awayTracksOre, pitchLengthOre, pitchWidthOre, homeJerseyMappingOre, awayJerseyMappingOre = \
    parse_raw_to_df(signalityRepo, '20190930.Hammarby-Örebrö', interpolate=True)
df_homeTracksOre = calc_player_velocities(df_homeTracksOre, pitchLengthOre, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
df_awayTracksOre = calc_player_velocities(df_awayTracksOre, pitchLengthOre, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
# ### Orebro teamsheets
# In[23]:
df_homePlayersOre
# In[24]:
df_awayPlayersOre
# ### FIG 3
# In[25]:
# goal scored vs Örebro
frameIdx = 58300
fig, ax = plot_frame(df_homeTracksOre.loc[frameIdx], df_awayTracksOre.loc[frameIdx], df_homePlayersOre, df_awayPlayersOre, homeJerseyMappingOre, awayJerseyMappingOre,include_player_velocities=True, annotate=True, team_colors=('r','k'), field_colour='green', PlayerAlpha=0.8)
fig.legend(loc='upper center', fontsize=17)
plt.savefig('Orebro_Goal_1_scored.pdf', dpi=300, format='pdf', transparent=False, bbox_inches='tight')
# ### FIG 4
# In[26]:
# goal conceded vs Örebro
frameIdx = 16220
fig, ax = plot_frame(df_homeTracksOre.loc[frameIdx], df_awayTracksOre.loc[frameIdx], df_homePlayersOre, df_awayPlayersOre, homeJerseyMappingOre, awayJerseyMappingOre,include_player_velocities=True, annotate=True, team_colors=('r','k'), field_colour='green', PlayerAlpha=0.8)
fig.legend(loc='upper center', fontsize=17)
plt.savefig('Orebro_Goal_1_conceded.pdf', dpi=300, format='pdf', transparent=False, bbox_inches='tight')
# ---
#
#
#
#
#
#
# ## **2) Insights & Limitations**
#
# > 15 second plots of distance from goal, speed, and acceleration for Hammarby over 15 second horizons.
#
# > Illustrate when the tracking data is accurate, and when the tracking data is less than accurate...
# ## 2.1) Mapping Errors
# ### **Reloading Velocities (no velocity smoothing or maxspeed cap)**
# In[202]:
# Mapping-error analysis: reload velocities with NO smoothing and an effectively unbounded
# max-speed cap so the raw tracking/mapping artefacts are visible in the plots below.
df_homeTracksElf = calc_player_velocities(df_homeTracksElf, pitchLengthElf, smoothing=False, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 10000)
df_awayTracksElf = calc_player_velocities(df_awayTracksElf, pitchLengthElf, smoothing=False, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 10000)
# ### **Plotting Mapping Error**
# In[203]:
# three stacked panels sharing the time axis: distance to goal, speed, acceleration
fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, figsize=(50,42))
mappingErrorStart = 88300
mappingErrorFinish = mappingErrorStart + (15*25 - 1) # 15-second window at 25 frames/sec
df_mappingErrorViz = df_homeTracksElf.loc[mappingErrorStart:mappingErrorFinish]
# code to help pick out specific jerseys from column names like 'Home_<idx>_...'
pattern = r'Home_(\d+)_'
homePlayerIndexMapping = {homeJerseyMappingElf[i]:i for i in homeJerseyMappingElf}
# specifying jerseys of interest (highlighted with thicker lines below)
lstJerseysInterest = [5,7]
# getting x indices (frame index -> match minutes, 25 fps)
x_ = (df_mappingErrorViz.index/(25*60))
D_cols = [i for i in df_mappingErrorViz if i[-1] == 'D']
v_cols = [i for i in df_mappingErrorViz if i[-5:] == 'speed']
a_cols = [i for i in df_mappingErrorViz if i[-12:] == 'acceleration']
# panel 1: distance from goal
for player in D_cols:
    jNumber = homePlayerIndexMapping[int(re.search(pattern, player).group(1))]
    pName = df_homePlayersElf.loc[df_homePlayersElf['jersey_number'] == jNumber].name.values[0]
    if jNumber in lstJerseysInterest:
        ax1.plot(x_, df_mappingErrorViz[player], lw=3)
    else:
        ax1.plot(x_, df_mappingErrorViz[player], alpha=0.4)
# panel 2: speed (only this panel carries the legend labels)
for player in v_cols:
    jNumber = homePlayerIndexMapping[int(re.search(pattern, player).group(1))]
    pName = df_homePlayersElf.loc[df_homePlayersElf['jersey_number'] == jNumber].name.values[0]
    if jNumber in lstJerseysInterest:
        ax2.plot(x_, df_mappingErrorViz[player], lw=3, label = f'{pName} (#{jNumber})')
    else:
        ax2.plot(x_, df_mappingErrorViz[player], alpha=0.4)
# panel 3: acceleration
for player in a_cols:
    jNumber = homePlayerIndexMapping[int(re.search(pattern, player).group(1))]
    pName = df_homePlayersElf.loc[df_homePlayersElf['jersey_number'] == jNumber].name.values[0]
    if jNumber in lstJerseysInterest:
        ax3.plot(x_, df_mappingErrorViz[player], lw=3)
    else:
        ax3.plot(x_, df_mappingErrorViz[player], alpha=0.4)
ax1.set_ylabel(r'Distance from Goal (m)', fontsize=22)
ax2.set_ylabel(r'Speed (ms$^{-1}$)', fontsize=22)
ax3.set_ylabel(r'Acceleration (ms$^{-2}$)', fontsize=22)
# transforming ticks from decimal minutes to min:secs
existingTicks = ax3.get_xticks()
newTicks = ["%02dm:%02ds" % (int(i), (i*60)%60) for i in existingTicks]
ax3.set_xticklabels(newTicks, fontsize=20)
# setting y-axis tick label size
ax1.set_yticklabels(ax1.get_yticks(), fontsize=20)
ax2.set_yticklabels(ax2.get_yticks(), fontsize=20)
ax3.set_yticklabels(ax3.get_yticks(), fontsize=20)
fig.legend(loc='center right', fontsize=24)
plt.savefig('MappingError.pdf', dpi=300, format='pdf', bbox_inches='tight')
# ### Photo Clips of Mapping Error
# In[204]:
# consecutive frames around the moment the player identities get swapped
frameIdx = 88401
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[205]:
frameIdx = 88402
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[206]:
frameIdx = 88403
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# ### Player mappings revert 100 frames later...
# In[207]:
frameIdx = 88501
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[208]:
frameIdx = 88503
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[209]:
frameIdx = 88505
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# ## **Combining 2.2) & 3) Nice view of breakaway goal: Goal 3 Vs Elfsborg**
#
# > Want another 15 second sequence that shows off the possibilities of tracking data
#
# > Will also combine this with the third part of the assignment to also calculate the distance to nearest teammate & opposition
#
# > And will start the focus on a handful of players to have a flowing narrative throughout the report
# ### **Reloading Velocities**
# In[210]:
# Breakaway goal analysis: reload velocities WITH smoothing and the 12 m/s max-speed cap.
df_homeTracksElf = calc_player_velocities(df_homeTracksElf, pitchLengthElf, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
df_awayTracksElf = calc_player_velocities(df_awayTracksElf, pitchLengthElf, smoothing=True, filter_='Savitzky-Golay', window=7, polyorder=1, maxspeed = 12)
# ### **Starting with frames for third goal Vs Elfsborg**
# In[211]:
# sequence of snapshots through the breakaway goal
frameIdx = 45020
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[212]:
frameIdx = 45080
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[213]:
frameIdx = 45105
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[214]:
frameIdx = 45130
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[215]:
frameIdx = 45175
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# In[216]:
frameIdx = 45213
fig, ax = plot_frame(df_homeTracksElf.loc[frameIdx], df_awayTracksElf.loc[frameIdx], df_homePlayersElf, df_awayPlayersElf, homeJerseyMappingElf, awayJerseyMappingElf,include_player_velocities=True, annotate=True, team_colors=('r','y'), field_colour='green', PlayerAlpha=0.8)
# ## **Khalili's second goal Vs IF Elfsborg: distance, velocity, acceleration, nearest teammate & opponent plots**
# In[217]:
# starting with the great goal viz dataframe and adding the away team
niceGoalStart = 45000
niceGoalFinish = niceGoalStart + (15*25 - 1) # 15-second window at 25 fps
df_homeGoal = df_homeTracksElf.loc[niceGoalStart:niceGoalFinish]
df_awayGoal = df_awayTracksElf.loc[niceGoalStart:niceGoalFinish]
def player_distance(x1, y1, x2, y2):
    """
    Euclidean distance between two players.

    Parameters
    -----------
        x1, y1: coordinates of the first player (scalars or numpy arrays)
        x2, y2: coordinates of the second player (scalars or numpy arrays)
    Returns
    -----------
        The straight-line distance(s); broadcasts over array inputs.
    """
    # np.hypot is equivalent to sqrt(dx**2 + dy**2) but avoids intermediate
    # overflow/underflow for extreme coordinate values.
    return np.hypot(x2 - x1, y2 - y1)
# For every frame of the goal window, compute each home player's nearest teammate and
# nearest opponent (distance + playerIndex), stored as four new columns per player.
# home and away cols
homeCols = df_homeGoal.columns
awayCols = df_awayGoal.columns
# regex pattern to pick out positional cols ('Home_<idx>_x/y' or 'Away_<idx>_x/y')
positionPattern = r'^(Home|Away)_(\d+)_[xy]'
# using default dicts to store the positional cols against the playerIndexes
dic_home = defaultdict(lambda: [])
dic_away = defaultdict(lambda: [])
# producing home & away dictionaries
## keys are playerIndices
## values are lists of the x and y column names for the positional cols
for h in homeCols:
    colMatch = re.match(positionPattern, h)
    if colMatch:
        dic_home[colMatch.group(2)].append(h)
for a in awayCols:
    colMatch = re.match(positionPattern, a)
    if colMatch:
        dic_away[colMatch.group(2)].append(a)
# combining home and away dataframes (drop shared metadata columns to avoid suffix clashes)
df_homeAwayGoal = df_homeGoal\
    .drop(columns=['ball_x','ball_y','ball_z','matchId','matchName','Period','Time [s]','ball_jerseyPossession','index','halfIndex','TimeStamp'])\
    .merge(df_awayGoal, left_index=True, right_index=True, suffixes=('_home','_away'))
# adding four new columns per home player
## one for the nearest teammate distance
## one for nearest teammate playerIndex
## one for nearest opposition distance
## one for nearest opposition playerIndex
for player in dic_home:
    df_homeAwayGoal[f'Home_{player}_nearestTeammateDist'] = np.nan
    df_homeAwayGoal[f'Home_{player}_nearestTeammateIndex'] = np.nan
    df_homeAwayGoal[f'Home_{player}_nearestOppositionDist'] = np.nan
    df_homeAwayGoal[f'Home_{player}_nearestOppositionIndex'] = np.nan
##############################################################################################################################
##########                                  NEAREST NEIGHBOURS ALGORITHM                                            ##########
##############################################################################################################################
# brute-force O(frames x players^2) scan -- acceptable here because the window is only 15 seconds
# looping through each frame
for idx, cols in df_homeAwayGoal.iterrows():
    # looping through each home player
    for player in dic_home:
        player_xCol, player_yCol = dic_home[player]
        player_x, player_y = cols[player_xCol], cols[player_yCol]
        # setting a max (sentinel) distance for teammate and opposition
        closestTeammateDistance = 1e6
        closestOppositionDistance = 1e6
        closestTeammate = 0
        closestOpposition = 0
        # looping through teammates
        for teammate in dic_home:
            # only interested in other teammates (otherwise you'll always be closest to yourself)
            if player != teammate:
                teammate_xCol, teammate_yCol = dic_home[teammate]
                teammate_x, teammate_y = cols[teammate_xCol], cols[teammate_yCol]
                teammateDist = player_distance(player_x, player_y, teammate_x, teammate_y)
                if teammateDist < closestTeammateDistance:
                    closestTeammateDistance = teammateDist
                    closestTeammate = teammate
        for opposition in dic_away:
            opposition_xCol, opposition_yCol = dic_away[opposition]
            opposition_x, opposition_y = cols[opposition_xCol], cols[opposition_yCol]
            oppositionDist = player_distance(player_x, player_y, opposition_x, opposition_y)
            if oppositionDist < closestOppositionDistance:
                closestOppositionDistance = oppositionDist
                closestOpposition = opposition
        df_homeAwayGoal.loc[idx, f'Home_{player}_nearestTeammateDist'] = closestTeammateDistance
        df_homeAwayGoal.loc[idx, f'Home_{player}_nearestTeammateIndex'] = closestTeammate
        df_homeAwayGoal.loc[idx, f'Home_{player}_nearestOppositionDist'] = closestOppositionDistance
        df_homeAwayGoal.loc[idx, f'Home_{player}_nearestOppositionIndex'] = closestOpposition
print ('Done.')
# ## Plotting
# In[220]:
# Five stacked panels over the 15-second goal window: distance to goal, speed,
# acceleration, nearest-teammate distance and nearest-opponent distance (for player #9).
fig, (ax1, ax2, ax3, ax4, ax5) = plt.subplots(5, sharex=True, figsize=(50,70))
# getting x indices (frame index -> match minutes, 25 fps)
x_ = (df_homeGoal.index/(25*60))
# code to help pick out specific jerseys from column names like 'Home_<idx>_...'
pattern = r'Home_(\d+)_'
homePlayerIndexMapping = {homeJerseyMappingElf[i]:i for i in homeJerseyMappingElf}
awayPlayerIndexMapping = {awayJerseyMappingElf[i]:i for i in awayJerseyMappingElf}
# specifying jerseys of interest
lstJerseysInterest = [6,22,7,40]
# picking out columns for displacement to goal, speed, acceleration
D_cols = [i for i in df_homeGoal if i[-1] == 'D']
v_cols = [i for i in df_homeGoal if i[-5:] == 'speed']
a_cols = [i for i in df_homeGoal if i[-12:] == 'acceleration']
# filtering above columns for specific jerseys of interest
D_cols = [i for i in D_cols if homePlayerIndexMapping[int(re.search(pattern, i).group(1))] in lstJerseysInterest]
v_cols = [i for i in v_cols if homePlayerIndexMapping[int(re.search(pattern, i).group(1))] in lstJerseysInterest]
a_cols = [i for i in a_cols if homePlayerIndexMapping[int(re.search(pattern, i).group(1))] in lstJerseysInterest]
# plotting specific player distance to goal, speed, and acceleration
for player in D_cols:
    ax1.plot(x_, df_homeGoal[player], lw=3)
for player in v_cols:
    jNumber = homePlayerIndexMapping[int(re.search(pattern, player).group(1))]
    pName = df_homePlayersElf.loc[df_homePlayersElf['jersey_number'] == jNumber].name.values[0]
    ax2.plot(x_, df_homeGoal[player], lw=3, label = f'{pName} (#{jNumber})')
for player in a_cols:
    ax3.plot(x_, df_homeGoal[player], lw=3)
# getting the jersey-number labels for the below plots (playerIndex -> jersey)
closestTeammateLabels = [homePlayerIndexMapping[int(i)] for i in df_homeAwayGoal.Home_9_nearestTeammateIndex]
closestOppositionLabels = [awayPlayerIndexMapping[int(i)] for i in df_homeAwayGoal.Home_9_nearestOppositionIndex]
# Plotting Closest Teammates (colour-coded by jersey number)
ax4.scatter(x_, df_homeAwayGoal.Home_9_nearestTeammateDist, c=closestTeammateLabels, alpha=0.7)
# Labelling teammates on the chart: annotate only when the nearest teammate changes,
# alternating the label above/below the point (overUnder) to reduce overlap
prevName = None
overUnder = -1
for i, j, k in zip(x_, df_homeAwayGoal.Home_9_nearestTeammateDist, closestTeammateLabels):
    pName = df_homePlayersElf.loc[df_homePlayersElf['jersey_number'] == k].name.values[0]
    if k != prevName:
        ax4.annotate(pName, (i, j+1.5*overUnder - 0.5), fontsize=18)
        overUnder *= -1
    prevName = k
# Plotting Opposition (log-scaled colour values to spread the jersey-number colours)
ax5.scatter(x_, df_homeAwayGoal.Home_9_nearestOppositionDist, c=[np.log(i)*20000 for i in closestOppositionLabels], alpha=0.7)
# Labelling opposition players on the chart
prevName = None
overUnder = 1
for i, j, k in zip(x_, df_homeAwayGoal.Home_9_nearestOppositionDist, closestOppositionLabels):
pName = df_awayPlayersElf.loc[df_awayPlayersElf['jersey_number'] == k].name.values[0]
if k != prevName:
ax5.annotate(pName, (i, j+1.5*overUnder - 3), fontsize=18)
prevName = k
ax1.set_ylabel(r'Distance from Goal (m)', fontsize=22)
ax2.set_ylabel(r'Speed (ms$^{-1}$)', fontsize=22)
ax3.set_ylabel(r'Acceleration (ms$^{-2}$)', fontsize=22)
ax4.set_ylabel(r'Closest Teammate (m)', fontsize=22)
ax5.set_ylabel(r'Closest Opposition (m)', fontsize=22)
# transforming ticks from min.min to min:secs
existingTicks = ax5.get_xticks()
newTicks = ["%02dm:%02ds" % (int(i), (i*60)%60) for i in existingTicks]
ax5.set_xticklabels(newTicks, fontsize=20)
# setting y-axis tick label size
ax1.set_yticklabels(ax1.get_yticks(), fontsize=20)
ax2.set_yticklabels(ax2.get_yticks(), fontsize=20)
ax3.set_yticklabels(ax3.get_yticks(), fontsize=20)
ax4.set_yticklabels(ax4.get_yticks(), fontsize=20)
ax5.set_yticklabels(ax5.get_yticks(), fontsize=20)
# plotting the main actions
# pass
ax1.vlines(x=45080/(25*60), ymin=10, ymax=50, color='grey', alpha = 0.3)
ax2.vlines(x=45080/(25*60), ymin=0, ymax=6.5, label='Pass #6 -> #22', color='grey', alpha = 0.3)
ax3.vlines(x=45080/(25*60), ymin=0, ymax=6, color='grey', alpha = 0.3)
ax4.vlines(x=45080/(25*60), ymin=0, ymax=15, color='grey', alpha = 0.3)
ax5.vlines(x=45080/(25*60), ymin=0, ymax=17, color='grey', alpha = 0.3)
# assist
ax1.vlines(x=45105/(25*60), ymin=10, ymax=50, color='grey', alpha = 0.6)
ax2.vlines(x=45105/(25*60), ymin=0, ymax=6.5, label='Assist #22 -> #7', color='grey', alpha = 0.6)
ax3.vlines(x=45105/(25*60), ymin=0, ymax=6, color='grey', alpha = 0.6)
ax4.vlines(x=45105/(25*60), ymin=0, ymax=15, color='grey', alpha = 0.6)
ax5.vlines(x=45105/(25*60), ymin=0, ymax=17, color='grey', alpha = 0.6)
# goal
ax1.vlines(x=45175/(25*60), ymin=10, ymax=50, color='grey', alpha = 0.9)
ax2.vlines(x=45175/(25*60), ymin=0, ymax=6.5, label='Shot #7 -> Goal', color='grey', alpha = 0.9)
ax3.vlines(x=45175/(25*60), ymin=0, ymax=6, color='grey', alpha = 0.9)
ax4.vlines(x=45175/(25*60), ymin=0, ymax=15, color='grey', alpha = 0.9)
ax5.vlines(x=45175/(25*60), ymin=0, ymax=17, color='grey', alpha = 0.9)
fig.legend(loc='center right', fontsize=20)
plt.savefig('NiceGoalAdded.pdf', dpi=300, format='pdf', bbox_inches='tight')
# ## **4) Additional Run Metrics**
#
# **Building up our run statistics from simpler to more advanced statistics:**
# 1. Number of runs and sprints per player;
# 2. Number of runs and sprints broken down by run direction per player (forward, backward, left, right);
# 3. Number of shearing runs by pairs of players running left and right at the same time in the opponents half / final third;
# 4. Number of shearing runs pay pairs of players, where there's a forward run by a third player.
# ### **Vs Elfsborg**
# In[191]:
df_summaryElf, df_shearElf, df_forward_shearElf = summarise_match_running(df_homePlayersElf, df_homeTracksElf, homeJerseyMappingElf, pitchLengthElf)
# ### **Vs Malmo**
# In[192]:
df_summaryMal, df_shearMal, df_forward_shearMal = summarise_match_running(df_homePlayersMal, df_homeTracksMal, homeJerseyMappingMal, pitchLengthMal)
# ### **Vs Orebro**
# In[194]:
df_summaryOre, df_shearOre, df_forward_shearOre = summarise_match_running(df_homePlayersOre, df_homeTracksOre, homeJerseyMappingOre, pitchLengthOre)
# ### **Summarising per 90 mins over all games**
# In[221]:
df_summary = pd.concat([df_summaryElf, df_summaryMal, df_summaryOre], ignore_index=True)
df_summary['numMatches'] = 1
df_summary = df_summary.groupby(['jersey_number','name'])\
.agg({'numMatches':np.sum,'Minutes Played':np.sum,'Distance [km]':np.sum,'Walking [km]':np.sum,'Jogging [km]':np.sum\
,'Running [km]':np.sum, 'Sprinting [km]':np.sum, 'numRuns':np.sum,'numSprints':np.sum\
,'B':np.sum,'F':np.sum,'L':np.sum,'R':np.sum})\
.reset_index()
df_summary.iloc[:,4:14] = df_summary.iloc[:,4:14].div(df_summary.iloc[:,3], axis=0) * 90
df_summary['pcForward'] = 100*df_summary['F'] / (df_summary['F'] + df_summary['B'] + df_summary['R'] + df_summary['L'])
df_summary['pcSideToSide'] = 100*(df_summary['R'] + df_summary['L']) / (df_summary['F'] + df_summary['B'] + df_summary['R'] + df_summary['L'])
df_summary = df_summary.loc[df_summary['Minutes Played'] > 45].round(1).sort_values('pcForward', ascending=False).reset_index(drop=True)
df_summary
# ### Top five by percent of forward runs
# In[196]:
df_summary.round(1)[['name','F','B','L','R','pcForward']].head(5)
# ### Top five by percent of side-to-side runs
# In[228]:
df_summary.sort_values('pcSideToSide', ascending=False).round(1)[['name','F','B','L','R','pcSideToSide']].head(6)
# ### **Summarising the shearing runs over the three games**
# In[198]:
df_shear = pd.concat([df_shearElf, df_shearMal, df_shearOre], ignore_index=True)
df_shear = df_shear.groupby(['jersey_number_main','jersey_number_other'])\
.agg({'numberShearCombos':np.sum,'shearTime [s]':np.sum,'shearOverlapFraction':np.mean})\
.sort_values('numberShearCombos', ascending=False)\
.reset_index()
df_shear[['jersey_number_main','jersey_number_other','numberShearCombos','shearTime [s]']].head(5)
# In[199]:
print ('Analysis Finished.')
# ---
#
# ## **End Technical Assignment Qs**
#
# ---
| [
"numpy.sum",
"matplotlib.pyplot.clf",
"numpy.ones",
"numpy.isnan",
"collections.defaultdict",
"numpy.mean",
"numpy.arange",
"numpy.convolve",
"os.path.join",
"numpy.round",
"numpy.unique",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.linspace",
"pandas.isna",
"matplotlib.pyplo... | [((390, 445), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (413, 445), False, 'import warnings\n'), ((44945, 45046), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Elfsborg_Goal_1.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "('Elfsborg_Goal_1.pdf', dpi=300, format='pdf', transparent=False,\n bbox_inches='tight')\n", (44956, 45046), True, 'import matplotlib.pyplot as plt\n'), ((46120, 46218), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Malmo_Goal_1.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "('Malmo_Goal_1.pdf', dpi=300, format='pdf', transparent=False,\n bbox_inches='tight')\n", (46131, 46218), True, 'import matplotlib.pyplot as plt\n'), ((47291, 47398), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Orebro_Goal_1_scored.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "('Orebro_Goal_1_scored.pdf', dpi=300, format='pdf', transparent=\n False, bbox_inches='tight')\n", (47302, 47398), True, 'import matplotlib.pyplot as plt\n'), ((47759, 47867), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Orebro_Goal_1_conceded.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'transparent': '(False)', 'bbox_inches': '"""tight"""'}), "('Orebro_Goal_1_conceded.pdf', dpi=300, format='pdf',\n transparent=False, bbox_inches='tight')\n", (47770, 47867), True, 'import matplotlib.pyplot as plt\n'), ((48648, 48694), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'sharex': '(True)', 'figsize': '(50, 42)'}), '(3, sharex=True, figsize=(50, 42))\n', (48660, 48694), True, 'import matplotlib.pyplot as plt\n'), ((51030, 51105), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""MappingError.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), 
"('MappingError.pdf', dpi=300, format='pdf', bbox_inches='tight')\n", (51041, 51105), True, 'import matplotlib.pyplot as plt\n'), ((56563, 56587), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (56574, 56587), False, 'from collections import Counter, defaultdict\n'), ((56598, 56622), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (56609, 56622), False, 'from collections import Counter, defaultdict\n'), ((60214, 60260), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(5)'], {'sharex': '(True)', 'figsize': '(50, 70)'}), '(5, sharex=True, figsize=(50, 70))\n', (60226, 60260), True, 'import matplotlib.pyplot as plt\n'), ((65051, 65127), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""NiceGoalAdded.pdf"""'], {'dpi': '(300)', 'format': '"""pdf"""', 'bbox_inches': '"""tight"""'}), "('NiceGoalAdded.pdf', dpi=300, format='pdf', bbox_inches='tight')\n", (65062, 65127), True, 'import matplotlib.pyplot as plt\n'), ((66253, 66328), 'pandas.concat', 'pd.concat', (['[df_summaryElf, df_summaryMal, df_summaryOre]'], {'ignore_index': '(True)'}), '([df_summaryElf, df_summaryMal, df_summaryOre], ignore_index=True)\n', (66262, 66328), True, 'import pandas as pd\n'), ((67636, 67705), 'pandas.concat', 'pd.concat', (['[df_shearElf, df_shearMal, df_shearOre]'], {'ignore_index': '(True)'}), '([df_shearElf, df_shearMal, df_shearOre], ignore_index=True)\n', (67645, 67705), True, 'import pandas as pd\n'), ((2864, 2888), 'pandas.DataFrame', 'pd.DataFrame', (['dic_tracks'], {}), '(dic_tracks)\n', (2876, 2888), True, 'import pandas as pd\n'), ((11107, 11148), 'pandas.concat', 'pd.concat', (['lst_df_home'], {'ignore_index': '(True)'}), '(lst_df_home, ignore_index=True)\n', (11116, 11148), True, 'import pandas as pd\n'), ((11169, 11210), 'pandas.concat', 'pd.concat', (['lst_df_away'], {'ignore_index': '(True)'}), '(lst_df_away, ignore_index=True)\n', (11178, 11210), True, 'import pandas as pd\n'), ((14004, 14074), 
'numpy.unique', 'np.unique', (["[c[:-2] for c in team.columns if c[:4] in ['Home', 'Away']]"], {}), "([c[:-2] for c in team.columns if c[:4] in ['Home', 'Away']])\n", (14013, 14074), True, 'import numpy as np\n'), ((19650, 19679), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (19662, 19679), True, 'import matplotlib.pyplot as plt\n'), ((21061, 21104), 'numpy.sqrt', 'np.sqrt', (['(centre_circle_radius ** 2 - y ** 2)'], {}), '(centre_circle_radius ** 2 - y ** 2)\n', (21068, 21104), True, 'import numpy as np\n'), ((29128, 29168), 'numpy.all', 'np.all', (['(hometeam.index == awayteam.index)'], {}), '(hometeam.index == awayteam.index)\n', (29134, 29168), True, 'import numpy as np\n'), ((32034, 32043), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (32041, 32043), True, 'import matplotlib.pyplot as plt\n'), ((32048, 32062), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (32057, 32062), True, 'import matplotlib.pyplot as plt\n'), ((33000, 33020), 'numpy.round', 'np.round', (['minutes', '(2)'], {}), '(minutes, 2)\n', (33008, 33020), True, 'import numpy as np\n'), ((33376, 33397), 'numpy.round', 'np.round', (['distance', '(2)'], {}), '(distance, 2)\n', (33384, 33397), True, 'import numpy as np\n'), ((39013, 39079), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['dic_runs'], {'orient': '"""index"""', 'columns': 'dic_cols'}), "(dic_runs, orient='index', columns=dic_cols)\n", (39035, 39079), True, 'import pandas as pd\n'), ((56268, 56304), 'numpy.sqrt', 'np.sqrt', (['(delta_x ** 2 + delta_y ** 2)'], {}), '(delta_x ** 2 + delta_y ** 2)\n', (56275, 56304), True, 'import numpy as np\n'), ((56792, 56820), 're.match', 're.match', (['positionPattern', 'h'], {}), '(positionPattern, h)\n', (56800, 56820), False, 'import re\n'), ((56919, 56947), 're.match', 're.match', (['positionPattern', 'a'], {}), '(positionPattern, a)\n', (56927, 56947), False, 'import re\n'), ((14963, 14989), 'numpy.sqrt', 
'np.sqrt', (['(dx ** 2 + dy ** 2)'], {}), '(dx ** 2 + dy ** 2)\n', (14970, 14989), True, 'import numpy as np\n'), ((18254, 18280), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2)'], {}), '(vx ** 2 + vy ** 2)\n', (18261, 18280), True, 'import numpy as np\n'), ((18390, 18416), 'numpy.sqrt', 'np.sqrt', (['(ax ** 2 + ay ** 2)'], {}), '(ax ** 2 + ay ** 2)\n', (18397, 18416), True, 'import numpy as np\n'), ((21011, 21033), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', (21022, 21033), True, 'import numpy as np\n'), ((22717, 22753), 'numpy.sqrt', 'np.sqrt', (['(corner_radius ** 2 - y ** 2)'], {}), '(corner_radius ** 2 - y ** 2)\n', (22724, 22753), True, 'import numpy as np\n'), ((26126, 26169), 'numpy.array', 'np.array', (['team[x_columns]'], {'dtype': 'np.float64'}), '(team[x_columns], dtype=np.float64)\n', (26134, 26169), True, 'import numpy as np\n'), ((26182, 26225), 'numpy.array', 'np.array', (['team[y_columns]'], {'dtype': 'np.float64'}), '(team[y_columns], dtype=np.float64)\n', (26190, 26225), True, 'import numpy as np\n'), ((34475, 34492), 'numpy.array', 'np.array', (['walking'], {}), '(walking)\n', (34483, 34492), True, 'import numpy as np\n'), ((34539, 34556), 'numpy.array', 'np.array', (['jogging'], {}), '(jogging)\n', (34547, 34556), True, 'import numpy as np\n'), ((34603, 34620), 'numpy.array', 'np.array', (['running'], {}), '(running)\n', (34611, 34620), True, 'import numpy as np\n'), ((34669, 34688), 'numpy.array', 'np.array', (['sprinting'], {}), '(sprinting)\n', (34677, 34688), True, 'import numpy as np\n'), ((4380, 4392), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4389, 4392), False, 'import json\n'), ((4523, 4535), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4532, 4535), False, 'import json\n'), ((5463, 5488), 'pandas.DataFrame', 'pd.DataFrame', (['homePlayers'], {}), '(homePlayers)\n', (5475, 5488), True, 'import pandas as pd\n'), ((5518, 5543), 'pandas.DataFrame', 'pd.DataFrame', (['awayPlayers'], {}), 
'(awayPlayers)\n', (5530, 5543), True, 'import pandas as pd\n'), ((10100, 10120), 'numpy.array', 'np.array', (['match_time'], {}), '(match_time)\n', (10108, 10120), True, 'import numpy as np\n'), ((10804, 10824), 'numpy.array', 'np.array', (['match_time'], {}), '(match_time)\n', (10812, 10824), True, 'import numpy as np\n'), ((15150, 15176), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2)'], {}), '(vx ** 2 + vy ** 2)\n', (15157, 15176), True, 'import numpy as np\n'), ((22671, 22692), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (22682, 22692), True, 'import numpy as np\n'), ((22948, 22970), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', '(50)'], {}), '(-1, 1, 50)\n', (22959, 22970), True, 'import numpy as np\n'), ((23047, 23078), 'numpy.sqrt', 'np.sqrt', (['(D_radius ** 2 - y ** 2)'], {}), '(D_radius ** 2 - y ** 2)\n', (23054, 23078), True, 'import numpy as np\n'), ((26639, 26683), 'numpy.array', 'np.array', (['team[vx_columns]'], {'dtype': 'np.float64'}), '(team[vx_columns], dtype=np.float64)\n', (26647, 26683), True, 'import numpy as np\n'), ((26701, 26745), 'numpy.array', 'np.array', (['team[vy_columns]'], {'dtype': 'np.float64'}), '(team[vy_columns], dtype=np.float64)\n', (26709, 26745), True, 'import numpy as np\n'), ((36634, 36661), 'numpy.sum', 'np.sum', (['(player_sprints == 1)'], {}), '(player_sprints == 1)\n', (36640, 36661), True, 'import numpy as np\n'), ((36684, 36708), 'numpy.sum', 'np.sum', (['(player_runs == 1)'], {}), '(player_runs == 1)\n', (36690, 36708), True, 'import numpy as np\n'), ((4290, 4353), 'os.path.join', 'os.path.join', (['signalityRepo', 'f"""{fileNamePrefix}-info_live.json"""'], {}), "(signalityRepo, f'{fileNamePrefix}-info_live.json')\n", (4302, 4353), False, 'import os\n'), ((4434, 4494), 'os.path.join', 'os.path.join', (['signalityRepo', 'f"""{fileNamePrefix}-tracks.json"""'], {}), "(signalityRepo, f'{fileNamePrefix}-tracks.json')\n", (4446, 4494), False, 'import os\n'), ((15418, 15511), 
'scipy.signal.savgol_filter', 'signal.savgol_filter', (['vx.loc[:second_half_idx]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(vx.loc[:second_half_idx], window_length=window,\n polyorder=polyorder)\n', (15438, 15511), True, 'import scipy.signal as signal\n'), ((15549, 15642), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['vy.loc[:second_half_idx]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(vy.loc[:second_half_idx], window_length=window,\n polyorder=polyorder)\n', (15569, 15642), True, 'import scipy.signal as signal\n'), ((15729, 15822), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['vx.loc[second_half_idx:]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(vx.loc[second_half_idx:], window_length=window,\n polyorder=polyorder)\n', (15749, 15822), True, 'import scipy.signal as signal\n'), ((15860, 15953), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['vy.loc[second_half_idx:]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(vy.loc[second_half_idx:], window_length=window,\n polyorder=polyorder)\n', (15880, 15953), True, 'import scipy.signal as signal\n'), ((16894, 16987), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['ax.loc[:second_half_idx]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(ax.loc[:second_half_idx], window_length=window,\n polyorder=polyorder)\n', (16914, 16987), True, 'import scipy.signal as signal\n'), ((17025, 17118), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['ay.loc[:second_half_idx]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(ay.loc[:second_half_idx], window_length=window,\n polyorder=polyorder)\n', (17045, 17118), True, 'import scipy.signal as signal\n'), ((17209, 17302), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['ax.loc[second_half_idx:]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(ax.loc[second_half_idx:], window_length=window,\n polyorder=polyorder)\n', (17229, 17302), True, 
'import scipy.signal as signal\n'), ((17340, 17433), 'scipy.signal.savgol_filter', 'signal.savgol_filter', (['ay.loc[second_half_idx:]'], {'window_length': 'window', 'polyorder': 'polyorder'}), '(ay.loc[second_half_idx:], window_length=window,\n polyorder=polyorder)\n', (17360, 17433), True, 'import scipy.signal as signal\n'), ((30399, 30442), 'numpy.array', 'np.array', (['team[x_columns]'], {'dtype': 'np.float64'}), '(team[x_columns], dtype=np.float64)\n', (30407, 30442), True, 'import numpy as np\n'), ((30463, 30506), 'numpy.array', 'np.array', (['team[y_columns]'], {'dtype': 'np.float64'}), '(team[y_columns], dtype=np.float64)\n', (30471, 30506), True, 'import numpy as np\n'), ((38793, 38822), 'numpy.arange', 'np.arange', (['r_start', '(r_end + 1)'], {}), '(r_start, r_end + 1)\n', (38802, 38822), True, 'import numpy as np\n'), ((62604, 62613), 'numpy.log', 'np.log', (['i'], {}), '(i)\n', (62610, 62613), True, 'import numpy as np\n'), ((8256, 8266), 'pandas.isna', 'pd.isna', (['i'], {}), '(i)\n', (8263, 8266), True, 'import pandas as pd\n'), ((16140, 16201), 'numpy.convolve', 'np.convolve', (['vx.loc[:second_half_idx]', 'ma_window'], {'mode': '"""same"""'}), "(vx.loc[:second_half_idx], ma_window, mode='same')\n", (16151, 16201), True, 'import numpy as np\n'), ((16248, 16309), 'numpy.convolve', 'np.convolve', (['vy.loc[:second_half_idx]', 'ma_window'], {'mode': '"""same"""'}), "(vy.loc[:second_half_idx], ma_window, mode='same')\n", (16259, 16309), True, 'import numpy as np\n'), ((16405, 16466), 'numpy.convolve', 'np.convolve', (['vx.loc[second_half_idx:]', 'ma_window'], {'mode': '"""same"""'}), "(vx.loc[second_half_idx:], ma_window, mode='same')\n", (16416, 16466), True, 'import numpy as np\n'), ((16513, 16574), 'numpy.convolve', 'np.convolve', (['vy.loc[second_half_idx:]', 'ma_window'], {'mode': '"""same"""'}), "(vy.loc[second_half_idx:], ma_window, mode='same')\n", (16524, 16574), True, 'import numpy as np\n'), ((17624, 17685), 'numpy.convolve', 'np.convolve', 
(['ax.loc[:second_half_idx]', 'ma_window'], {'mode': '"""same"""'}), "(ax.loc[:second_half_idx], ma_window, mode='same')\n", (17635, 17685), True, 'import numpy as np\n'), ((17732, 17793), 'numpy.convolve', 'np.convolve', (['ay.loc[:second_half_idx]', 'ma_window'], {'mode': '"""same"""'}), "(ay.loc[:second_half_idx], ma_window, mode='same')\n", (17743, 17793), True, 'import numpy as np\n'), ((17893, 17954), 'numpy.convolve', 'np.convolve', (['ax.loc[second_half_idx:]', 'ma_window'], {'mode': '"""same"""'}), "(ax.loc[second_half_idx:], ma_window, mode='same')\n", (17904, 17954), True, 'import numpy as np\n'), ((18001, 18062), 'numpy.convolve', 'np.convolve', (['ay.loc[second_half_idx:]', 'ma_window'], {'mode': '"""same"""'}), "(ay.loc[second_half_idx:], ma_window, mode='same')\n", (18012, 18062), True, 'import numpy as np\n'), ((30972, 31016), 'numpy.array', 'np.array', (['team[vx_columns]'], {'dtype': 'np.float64'}), '(team[vx_columns], dtype=np.float64)\n', (30980, 31016), True, 'import numpy as np\n'), ((31042, 31086), 'numpy.array', 'np.array', (['team[vy_columns]'], {'dtype': 'np.float64'}), '(team[vy_columns], dtype=np.float64)\n', (31050, 31086), True, 'import numpy as np\n'), ((36796, 36825), 'numpy.where', 'np.where', (['(player_sprints == 1)'], {}), '(player_sprints == 1)\n', (36804, 36825), True, 'import numpy as np\n'), ((36963, 36993), 'numpy.where', 'np.where', (['(player_sprints == -1)'], {}), '(player_sprints == -1)\n', (36971, 36993), True, 'import numpy as np\n'), ((37055, 37081), 'numpy.where', 'np.where', (['(player_runs == 1)'], {}), '(player_runs == 1)\n', (37063, 37081), True, 'import numpy as np\n'), ((37216, 37243), 'numpy.where', 'np.where', (['(player_runs == -1)'], {}), '(player_runs == -1)\n', (37224, 37243), True, 'import numpy as np\n'), ((49381, 49407), 're.search', 're.search', (['pattern', 'player'], {}), '(pattern, player)\n', (49390, 49407), False, 'import re\n'), ((49744, 49770), 're.search', 're.search', (['pattern', 'player'], 
{}), '(pattern, player)\n', (49753, 49770), False, 'import re\n'), ((50139, 50165), 're.search', 're.search', (['pattern', 'player'], {}), '(pattern, player)\n', (50148, 50165), False, 'import re\n'), ((61453, 61479), 're.search', 're.search', (['pattern', 'player'], {}), '(pattern, player)\n', (61462, 61479), False, 'import re\n'), ((6372, 6410), 'pandas.isna', 'pd.isna', (["df_homePlayers['playerIndex']"], {}), "(df_homePlayers['playerIndex'])\n", (6379, 6410), True, 'import pandas as pd\n'), ((6451, 6488), 'numpy.nanmax', 'np.nanmax', (['df_homePlayers.playerIndex'], {}), '(df_homePlayers.playerIndex)\n', (6460, 6488), True, 'import numpy as np\n'), ((6547, 6585), 'pandas.isna', 'pd.isna', (["df_awayPlayers['playerIndex']"], {}), "(df_awayPlayers['playerIndex'])\n", (6554, 6585), True, 'import pandas as pd\n'), ((6626, 6663), 'numpy.nanmax', 'np.nanmax', (['df_awayPlayers.playerIndex'], {}), '(df_awayPlayers.playerIndex)\n', (6635, 6663), True, 'import numpy as np\n'), ((16022, 16037), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (16029, 16037), True, 'import numpy as np\n'), ((17502, 17517), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (17509, 17517), True, 'import numpy as np\n'), ((32876, 32902), 'pandas.isna', 'pd.isna', (['df_tracks[column]'], {}), '(df_tracks[column])\n', (32883, 32902), True, 'import pandas as pd\n'), ((36232, 36254), 'numpy.ones', 'np.ones', (['sprint_window'], {}), '(sprint_window)\n', (36239, 36254), True, 'import numpy as np\n'), ((36512, 36534), 'numpy.ones', 'np.ones', (['sprint_window'], {}), '(sprint_window)\n', (36519, 36534), True, 'import numpy as np\n'), ((27061, 27078), 'numpy.isnan', 'np.isnan', (['team[x]'], {}), '(team[x])\n', (27069, 27078), True, 'import numpy as np\n'), ((27082, 27099), 'numpy.isnan', 'np.isnan', (['team[y]'], {}), '(team[y])\n', (27090, 27099), True, 'import numpy as np\n'), ((60968, 60989), 're.search', 're.search', (['pattern', 'i'], {}), '(pattern, i)\n', (60977, 60989), 
False, 'import re\n'), ((61082, 61103), 're.search', 're.search', (['pattern', 'i'], {}), '(pattern, i)\n', (61091, 61103), False, 'import re\n'), ((61196, 61217), 're.search', 're.search', (['pattern', 'i'], {}), '(pattern, i)\n', (61205, 61217), False, 'import re\n'), ((42007, 42017), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (42014, 42017), True, 'import numpy as np\n')] |
import os
import random
import torch
import torch.utils.data as data
from torchvision.datasets.folder import default_loader
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def get_image_list(root):
# images = []
# for class_dir in os.listdir(root):
# for image in os.listdir(os.path.join(root, class_dir)):
# image_path = os.path.join(root, class_dir, image)
# images.append(image_path)
# return images
with open(root, 'r') as f:
file_list = f.readlines()
file_list = [row.rstrip().split(" ")[0] for row in file_list]
return file_list
def color_projection(image):
new_image = torch.zeros_like(image)
r, g, b = image[0], image[1], image[2]
new_image[0] = 0.8333 * r + 0.3333 * g - 0.1667 * b
new_image[1] = 0.3333 * r + 0.3333 * g + 0.3333 * b
new_image[2] =-0.1667 * r + 0.3333 * g + 0.8333 * b
return new_image
class JigsawDataset(data.Dataset):
def __init__(self, root, perms_file):
self.root = root
self.perms_file = perms_file
self.perms = np.load(perms_file)
self.classes = list(range(self.perms.shape[0]))
self.images = get_image_list(root)
self.resize = transforms.Resize(256)
self.rand_crop = transforms.RandomCrop(255)
self.grayscale = transforms.Grayscale(num_output_channels=3)
self.to_tensor = transforms.ToTensor()
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def __getitem__(self, index):
image_path = self.images[index]
target = np.random.randint(0, self.perms.shape[0])
image = default_loader(image_path)
perm = self.perms[target]
image = self.rand_crop(self.resize(image))
if random.random() < 0.5:
image = self.grayscale(image)
image = self.normalize(self.to_tensor(image))
else:
image = self.to_tensor(image)
if random.random() < 0.5:
image = color_projection(image)
image = self.normalize(image)
k = 0
tiles = torch.zeros((9, 3, 85, 85), dtype=image.dtype)
for i in range(0, 255, 85):
for j in range(0, 255, 85):
tiles[k] = image[:, i:i+85, j:j+85]
k += 1
k = 0
shuffled_tiles = torch.zeros((9, 3, 64, 64), dtype=image.dtype)
for i in perm:
x = np.random.randint(0, 21)
y = np.random.randint(0, 21)
shuffled_tiles[i] = tiles[k, :, x:x+64, y:y+64]
k += 1
return shuffled_tiles, target
def __len__(self):
return len(self.images)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
fmt_str += ' Root Location: {}\n'.format(self.root)
return fmt_str
class FileListDataset(data.Dataset):
def __init__(self, path_to_txt_file, transform):
with open(path_to_txt_file, 'r') as f:
self.file_list = f.readlines()
self.file_list = [row.rstrip() for row in self.file_list]
self.transform = transform
def __getitem__(self, idx):
image_path = self.file_list[idx].split()[0]
img = Image.open(image_path).convert('RGB')
target = int(self.file_list[idx].split()[1])
if self.transform is not None:
images = self.transform(img)
return images, target
def __len__(self):
return len(self.file_list)
def denormalize(tensor):
tensor = tensor.cpu()
std = torch.tensor([0.229, 0.224, 0.225], dtype=torch.float32)
mean = torch.tensor([0.485, 0.456, 0.406], dtype=torch.float32)
array = ((tensor * std[:, None, None]) + mean[:, None, None]).numpy()
return array.transpose((1, 2, 0))
def show_jigsaw(shuffled_tiles, image_path, target):
image_name = image_path.split('/')[-1]
bn, ext = image_name.split('.')
image_name = '%s_%d.%s' % (bn, target, ext)
fig, axes = plt.subplots(nrows=3, ncols=3)
for i in range(3):
for j in range(3):
axes[i][j].imshow(denormalize(shuffled_tiles[(i * 3) + j]))
fig.savefig(os.path.join('example_puzzles', image_name))
| [
"numpy.load",
"os.path.join",
"torch.zeros_like",
"torch.zeros",
"torchvision.transforms.ToTensor",
"PIL.Image.open",
"random.random",
"numpy.random.randint",
"torchvision.transforms.Grayscale",
"torchvision.datasets.folder.default_loader",
"torchvision.transforms.RandomCrop",
"torchvision.tra... | [((728, 751), 'torch.zeros_like', 'torch.zeros_like', (['image'], {}), '(image)\n', (744, 751), False, 'import torch\n'), ((3760, 3816), 'torch.tensor', 'torch.tensor', (['[0.229, 0.224, 0.225]'], {'dtype': 'torch.float32'}), '([0.229, 0.224, 0.225], dtype=torch.float32)\n', (3772, 3816), False, 'import torch\n'), ((3828, 3884), 'torch.tensor', 'torch.tensor', (['[0.485, 0.456, 0.406]'], {'dtype': 'torch.float32'}), '([0.485, 0.456, 0.406], dtype=torch.float32)\n', (3840, 3884), False, 'import torch\n'), ((4195, 4225), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': '(3)'}), '(nrows=3, ncols=3)\n', (4207, 4225), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1165), 'numpy.load', 'np.load', (['perms_file'], {}), '(perms_file)\n', (1153, 1165), True, 'import numpy as np\n'), ((1287, 1309), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (1304, 1309), True, 'import torchvision.transforms as transforms\n'), ((1335, 1361), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(255)'], {}), '(255)\n', (1356, 1361), True, 'import torchvision.transforms as transforms\n'), ((1387, 1430), 'torchvision.transforms.Grayscale', 'transforms.Grayscale', ([], {'num_output_channels': '(3)'}), '(num_output_channels=3)\n', (1407, 1430), True, 'import torchvision.transforms as transforms\n'), ((1456, 1477), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1475, 1477), True, 'import torchvision.transforms as transforms\n'), ((1503, 1578), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1523, 1578), True, 'import torchvision.transforms as transforms\n'), ((1717, 1758), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.perms.shape[0]'], {}), '(0, self.perms.shape[0])\n', (1734, 1758), True, 'import numpy as 
np\n'), ((1775, 1801), 'torchvision.datasets.folder.default_loader', 'default_loader', (['image_path'], {}), '(image_path)\n', (1789, 1801), False, 'from torchvision.datasets.folder import default_loader\n'), ((2237, 2283), 'torch.zeros', 'torch.zeros', (['(9, 3, 85, 85)'], {'dtype': 'image.dtype'}), '((9, 3, 85, 85), dtype=image.dtype)\n', (2248, 2283), False, 'import torch\n'), ((2475, 2521), 'torch.zeros', 'torch.zeros', (['(9, 3, 64, 64)'], {'dtype': 'image.dtype'}), '((9, 3, 64, 64), dtype=image.dtype)\n', (2486, 2521), False, 'import torch\n'), ((4364, 4407), 'os.path.join', 'os.path.join', (['"""example_puzzles"""', 'image_name'], {}), "('example_puzzles', image_name)\n", (4376, 4407), False, 'import os\n'), ((1899, 1914), 'random.random', 'random.random', ([], {}), '()\n', (1912, 1914), False, 'import random\n'), ((2561, 2585), 'numpy.random.randint', 'np.random.randint', (['(0)', '(21)'], {}), '(0, 21)\n', (2578, 2585), True, 'import numpy as np\n'), ((2602, 2626), 'numpy.random.randint', 'np.random.randint', (['(0)', '(21)'], {}), '(0, 21)\n', (2619, 2626), True, 'import numpy as np\n'), ((2093, 2108), 'random.random', 'random.random', ([], {}), '()\n', (2106, 2108), False, 'import random\n'), ((3436, 3458), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (3446, 3458), False, 'from PIL import Image\n')] |
# Make the "Fraction" class available here.
from util.math.fraction import Fraction
from util.math.points import mesh, chebyshev, polynomials, \
polynomial_indices, fekete_indices, fekete_points
from util.math.pairs import pair_to_index, index_to_pair, \
num_from_pairs, pairwise_distance
# Access different polynoimal functions and objects.
from util.math.polynomial import Spline, Polynomial, NewtonPolynomial
from util.math.polynomial import fit as fit_spline
from util.math.polynomial import polynomial as fit_polynomial
from util.math.polynomial import inverse as polynomial_inverse
# Useful for checking near-equal cases when roundoff error is involved.
SMALL = 2**(-26)
# ^^ SQRT(EPSILON( 0.0_REAL64 ))
# Compute the order of magnitude error (powers of 2 between numbers).
def oom_error(x, y):
    """Order-of-magnitude error: |log2| of the ratio of x to y (0.0 when equal)."""
    if x == y:
        return 0.0
    from numpy import log2
    # When y is zero, fall back to the inverted ratio (yields +inf).
    ratio = x / y if y != 0 else y / x
    return abs(log2(ratio))
# Compute the number-of-bits of accuracy (upper bound on correct float mantissa bits).
def bit_accuracy(x, y):
    """Upper bound on the number of correct mantissa bits shared by x and y."""
    if x == y:
        return float('inf')
    from numpy import log2
    return -log2(oom_error(x, y))
# Given a list of lists, flatten it to one dimension.
def flatten(l): return [v for row in l for v in row]
# Given a list of lists, transpose it and return.
def transpose(l): return [list(row) for row in zip(*l)]
# Return a boolean "is_numeric"
def is_numeric(obj):
    """Return True if ``obj`` supports scalar arithmetic (*, +, -, abs).

    Probes the arithmetic protocol directly instead of checking types, so
    numpy scalars, Fractions, complex numbers, etc. all count as numeric.
    """
    try:
        abs((.3*obj + 1*obj) - .3*obj)
        return True
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are no longer swallowed.
    except Exception: return False
# Function for performing absolute difference between numbers (or
# vectors). Falls back to equality for things that don't support
# difference, absolute value, or sums.
def abs_diff(v1, v2):
    """Absolute difference between two numbers or vectors.

    For iterables, returns the sum of elementwise absolute differences
    (works for numpy arrays).  When subtraction or abs is unsupported
    (e.g. plain lists or strings), falls back to equality:
    1 if v1 == v2 else 0.
    """
    if hasattr(v1, "__iter__"):
        # Narrowed from bare `except:` so interrupts are not swallowed.
        try: return sum(abs(v1 - v2))
        except Exception: return int(v1 == v2)
    try: return abs(v1 - v2)
    except Exception: return int(v1 == v2)
# Function for computing the product of a list of numbers.
def product(sequence):
    """Multiply together every element of ``sequence`` (1 for empty input)."""
    result = 1
    for factor in sequence:
        result = result * factor
    return result
# Compute the probability that 1 specific value is picked from a
# population of size `N` given a set number of `samples`.
def pick(samples, N):
    """Probability that 1 specific value is picked from a population of
    size ``N`` given ``samples`` draws (complement of missing every time)."""
    missed = 1
    for i in range(samples):
        missed *= (N - i - 1) / (N - i)
    return 1 - missed
# Return True if a number is prime, False otherwise.
def is_prime(n):
    """Return True if ``n`` is prime, False otherwise.

    Fix: values below 2 (0, 1, negatives) previously returned True because
    the trial-division loop body never executed; they are not prime.
    """
    if n < 2: return False
    for i in range(2,int(n**(1/2))+1):
        if (not (n%i)): return False
    return True
# Return all unique prime factors of a number in sorted order.
def primes(n):
    """Prime factorization of ``n`` as a sorted list of (prime, multiplicity)."""
    counts = {}
    divisor = 2
    while divisor * divisor <= n:
        # Strip out every power of the current divisor.
        while n % divisor == 0:
            counts[divisor] = counts.get(divisor, 0) + 1
            n //= divisor
        divisor += 1
    # Whatever remains above 1 is itself a prime factor.
    if n > 1:
        counts[n] = counts.get(n, 0) + 1
    return sorted(counts.items())
# Return all primes up to a given number.
def primes_up_to(n):
    """All primes up to ``n`` inclusive; 1 is included by this module's convention."""
    if n <= 0:
        return []
    if n == 1:
        return [1]
    found = [1, 2]
    for candidate in range(3, n + 1):
        # Trial division up to sqrt(candidate).
        if all(candidate % d for d in range(2, int(candidate ** 0.5) + 1)):
            found.append(candidate)
    return found
# Function for calculating (n choose k), more efficiently than
# using the raw factorials (takes advantage of cancellation).
#
# Parameters:
#   n -- positive integer
#   k -- positive integer
#
# Example:
#   > choose(10,7)
#   120
#
# Notes:
#   Raises generic exception for negative input integers.
#   Returns 0 when k > n (previously returned 1, which is incorrect).
#   Undefined behavior for non-integer inputs.
def choose(n, k):
    if (n < 0) or (k < 0): raise(Exception("Both 'n' and 'k' should be positive numbers."))
    if (k > n): return 0   # cannot choose more items than exist
    if (n == k) or (k == 0): return 1
    k = min(k, n - k)      # symmetry: C(n,k) == C(n,n-k); use the shorter loops
    numerator = 1
    denominator = 1
    for i in range(n, n - k, -1): numerator *= i
    for i in range(1, k + 1): denominator *= i
    return numerator // denominator
# Compute the root of something to a higher degree of accuracy than is
# possible with floating point numbers. Default accuracy is 1e(-17),
# which is slightly more precise than a 64 bit floating point number.
#
# The result that is returned is a Fraction object, so that raising
# it to the "power" should result in something that exactly rounds to
# the provided "base".
def root(base, power, accuracy=Fraction(1,1e17)):
    """Compute an exact-Fraction approximation of base**(1/power).

    Minimizes |x**power - base| over x in [0, base] with ``min_on_line``,
    rounding candidate points onto Fractions so the result raised to
    ``power`` rounds back to ``base``.
    NOTE(review): only ``power.numerator`` enters the objective, so a
    fractional ``power`` is effectively truncated -- confirm intended.
    """
    from util.optimize import min_on_line
    power = Fraction(power)
    lower = Fraction(0,1)
    upper = Fraction(base)
    # Objective: distance of x**power from the target value.
    f = lambda x: abs(x**power.numerator - upper)
    # Keep candidate points exact by mapping them onto Fractions.
    frac_round = lambda x: Fraction(x)
    return min_on_line(f, lower, upper, accuracy=accuracy, round=frac_round)
if __name__ == "__main__":
    # Demo: compare a floating-point 10th root against the exact Fraction root.
    value = 2
    root_num = 10
    print("value: ",value)
    print("power: ",1/root_num)
    float_result = value**(1/root_num)
    print("float_result**power: ",float_result**root_num)
    root_result = root(value, root_num)
    # Raising the exact root back to the power should reproduce `value`.
    print("root_result**power: ",float(root_result**root_num))
    print("root_result: ",repr(root_result))
| [
"numpy.log2",
"util.optimize.min_on_line",
"util.math.fraction.Fraction"
] | [((4318, 4336), 'util.math.fraction.Fraction', 'Fraction', (['(1)', '(1e+17)'], {}), '(1, 1e+17)\n', (4326, 4336), False, 'from util.math.fraction import Fraction\n'), ((4391, 4406), 'util.math.fraction.Fraction', 'Fraction', (['power'], {}), '(power)\n', (4399, 4406), False, 'from util.math.fraction import Fraction\n'), ((4419, 4433), 'util.math.fraction.Fraction', 'Fraction', (['(0)', '(1)'], {}), '(0, 1)\n', (4427, 4433), False, 'from util.math.fraction import Fraction\n'), ((4445, 4459), 'util.math.fraction.Fraction', 'Fraction', (['base'], {}), '(base)\n', (4453, 4459), False, 'from util.math.fraction import Fraction\n'), ((4560, 4625), 'util.optimize.min_on_line', 'min_on_line', (['f', 'lower', 'upper'], {'accuracy': 'accuracy', 'round': 'frac_round'}), '(f, lower, upper, accuracy=accuracy, round=frac_round)\n', (4571, 4625), False, 'from util.optimize import min_on_line\n'), ((4537, 4548), 'util.math.fraction.Fraction', 'Fraction', (['x'], {}), '(x)\n', (4545, 4548), False, 'from util.math.fraction import Fraction\n'), ((901, 912), 'numpy.log2', 'log2', (['(x / y)'], {}), '(x / y)\n', (905, 912), False, 'from numpy import log2\n'), ((942, 953), 'numpy.log2', 'log2', (['(y / x)'], {}), '(y / x)\n', (946, 953), False, 'from numpy import log2\n')] |
import cv2 as cv
import os
import numpy as np

# Batch saturation booster: reads a 3-line config (input folder, operation
# code, output folder), walks the input folder and writes saturation-adjusted
# copies of every image into the output folder.
cfg_file_path = "Hue_Booster_Config.txt"
# Make the script's own folder the working directory; abspath() guards the
# case where __file__ is a bare filename (dirname would be '').
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# `with` guarantees the config file handle is closed (it was leaked before).
with open(cfg_file_path, 'r') as cfg_file:
    r_cfg_file = cfg_file.readlines()
# rstrip('\n') is safer than [:-1]: it never eats a real character when the
# final line has no trailing newline.
input_folder = r_cfg_file[0].rstrip('\n')
change_operation = r_cfg_file[1].rstrip('\n')
output_folder = r_cfg_file[2].rstrip('\n')
if os.path.exists(input_folder):
    # Create the output folder once, up front (exist_ok avoids a race).
    os.makedirs(output_folder, exist_ok=True)
    for (dirpath, dirnames, filenames) in os.walk(input_folder):
        for file in filenames:
            # os.path.join is portable (the original hard-coded '\\').
            image = cv.imread(os.path.join(dirpath, file))
            if image is None:  # skip non-image files instead of crashing
                continue
            image = cv.cvtColor(image, cv.COLOR_BGR2HSV)
            if change_operation == '0':
                # Force full saturation.
                image[..., 1] = 255
            elif change_operation == '1':
                # Double the saturation, clamping at 255.
                image[..., 1] = np.where(image[..., 1] > 127, 255, image[..., 1] + image[..., 1])
            elif change_operation == '2':
                # Saturate everything above a low threshold.
                image[..., 1] = np.where(image[..., 1] > 25, 255, image[..., 1])
            image = cv.cvtColor(image, cv.COLOR_HSV2BGR)
            cv.imwrite(os.path.join(output_folder, file), image)
| [
"os.mkdir",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.dirname",
"os.walk",
"os.path.exists",
"cv2.imread",
"numpy.where"
] | [((579, 607), 'os.path.exists', 'os.path.exists', (['input_folder'], {}), '(input_folder)\n', (593, 607), False, 'import os\n'), ((104, 129), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (119, 129), False, 'import os\n'), ((652, 673), 'os.walk', 'os.walk', (['input_folder'], {}), '(input_folder)\n', (659, 673), False, 'import os\n'), ((728, 760), 'cv2.imread', 'cv.imread', (["(dirpath + '\\\\' + file)"], {}), "(dirpath + '\\\\' + file)\n", (737, 760), True, 'import cv2 as cv\n'), ((782, 818), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_BGR2HSV'], {}), '(image, cv.COLOR_BGR2HSV)\n', (793, 818), True, 'import cv2 as cv\n'), ((1185, 1221), 'cv2.cvtColor', 'cv.cvtColor', (['image', 'cv.COLOR_HSV2BGR'], {}), '(image, cv.COLOR_HSV2BGR)\n', (1196, 1221), True, 'import cv2 as cv\n'), ((1327, 1373), 'cv2.imwrite', 'cv.imwrite', (["(output_folder + '\\\\' + file)", 'image'], {}), "(output_folder + '\\\\' + file, image)\n", (1337, 1373), True, 'import cv2 as cv\n'), ((1242, 1271), 'os.path.exists', 'os.path.exists', (['output_folder'], {}), '(output_folder)\n', (1256, 1271), False, 'import os\n'), ((1290, 1313), 'os.mkdir', 'os.mkdir', (['output_folder'], {}), '(output_folder)\n', (1298, 1313), False, 'import os\n'), ((973, 1038), 'numpy.where', 'np.where', (['(image[..., 1] > 127)', '(255)', '(image[..., 1] + image[..., 1])'], {}), '(image[..., 1] > 127, 255, image[..., 1] + image[..., 1])\n', (981, 1038), True, 'import numpy as np\n'), ((1115, 1163), 'numpy.where', 'np.where', (['(image[..., 1] > 25)', '(255)', 'image[..., 1]'], {}), '(image[..., 1] > 25, 255, image[..., 1])\n', (1123, 1163), True, 'import numpy as np\n')] |
import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()  # aggressively reduces tokens to word stems
from PIL import Image
#load images
# Static artwork shown on the Streamlit page (paths relative to the app root).
center = Image.open('images/pc.jpg')
pc_image = Image.open('images/pc2.jpg')
pc =Image.open('images/pc3.jpg')
pc2 =Image.open('images/pc4.jpeg')
game =Image.open('images/game.jpg')
game2 =Image.open('images/game2.jpg')
# Intent definitions: each intent has a tag, example patterns and responses.
with open("dataset/gamesCopy.json") as file:
    data = json.load(file)
# Load cached preprocessed training data if present; otherwise rebuild it
# from the intents file and cache it for the next run.
try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except:  # NOTE(review): bare except also masks corrupt pickles -- confirm intent.
    words = []
    labels = []
    docs_x = []
    docs_y = []
    # Tokenize every example pattern, remembering which tag it belongs to.
    for intent in data["intents"]:
        for pattern in intent["patterns"]:
            wrds = nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])
        if intent["tag"] not in labels:
            labels.append(intent["tag"])
    # Vocabulary: sorted, de-duplicated stems ("?" tokens dropped).
    words = [stemmer.stem(w.lower()) for w in words if w != "?"]
    words = sorted(list(set(words)))
    labels = sorted(labels)
    training = []
    output = []
    out_empty = [0 for _ in range(len(labels))]
    # Encode each tokenized pattern as a bag-of-words vector + one-hot tag.
    for x, doc in enumerate(docs_x):
        bag = []
        wrds = [stemmer.stem(w.lower()) for w in doc]
        for w in words:
            if w in wrds:
                bag.append(1)
            else:
                bag.append(0)
        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1
        training.append(bag)
        output.append(output_row)
    training = numpy.array(training)
    output = numpy.array(output)
    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)
# tensorflow.reset_default_graph()
tensorflow.compat.v1.reset_default_graph()
# Feed-forward intent classifier: bag-of-words input -> two hidden layers of
# 8 units -> softmax over the intent tags.
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
# Load pre-trained weights; uncomment the block below to (re)train instead.
# try:
model.load("model_combine.tflearn")
# except:
#     model.fit(training, output, n_epoch=500, batch_size=8, show_metric=True)
#     model.save("model_combine.tflearn")
# Per-session flag so the greeting is shown only on the first interaction.
ss = SessionState.get(is_startup=True)
def bag_of_words(s, words):
    """One-hot bag-of-words vector: 1 where a stemmed token of ``s`` matches ``words``."""
    tokens = {stemmer.stem(token.lower()) for token in nltk.word_tokenize(s)}
    return numpy.array([1 if w in tokens else 0 for w in words])
def get_text():
    """Render the chat input box and return whatever the user typed."""
    return st.text_input("You: ", "")
def chat():
    """Produce the bot's reply for the current Streamlit rerun.

    On the very first run the input box is rendered and a greeting is
    returned; afterwards the user's text is classified into an intent tag
    and that intent's first canned response is returned.
    """
    if ss.is_startup:
        get_text()  # render the input box so it exists on first page load
        response = "Hi, I'm happy to have you here \nI have a lot to discuss about PC or Games"
        ss.is_startup = False
        return response
    else:
        inp = get_text()
        if inp.lower() == "quit":
            response = "Thank you. Good bye. Tata. Good night. Oyasumi."
            return response
        # Classify the input; map the winning index back to its tag.
        results = model.predict([bag_of_words(inp, words)])
        results_index = numpy.argmax(results)
        tag = labels[results_index]
        for tg in data["intents"]:
            if tg['tag'] == tag:
                # NOTE(review): always uses the first response, though
                # `random` is imported -- random.choice may have been intended.
                responses = tg['responses'][0]
                break
        # NOTE(review): `responses` is unbound if no intent matches `tag`;
        # safe only while `labels` stays in sync with data["intents"].
        return responses
st.sidebar.title("Prof PC")
st.title("""
Prof PC
You can ask anything about PC or games requirements.
""")
st.image(center,width=700)
st.sidebar.image(pc_image,width=300)
st.sidebar.image(pc,width=350)
st.sidebar.image(pc2,width=350)
st.sidebar.image(game,width=350)
st.sidebar.image(game2,width=320)
st.text_area("Bot:", value=chat(), height=500, max_chars=None, key=None) | [
"pickle.dump",
"json.load",
"streamlit.image",
"streamlit.text_input",
"tflearn.fully_connected",
"numpy.argmax",
"SessionState.get",
"tflearn.regression",
"PIL.Image.open",
"streamlit.title",
"nltk.stem.lancaster.LancasterStemmer",
"tflearn.DNN",
"streamlit.sidebar.title",
"numpy.array",
... | [((157, 175), 'nltk.stem.lancaster.LancasterStemmer', 'LancasterStemmer', ([], {}), '()\n', (173, 175), False, 'from nltk.stem.lancaster import LancasterStemmer\n'), ((222, 249), 'PIL.Image.open', 'Image.open', (['"""images/pc.jpg"""'], {}), "('images/pc.jpg')\n", (232, 249), False, 'from PIL import Image\n'), ((261, 289), 'PIL.Image.open', 'Image.open', (['"""images/pc2.jpg"""'], {}), "('images/pc2.jpg')\n", (271, 289), False, 'from PIL import Image\n'), ((294, 322), 'PIL.Image.open', 'Image.open', (['"""images/pc3.jpg"""'], {}), "('images/pc3.jpg')\n", (304, 322), False, 'from PIL import Image\n'), ((328, 357), 'PIL.Image.open', 'Image.open', (['"""images/pc4.jpeg"""'], {}), "('images/pc4.jpeg')\n", (338, 357), False, 'from PIL import Image\n'), ((364, 393), 'PIL.Image.open', 'Image.open', (['"""images/game.jpg"""'], {}), "('images/game.jpg')\n", (374, 393), False, 'from PIL import Image\n'), ((401, 431), 'PIL.Image.open', 'Image.open', (['"""images/game2.jpg"""'], {}), "('images/game2.jpg')\n", (411, 431), False, 'from PIL import Image\n'), ((1802, 1844), 'tensorflow.compat.v1.reset_default_graph', 'tensorflow.compat.v1.reset_default_graph', ([], {}), '()\n', (1842, 1844), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((1909, 1940), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(8)'], {}), '(net, 8)\n', (1932, 1940), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((1947, 1978), 'tflearn.fully_connected', 'tflearn.fully_connected', (['net', '(8)'], {}), '(net, 8)\n', (1970, 1978), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2058, 2081), 'tflearn.regression', 'tflearn.regression', (['net'], {}), '(net)\n', (2076, 2081), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2090, 2106), 
'tflearn.DNN', 'tflearn.DNN', (['net'], {}), '(net)\n', (2101, 2106), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2280, 2313), 'SessionState.get', 'SessionState.get', ([], {'is_startup': '(True)'}), '(is_startup=True)\n', (2296, 2313), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3406, 3433), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Prof PC"""'], {}), "('Prof PC')\n", (3422, 3433), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3434, 3519), 'streamlit.title', 'st.title', (['"""\nProf PC \nYou can ask anything about PC or games requirements.\n"""'], {}), '("""\nProf PC \nYou can ask anything about PC or games requirements.\n"""\n )\n', (3442, 3519), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3516, 3543), 'streamlit.image', 'st.image', (['center'], {'width': '(700)'}), '(center, width=700)\n', (3524, 3543), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3543, 3580), 'streamlit.sidebar.image', 'st.sidebar.image', (['pc_image'], {'width': '(300)'}), '(pc_image, width=300)\n', (3559, 3580), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3580, 3611), 'streamlit.sidebar.image', 'st.sidebar.image', (['pc'], {'width': '(350)'}), '(pc, width=350)\n', (3596, 3611), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3611, 3643), 'streamlit.sidebar.image', 'st.sidebar.image', (['pc2'], {'width': '(350)'}), '(pc2, width=350)\n', (3627, 3643), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3643, 3676), 'streamlit.sidebar.image', 'st.sidebar.image', 
(['game'], {'width': '(350)'}), '(game, width=350)\n', (3659, 3676), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3676, 3710), 'streamlit.sidebar.image', 'st.sidebar.image', (['game2'], {'width': '(320)'}), '(game2, width=320)\n', (3692, 3710), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((489, 504), 'json.load', 'json.load', (['file'], {}), '(file)\n', (498, 504), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2400, 2421), 'nltk.word_tokenize', 'nltk.word_tokenize', (['s'], {}), '(s)\n', (2418, 2421), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2622, 2638), 'numpy.array', 'numpy.array', (['bag'], {}), '(bag)\n', (2633, 2638), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((2673, 2699), 'streamlit.text_input', 'st.text_input', (['"""You: """', '""""""'], {}), "('You: ', '')\n", (2686, 2699), True, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((594, 608), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (605, 608), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((1611, 1632), 'numpy.array', 'numpy.array', (['training'], {}), '(training)\n', (1622, 1632), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((1646, 1665), 'numpy.array', 'numpy.array', (['output'], {}), '(output)\n', (1657, 1665), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((3183, 3204), 'numpy.argmax', 'numpy.argmax', (['results'], {}), '(results)\n', (3195, 3204), False, 'import nltk, numpy, tflearn, tensorflow, random, json, 
pickle, streamlit as st, SessionState, sys\n'), ((1716, 1765), 'pickle.dump', 'pickle.dump', (['(words, labels, training, output)', 'f'], {}), '((words, labels, training, output), f)\n', (1727, 1765), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n'), ((778, 805), 'nltk.word_tokenize', 'nltk.word_tokenize', (['pattern'], {}), '(pattern)\n', (796, 805), False, 'import nltk, numpy, tflearn, tensorflow, random, json, pickle, streamlit as st, SessionState, sys\n')] |
"""
Project: RadarBook
File: frank_code.py
Created by: <NAME>
One: 1/26/2019
Created with: PyCharm
Copyright (C) 2019 Artech House (<EMAIL>)
This file is part of Introduction to Radar Using Python and MATLAB
and can not be copied and/or distributed without the express permission of Artech House.
"""
from scipy.constants import pi
from numpy import exp
def n_phase_code(n):
    """
    Generate an N-phase Frank code sequence.
    :param n: The sequence groups.
    :return: The Frank code sequence (length N^2).
    """
    sequence = []
    for i in range(n):
        for j in range(n):
            # Phase increment for element (i, j) of the N x N Frank matrix.
            phase = 2.0 * pi / float(n) * i * j
            sequence.append(exp(1j * phase))
    return sequence
| [
"numpy.exp"
] | [((613, 626), 'numpy.exp', 'exp', (['(1.0j * p)'], {}), '(1.0j * p)\n', (616, 626), False, 'from numpy import exp\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 7 16:54:56 2021
@author: dawooood
Usage
python3 model_hum_corr.py path_to_model_features path_to_avg_human_ratings
"""
import sys
import pandas as pd
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_validate
def calc_corr(model,human_mat):
    """Correlate model feature similarities with averaged human ratings.

    Parameters
    ----------
    model : str
        Path to a CSV of model feature vectors, one row per item
        (18 items are assumed throughout).
    human_mat : str
        Path to a CSV whose first row holds the averaged pairwise human
        similarity ratings (upper triangle, row-major order).

    Returns
    -------
    (deep_corr, adap_corr) : tuple of float
        Squared Pearson correlations for the raw ("deep") similarity
        matrix and for the ridge-adapted similarity predictions.
    """
    hum_sim_surv = pd.read_csv(human_mat,header=None)
    hum_avg_ratings = hum_sim_surv.loc[0].to_numpy()
    # Rebuild the symmetric 18x18 human similarity matrix from the flat
    # upper-triangle ratings.  NOTE(review): the diagonal value 6 is
    # presumably the rating-scale maximum -- confirm with the survey design.
    hum_sim_mat = np.zeros((18,18))
    ind = 0
    hum_sim_mat[:] = np.nan
    for i in range(18):
        for j in range(i,18):
            if i!=j:
                hum_sim_mat[i,j] = hum_avg_ratings[ind]
                hum_sim_mat[j,i] = hum_avg_ratings[ind]
                ind+=1
            else:
                hum_sim_mat[i,j] = 6
    hum_sim_mat_corr = np.reshape(hum_sim_mat,(18*18))
    F =np.genfromtxt(model,delimiter=',')
    #F = np.reshape(F, (18,4096))
    # Model similarity = Gram matrix of the feature vectors (dot products).
    model_sim = np.dot(F,np.transpose(F))
    model_sim_corr = np.reshape(model_sim, (18*18))
    # "Deep" score: squared Pearson r between flattened similarity matrices.
    deep_corr = (np.corrcoef(hum_sim_mat_corr,model_sim_corr)[0,1])**2
    # Upper-triangle human ratings paired with elementwise feature products,
    # used to fit a ridge model that adapts the representation.
    hum_sim_i_j = []
    for i in range(hum_sim_mat.shape[0]):
        for j in range(i,hum_sim_mat.shape[1]):
            hum_sim_i_j.append(hum_sim_mat[i,j])
    model_F= []
    for i in range(F.shape[0]):
        for j in range(i,F.shape[0]):
            model_F.append(F[i] * F[j])
    # Full (non-triangular) grid of pairs for predicting the whole matrix.
    model_F_cv= []
    for i in range(F.shape[0]):
        for j in range(F.shape[0]):
            model_F_cv.append(F[i] * F[j])
    reg = Ridge(solver='sag', fit_intercept=False)
    parameters = {'alpha': [10,100,1000,1e4, 50000, 1e5,1e6]}
    # Grid-search the ridge penalty via 6-fold cross-validation.
    search = GridSearchCV(reg, parameters, scoring='neg_mean_squared_error', cv=6)
    search.fit(model_F, hum_sim_i_j)
    best_reg = search.best_estimator_
    #print(best_reg)
    # NOTE(review): the result of this cross-validation is unused; it looks
    # like leftover diagnostics.
    a=cross_validate(best_reg,model_F_cv,hum_sim_mat_corr,scoring="r2",cv=6)
    # "Adapted" score: squared Pearson r between predicted and human matrices.
    PredSimMat = best_reg.predict(model_F_cv)
    cor_mat = np.corrcoef(PredSimMat, hum_sim_mat.reshape(18*18))
    r = cor_mat[0,1]
    r2 = r**2
    adap_corr = r2
    return deep_corr,adap_corr
if __name__ == "__main__":
    # CLI: argv[1] = model feature CSV, argv[2] = averaged human ratings CSV.
    model = sys.argv[1]
    human_mat = sys.argv[2]
    deep_corr, adap_corr = calc_corr(model,human_mat)
    print(f'Deep representation : correlation = {round(deep_corr,2)}')
    print(f'Adapting representation : correlation = {round(adap_corr,2)}')
| [
"sklearn.model_selection.GridSearchCV",
"pandas.read_csv",
"sklearn.model_selection.cross_validate",
"numpy.corrcoef",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.transpose",
"numpy.reshape",
"sklearn.linear_model.Ridge"
] | [((437, 472), 'pandas.read_csv', 'pd.read_csv', (['human_mat'], {'header': 'None'}), '(human_mat, header=None)\n', (448, 472), True, 'import pandas as pd\n'), ((548, 566), 'numpy.zeros', 'np.zeros', (['(18, 18)'], {}), '((18, 18))\n', (556, 566), True, 'import numpy as np\n'), ((894, 926), 'numpy.reshape', 'np.reshape', (['hum_sim_mat', '(18 * 18)'], {}), '(hum_sim_mat, 18 * 18)\n', (904, 926), True, 'import numpy as np\n'), ((933, 968), 'numpy.genfromtxt', 'np.genfromtxt', (['model'], {'delimiter': '""","""'}), "(model, delimiter=',')\n", (946, 968), True, 'import numpy as np\n'), ((1071, 1101), 'numpy.reshape', 'np.reshape', (['model_sim', '(18 * 18)'], {}), '(model_sim, 18 * 18)\n', (1081, 1101), True, 'import numpy as np\n'), ((1644, 1684), 'sklearn.linear_model.Ridge', 'Ridge', ([], {'solver': '"""sag"""', 'fit_intercept': '(False)'}), "(solver='sag', fit_intercept=False)\n", (1649, 1684), False, 'from sklearn.linear_model import Ridge\n'), ((1760, 1829), 'sklearn.model_selection.GridSearchCV', 'GridSearchCV', (['reg', 'parameters'], {'scoring': '"""neg_mean_squared_error"""', 'cv': '(6)'}), "(reg, parameters, scoring='neg_mean_squared_error', cv=6)\n", (1772, 1829), False, 'from sklearn.model_selection import GridSearchCV\n'), ((1947, 2021), 'sklearn.model_selection.cross_validate', 'cross_validate', (['best_reg', 'model_F_cv', 'hum_sim_mat_corr'], {'scoring': '"""r2"""', 'cv': '(6)'}), "(best_reg, model_F_cv, hum_sim_mat_corr, scoring='r2', cv=6)\n", (1961, 2021), False, 'from sklearn.model_selection import cross_validate\n'), ((1032, 1047), 'numpy.transpose', 'np.transpose', (['F'], {}), '(F)\n', (1044, 1047), True, 'import numpy as np\n'), ((1124, 1169), 'numpy.corrcoef', 'np.corrcoef', (['hum_sim_mat_corr', 'model_sim_corr'], {}), '(hum_sim_mat_corr, model_sim_corr)\n', (1135, 1169), True, 'import numpy as np\n')] |
import numpy as np
from hardware import settings
from functions import *
class procedure(settings, atom):
    """Experiment sequences for a cold-atom apparatus.

    Each method programs one stage (MOT, compressed MOT, polarization
    gradient cooling, dipole trap, evaporation, time-of-flight, imaging) by
    pushing analog (ao), digital (do) and DDS-frequency channel values
    through ``self.update`` for a duration ``t`` (seconds, presumably --
    confirm against the ``settings`` base class).
    """
    def __init__(self, simulation=False):
        # In simulation mode no hardware is touched (see abs_img()).
        self.simulation = simulation
        super().__init__(self.simulation)
    #### Procedures ####
    def test(self, t=5e-8, ao=10., do=1):
        """Smoke test: drive one analog and one digital test channel for t seconds."""
        ao_channels = {
            "test_ao": ao
        }
        do_channels = {
            "test_do": do
        }
        dds_channels = {
            "cooling_freq": (0 + self.cooling_beatnote)/self.cooling_freq_div ,
            "repump_freq": (0 + self.repump_beatnote)/self.repump_freq_div
        }
        self.update(t, ao_channels, do_channels, dds_channels)
        return
    def mot(self, t=2, detune=10e6, mot_cooling=5, mot_repump_seed=5, current=5):
        """Magneto-optical trap: cooling/repump light on, coils energized."""
        ao_channels = {
            "mot_cooling": mot_cooling,
            "mot_repump_seed": mot_repump_seed,
            "coil_sum": current/self.VtoA,
            "coil_diff": 0
        }
        do_channels = {
            "mot_cooling_shutter": 1,
            "mot_repump_seed_shutter": 1,
            "slower_coil": 1,
            "coil_top_dir": 1,
            "coil_bottom_dir": 1
        }
        dds_channels = {
            "cooling_freq": (detune + self.cooling_beatnote)/self.cooling_freq_div ,
            "repump_freq": (0 + self.repump_beatnote)/self.repump_freq_div
        }
        self.update(t, ao_channels, do_channels, dds_channels)
        return
    def cmot(self, t=20e-3, detune=30e6, mot_cooling=1, mot_repump_seed=1, current=10):
        """Compressed MOT: slower coil off for 1 ms, then a MOT stage for the rest of t."""
        do_channels = {"slower_coil": 0}
        self.update(1e-3, {}, do_channels, {})
        self.mot(t=t-1e-3, detune=detune, mot_cooling=mot_cooling, mot_repump_seed=mot_repump_seed, current=current)
        return
    def pgc(self, t=20e-3, detune=120e6, mot_cooling=0.2, mot_repump_seed=0.1):
        """Polarization gradient cooling: far-detuned, low-power MOT stage with current=-10."""
        self.mot(t=t, detune=detune, mot_cooling=mot_cooling, mot_repump_seed=mot_repump_seed, current=-10)
        return
    def drsc(self, t=10e-3, detune=251e6, drsc=5, optpump=1, current=-10, bias_x=1, bias_y=1, bias_z=0):
        # Degenerate Raman sideband cooling procedure
        # NOTE(review): stub -- parameters are accepted but nothing is programmed yet.
        return
    def odt(self, t=2, odt_x_ao=10, odt_y_ao=10, odt_sheet_ao=10, current=0):
        """Optical dipole trap: DRSC/pump light off, trap beams (x, y, sheet) on."""
        ao_channels = {
            "drsc": 0,
            "optpump": 0,
            "coil_sum": current/4,
            "coil_diff": 0,
            "odt_x_ao": odt_x_ao,
            "odt_y_ao": odt_y_ao,
            "odt_sheet_ao": odt_sheet_ao
        }
        do_channels = {
            "drsc_shutter": 0,
            "optpump_shutter": 0,
            "coil_top_dir": 1,
            "odt_x_do": 1,
            "odt_y_do": 1,
            "odt_sheet_do": 1
        }
        self.update(t, ao_channels, do_channels, {})
        return
    def evap(self, t=4, odt_x_ao=10, odt_y_ao=10, odt_sheet_ao=10, tau=2):
        """Evaporative cooling: exponentially ramp the x/y trap powers down over t."""
        step = 10000
        for i in np.linspace(1e-4, t, num=step):
            ao_channels = {
                "odt_x_ao": odt_x_ao * np.exp(-1*i/tau),
                "odt_y_ao": odt_y_ao * np.exp(-1*i/tau)
            }
            do_channels = {
                "test_do": 1
            }
            # NOTE(review): per-step duration (t-2)/step sums to t-2, not t,
            # while the ramp samples run up to t -- confirm the intended timing.
            self.update((t-2)/step, ao_channels, do_channels, {})
        return
    def tof(self, t=20e-3, detune=0):
        """Time of flight: drop the trap (beams off) and set imaging frequencies."""
        ao_channels = {
            "odt_x_ao": 0,
            "odt_y_ao": 0,
            "odt_sheet_ao": 0
        }
        do_channels = {
            "himg_shutter": 1,
            "odt_x_do": 1,
            "odt_y_do": 1,
            "odt_sheet_do": 1
        }
        dds_channels = {
            "cooling_freq": (detune + self.cooling_beatnote)/self.cooling_freq_div ,
            "repump_freq": (0 + self.repump_beatnote)/self.repump_freq_div
        }
        self.update(t, ao_channels, do_channels, dds_channels)
        return
    def insitu(self, detune=0, img=2):
        """In-situ imaging: a 100 us 'time of flight' before the shot."""
        # NOTE(review): tof() takes no `img` parameter -- this call raises
        # TypeError as written; either drop img or add it to tof()'s signature.
        self.tof(t=100e-6, detune=detune, img=img)
        return
    def abs_img(self, img=2):
        """Absorption imaging: trigger two camera exposures (atoms + reference)."""
        if self.simulation:
            # Simulation short-circuits the hardware and fabricates two frames.
            self.raw_img = np.ones((2, 1024, 1280))
        else:
            self.update(100e-6, {"img": img}, {"img_trig": 1}, {})
            self.update(100e-6, {"img": 0}, {"img_trig": 0}, {})
            self.update(20e-3, {"img": img}, {"img_trig": 1}, {})
            self.update(20.1e-3, {"img": 0}, {"img_trig": 0}, {})
            self.raw_img = np.ones((2, 1024, 1280)) # dummy
        return
    def end(self, t=0.1):
        """Return the apparatus to its default (MOT-on) state."""
        self.mot(t=t)
        return
if __name__ == "__main__":
    import timeit
    # Dry-run the full experimental sequence in simulation mode and time it.
    def main():
        exp = procedure(simulation=True)
        # exp.test()
        exp.mot()
        exp.cmot()
        exp.pgc()
        exp.drsc()
        exp.odt()
        exp.evap()
        exp.tof()
        exp.abs_img()
        # run() is presumably inherited from the settings/atom base classes.
        exp.run()
        return
    begin = timeit.default_timer()
    main()
    print('Time:', timeit.default_timer() - begin, 'sec')
| [
"timeit.default_timer",
"numpy.exp",
"numpy.ones",
"numpy.linspace"
] | [((4770, 4792), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4790, 4792), False, 'import timeit\n'), ((2881, 2913), 'numpy.linspace', 'np.linspace', (['(0.0001)', 't'], {'num': 'step'}), '(0.0001, t, num=step)\n', (2892, 2913), True, 'import numpy as np\n'), ((4002, 4026), 'numpy.ones', 'np.ones', (['(2, 1024, 1280)'], {}), '((2, 1024, 1280))\n', (4009, 4026), True, 'import numpy as np\n'), ((4333, 4357), 'numpy.ones', 'np.ones', (['(2, 1024, 1280)'], {}), '((2, 1024, 1280))\n', (4340, 4357), True, 'import numpy as np\n'), ((4824, 4846), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (4844, 4846), False, 'import timeit\n'), ((2981, 3001), 'numpy.exp', 'np.exp', (['(-1 * i / tau)'], {}), '(-1 * i / tau)\n', (2987, 3001), True, 'import numpy as np\n'), ((3038, 3058), 'numpy.exp', 'np.exp', (['(-1 * i / tau)'], {}), '(-1 * i / tau)\n', (3044, 3058), True, 'import numpy as np\n')] |
from typing import Union, Iterable, Sequence, Callable
import numpy as np
from .closed import prepare as prepare_closed
from .open import prepare as prepare_open
Line = Union[np.ndarray, Iterable[Sequence[float]]]
def _normalize_parameter(t):
if not (0 <= t <= 1):
raise ValueError('The interpolation parameter must be between 0 and 1.')
return t
def _normalize_line(line):
assert len(line) > 0
line = np.asarray(line)
assert line.ndim == 2
# TODO: remove duplicates
return line
def interpolate(start: Line, stop: Line, t: float, closed: bool = True) -> np.ndarray:
    """
    Interpolate between ``start`` and ``stop`` at parameter ``t``.

    Parameters
    ----------
    start: numpy.ndarray
        the starting line, shape (N, d): N points of dimensionality d.
    stop: numpy.ndarray
        the ending line, shape (M, d): M points of dimensionality d.
    t: float
        the interpolation parameter; must lie in [0, 1].
    closed: bool
        whether the lines are closed

    Returns
    -------
    interpolated_line: numpy.ndarray

    Notes
    -----
    The two lines need not contain the same number of points.
    Closed lines are supported for 2D points only.
    """
    # Validate t first so a bad parameter fails before any line preparation.
    t = _normalize_parameter(t)
    interp = interpolator(start, stop, closed)
    return interp(t)
def interpolator(start: Line, stop: Line, closed: bool = True) -> Callable:
    """
    Build a function interpolating between ``start`` and ``stop``.

    Parameters
    ----------
    start: numpy.ndarray
        the starting line, shape (N, d): N points of dimensionality d.
    stop: numpy.ndarray
        the ending line, shape (M, d): M points of dimensionality d.
    closed: bool
        whether the lines are closed

    Returns
    -------
    interpolator: a function mapping an interpolation parameter in [0, 1]
        to the interpolated line.
    """
    # Resample/match both lines so they share a point count, then capture
    # the per-point displacement in a closure.
    prepare = prepare_closed if closed else prepare_open
    start, stop = prepare(_normalize_line(start), _normalize_line(stop))
    delta = stop - start

    def _interp(t: float):
        return start + delta * _normalize_parameter(t)

    return _interp
| [
"numpy.asarray"
] | [((434, 450), 'numpy.asarray', 'np.asarray', (['line'], {}), '(line)\n', (444, 450), True, 'import numpy as np\n')] |
from functools import partial
import numpy as np
import pyarrow.compute as pc
from vinum.core.functions import (
ConcatFunction,
FunctionType,
)
from vinum.parser.query import SQLOperator
SQL_OPERATOR_FUNCTIONS = {
SQLOperator.NEGATION: (np.negative, FunctionType.NUMPY),
SQLOperator.BINARY_NOT: (lambda x: ~x, FunctionType.NUMPY),
SQLOperator.BINARY_AND: (np.bitwise_and, FunctionType.NUMPY),
SQLOperator.BINARY_OR: (np.bitwise_or, FunctionType.NUMPY),
SQLOperator.BINARY_XOR: (np.bitwise_xor, FunctionType.NUMPY),
# Math operators
SQLOperator.ADDITION: (np.add, FunctionType.NUMPY),
SQLOperator.SUBTRACTION: (np.subtract, FunctionType.NUMPY),
SQLOperator.MULTIPLICATION: (np.multiply, FunctionType.NUMPY),
SQLOperator.DIVISION: (np.divide, FunctionType.NUMPY),
SQLOperator.MODULUS: (np.mod, FunctionType.NUMPY),
# Boolean operators
SQLOperator.AND: (pc.and_, FunctionType.ARROW),
SQLOperator.OR: (pc.or_, FunctionType.ARROW),
SQLOperator.NOT: (pc.invert, FunctionType.ARROW),
SQLOperator.EQUALS: (lambda x, y: x == y, FunctionType.NUMPY),
SQLOperator.NOT_EQUALS: (lambda x, y: x != y, FunctionType.NUMPY),
SQLOperator.GREATER_THAN: (lambda x, y: x > y, FunctionType.NUMPY),
SQLOperator.GREATER_THAN_OR_EQUAL:
(lambda x, y: x >= y, FunctionType.NUMPY),
SQLOperator.LESS_THAN: (lambda x, y: x < y, FunctionType.NUMPY),
SQLOperator.LESS_THAN_OR_EQUAL: (lambda x, y: x <= y, FunctionType.NUMPY),
SQLOperator.IS_NULL: (pc.is_null, FunctionType.ARROW),
SQLOperator.IS_NOT_NULL: (pc.is_valid, FunctionType.ARROW),
SQLOperator.IN: (np.isin, FunctionType.NUMPY),
SQLOperator.NOT_IN: (partial(np.isin, invert=True), FunctionType.NUMPY),
# SQL specific operators
SQLOperator.BETWEEN:
(lambda x, low, high: np.logical_and(x >= low, x <= high),
FunctionType.NUMPY),
SQLOperator.NOT_BETWEEN:
(lambda x, low, high: np.logical_or(x < low, x > high),
FunctionType.NUMPY),
# String operators
SQLOperator.CONCAT: (ConcatFunction, FunctionType.CLASS),
}
BINARY_OPERATORS = {
SQLOperator.ADDITION,
SQLOperator.SUBTRACTION,
SQLOperator.MULTIPLICATION,
SQLOperator.DIVISION,
SQLOperator.MODULUS,
SQLOperator.AND,
SQLOperator.OR,
SQLOperator.EQUALS,
SQLOperator.NOT_EQUALS,
SQLOperator.GREATER_THAN,
SQLOperator.GREATER_THAN_OR_EQUAL,
SQLOperator.LESS_THAN,
SQLOperator.LESS_THAN_OR_EQUAL,
}
| [
"functools.partial",
"numpy.logical_or",
"numpy.logical_and"
] | [((1698, 1727), 'functools.partial', 'partial', (['np.isin'], {'invert': '(True)'}), '(np.isin, invert=True)\n', (1705, 1727), False, 'from functools import partial\n'), ((1835, 1870), 'numpy.logical_and', 'np.logical_and', (['(x >= low)', '(x <= high)'], {}), '(x >= low, x <= high)\n', (1849, 1870), True, 'import numpy as np\n'), ((1961, 1993), 'numpy.logical_or', 'np.logical_or', (['(x < low)', '(x > high)'], {}), '(x < low, x > high)\n', (1974, 1993), True, 'import numpy as np\n')] |
# MIT License - Copyright <NAME> and contributors
# See the LICENSE.md file included in this source code package
"""Benchmarks for entropy estimation."""
import numpy as np
import timeit
setup = """
from ennemi import estimate_entropy
import numpy as np
rng = np.random.default_rng(0)
cov = np.array([
[ 1.0, 0.5, 0.6, -0.2],
[ 0.5, 1.0, 0.7, -0.5],
[ 0.6, 0.7, 2.0, -0.1],
[-0.2, -0.5, -0.1, 0.5]])
data = rng.multivariate_normal([0, 0, 0, 0], cov, size=16000)
"""
bench_1d = "estimate_entropy(data[:N,0], k=3)"
bench_2d = "estimate_entropy(data[:N,:2], k=3, multidim=True)"
bench_4d = "estimate_entropy(data[:N,:], k=3, multidim=True)"
bench_independent = "estimate_entropy(data[:N,:], k=3)"
bench_cond_2d = "estimate_entropy(data[:N,:2], k=3, cond=data[:N,3])"
bench_cond_2x2d = "estimate_entropy(data[:N,:2], k=3, cond=data[:N,2:])"
bench_independent_cond = "estimate_entropy(data[:N,:3], k=3, cond=data[:N,3])"
for (name, bench) in [ ("1D", bench_1d),
("4x1D", bench_independent),
("2D", bench_2d),
("4D", bench_4d),
("Cond 2D", bench_cond_2d),
("Cond 2D+2D", bench_cond_2x2d),
("Cond 3x1D", bench_independent_cond) ]:
for n in [ 1000, 4000, 16000 ]:
res = timeit.repeat(bench, setup, repeat=5, number=1, globals={"N": n})
print(f"{name:>10}, N={n:>5}: min={np.min(res):<6.3} s, mean={np.mean(res):<6.3} s")
| [
"timeit.repeat",
"numpy.mean",
"numpy.min"
] | [((1341, 1406), 'timeit.repeat', 'timeit.repeat', (['bench', 'setup'], {'repeat': '(5)', 'number': '(1)', 'globals': "{'N': n}"}), "(bench, setup, repeat=5, number=1, globals={'N': n})\n", (1354, 1406), False, 'import timeit\n'), ((1450, 1461), 'numpy.min', 'np.min', (['res'], {}), '(res)\n', (1456, 1461), True, 'import numpy as np\n'), ((1477, 1489), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (1484, 1489), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
#os.environ['KMP_DUPLICATE_LIB_OK']='True'
from build_model import confusion_matrix, plot_confusion_matrix, plt, load_testdata
import numpy as np
import tensorflow as tf
import argparse
def load_data(dirname):
listfile=os.listdir(dirname)
X = []
Y = []
for file in listfile:
if "_" in file:
continue
wordname=file
textlist=os.listdir(dirname+wordname)
for text in textlist:
if "DS_" in text:
continue
textname=dirname+wordname+"/"+text
with open(textname, mode = 'r') as t:
numbers = [float(num) for num in t.read().split()]
while numbers[0] == 0:
numbers = numbers[1:]
for i in range(len(numbers),4200):
numbers.extend([0.000])
landmark_frame=[]
row=0
for i in range(0,35):
landmark_frame.extend(numbers[row:row+84])
row += 84
landmark_frame=np.array(landmark_frame)
landmark_frame=landmark_frame.reshape(-1,84)
X.append(np.array(landmark_frame))
Y.append(wordname)
X=np.array(X)
Y=np.array(Y)
print(Y)
x_train = X
x_train=np.array(x_train)
return x_train,Y
# Per ottenere ogni etichetta nel file label.txt
def load_label():
with open("label.txt", mode='r') as l:
listfile = [i for i in l.read().split()]
label = {}
count = 1
for l in listfile:
if "_" in l:
continue
label[l] = count
count += 1
return label
def main(output_data_path):
output_dir = output_data_path
print("Caricamento dati")
print("=========================================================")
#x_test,Y =load_data(output_dir)
#print("x_test:",x_test, "Y", Y)
print("Caricamento completato!\n")
print("New Model")
print("=========================================================")
#new_model = tf.keras.models.load_model('modello_rete.h5')
print("Caricamento completato!\n")
print("Etichette")
print("=========================================================")
labels = load_label()
print(labels)
print("Caricamento completato!\n")
print("Predizione")
print("=========================================================")
#xhat = x_test
#yhat = new_model.predict(xhat)
#predictions = np.array([np.argmax(pred) for pred in yhat])
# print(predictions)
print("Rev Labels")
print("=========================================================")
rev_labels = dict(zip(list(labels.values()), list(labels.keys())))
print(rev_labels)
print("Scrittura file")
print("=========================================================")
s = 0
count = 0
txtpath = output_data_path + "result.txt"
# for i in predictions:
# print("true_label: ",Y[s]," === ","predict_label: ",rev_labels[i])
# print("\n")
# if rev_labels[i] == Y[s]:
# count+=1
# s+=1
# print("Numero di entrate dalla media: " + str(count))
x_test, y_test = load_testdata(output_dir)
new_model = tf.keras.models.load_model('modello.h5')
x = x_test
yhat = new_model.predict(x)
print('Costruzione della matrice di confusione')
cfm = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(yhat, axis=1))
np.set_printoptions(precision=2)
plt.figure(figsize=(10,10))
class_names = ['Bombazza', 'Bacio', 'Buono', 'OMG', 'Pazzo', '<NAME>']
class_names = sorted(class_names)
plot_confusion_matrix(cfm, classes=class_names, title='Matrice di confusione', normalize=False)
plt.savefig('/Users/drissouissiakavaleriofoule/Desktop/TESI/matrix2.png')
print('Salvataggio OK')
# print("Accuratezza", accuracy_score(Y, rev_labels))
#print("Precisione", precision_score(np.argmax(Y), np.argmax(yhat, axis=1), average='macro'))
#print("Recall", recall_score(np.argmax(Y), np.argmax(yhat, axis=1), average='micro'))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Predict Sign language with Mediapipe')
parser.add_argument("--output_data_path",help=" ")
args=parser.parse_args()
output_data_path = '/Users/drissouissiakavaleriofoule/Desktop/TESI/PROGETTO/CartellaVideo/outputvideo/Relative/'
main(output_data_path)
| [
"build_model.plt.savefig",
"numpy.set_printoptions",
"tensorflow.keras.models.load_model",
"argparse.ArgumentParser",
"build_model.plt.figure",
"numpy.argmax",
"numpy.array",
"build_model.plot_confusion_matrix",
"build_model.load_testdata",
"os.listdir"
] | [((319, 338), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (329, 338), False, 'import os\n'), ((1290, 1301), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1298, 1301), True, 'import numpy as np\n'), ((1308, 1319), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (1316, 1319), True, 'import numpy as np\n'), ((1361, 1378), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (1369, 1378), True, 'import numpy as np\n'), ((3279, 3304), 'build_model.load_testdata', 'load_testdata', (['output_dir'], {}), '(output_dir)\n', (3292, 3304), False, 'from build_model import confusion_matrix, plot_confusion_matrix, plt, load_testdata\n'), ((3321, 3361), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""modello.h5"""'], {}), "('modello.h5')\n", (3347, 3361), True, 'import tensorflow as tf\n'), ((3546, 3578), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (3565, 3578), True, 'import numpy as np\n'), ((3584, 3612), 'build_model.plt.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3594, 3612), False, 'from build_model import confusion_matrix, plot_confusion_matrix, plt, load_testdata\n'), ((3730, 3830), 'build_model.plot_confusion_matrix', 'plot_confusion_matrix', (['cfm'], {'classes': 'class_names', 'title': '"""Matrice di confusione"""', 'normalize': '(False)'}), "(cfm, classes=class_names, title=\n 'Matrice di confusione', normalize=False)\n", (3751, 3830), False, 'from build_model import confusion_matrix, plot_confusion_matrix, plt, load_testdata\n'), ((3830, 3903), 'build_model.plt.savefig', 'plt.savefig', (['"""/Users/drissouissiakavaleriofoule/Desktop/TESI/matrix2.png"""'], {}), "('/Users/drissouissiakavaleriofoule/Desktop/TESI/matrix2.png')\n", (3841, 3903), False, 'from build_model import confusion_matrix, plot_confusion_matrix, plt, load_testdata\n'), ((4219, 4294), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 
'"""Predict Sign language with Mediapipe"""'}), "(description='Predict Sign language with Mediapipe')\n", (4242, 4294), False, 'import argparse\n'), ((472, 502), 'os.listdir', 'os.listdir', (['(dirname + wordname)'], {}), '(dirname + wordname)\n', (482, 502), False, 'import os\n'), ((3490, 3515), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (3499, 3515), True, 'import numpy as np\n'), ((3517, 3540), 'numpy.argmax', 'np.argmax', (['yhat'], {'axis': '(1)'}), '(yhat, axis=1)\n', (3526, 3540), True, 'import numpy as np\n'), ((1124, 1148), 'numpy.array', 'np.array', (['landmark_frame'], {}), '(landmark_frame)\n', (1132, 1148), True, 'import numpy as np\n'), ((1227, 1251), 'numpy.array', 'np.array', (['landmark_frame'], {}), '(landmark_frame)\n', (1235, 1251), True, 'import numpy as np\n')] |
"""
gunicorn --bind 0.0.0.0:5000 wsgi:app
"""
from flask import Flask, jsonify
from flask_swagger import swagger
from flask import redirect, session, request, json, render_template
from xgboost import XGBClassifier
import pandas as pd
import matplotlib.pyplot as plt
import shap
import pickle
import numpy as np
from joblib import dump, load
from tensorflow import keras
from tensorflow_addons.metrics import F1Score
app = Flask(__name__)
"""
score feature
feature 3 0.067646 totalViewProducts
feature 8 0.062860 totalAddToCartQty
feature 4 0.055818 totalAddToCarts
feature 15 0.034540 productPriceMean
feature 9 0.033196 hourOfDay
feature 0 0.016931 uniqueSearches
feature 1 0.014983 totalSearches
feature 12 0.013691 has_campaign
"""
features = [
{
"id": "total_view_products", "name": "Total view products:",
"min_value": 1, "max_value": 30, "default_value": 1, "step": 1
},
{
"id": "total_add_to_cart_qty", "name": "Total add to cart qty",
"min_value": 0, "max_value": 15, "default_value": 0, "step": 1
},
{
"id": "total_add_to_carts", "name": "Total add to carts",
"min_value": 0, "max_value": 10, "default_value": 0, "step": 1
},
{
"id": "product_price_mean", "name": "Product price mean",
"min_value": 1, "max_value": 50, "default_value": 1, "step": 0.1
},
{
"id": "hour_of_day", "name": "Hour of day",
"min_value": 0, "max_value": 23, "default_value": 0, "step": 1
},
{
"id": "unique_searches", "name": "Unique searches",
"min_value": 0, "max_value": 20, "default_value": 0, "step": 1
},
{
"id": "total_searches", "name": "Total searches",
"min_value": 0, "max_value": 20, "default_value": 0, "step": 1
}
# {
# "id": "has_campaign", "name": "Has Campaign",
# "min_value": 0, "max_value": 1, "default_value": 0, "step": 1
# }
]
@app.route("/spec")
def spec():
return jsonify(swagger(app))
@app.route('/user_conversion')
def user_conversion():
"""
Best pipeline: GaussianNB(ExtraTreesClassifier(XGBClassifier(input_matrix, learning_rate=0.001, max_depth=10, min_child_weight=10, n_estimators=100, n_jobs=1, subsample=0.7500000000000001, verbosity=0), bootstrap=True, criterion=entropy, max_features=0.55, min_samples_leaf=10, min_samples_split=19, n_estimators=100))
"""
img_path = 'static/img/shap_bar_graph.png'
explanability = 'static/img/explanability.png'
feature_importance = 'static/img/feature_importance.png'
scores = 'static/img/scores.png'
events_dist = 'static/img/events_dist.png'
lstm_f1 = 'static/img/lstm_f1.png'
lstm_model_loss = 'static/img/lstm_model_loss.png'
return render_template(
'user_conversion.html', shap_bar_graph=img_path,
explanability=explanability, features=features,
feature_importance=feature_importance, scores=scores,
events_dist=events_dist, lstm_f1=lstm_f1, lstm_model_loss=lstm_model_loss)
@app.route('/user_conversion_lstm_predict')
def user_conversion_lstm_predict():
seq = request.args.get('seq', '000')
seq = str(seq)
seq = seq.zfill(40)
seq_arr = [int(i) for i in list(seq)]
lstm_model_new = keras.models.load_model(
'static/models/lstm/lstm_model.h5')
predictions = lstm_model_new.predict_proba(
np.array(seq_arr).reshape(1, 1, -1))
return app.response_class(
response=json.dumps({
"input": seq_arr,
"predictions": predictions.tolist()[0]
}),
status=200,
mimetype='application/json'
)
@app.route('/user_conversion_predict')
def user_conversion_predict():
inputs = []
for f in features:
if 'price' in f['id'] or 'revenue' in f['id']:
inputs.append(float(request.args.get(f['id'], 0)))
else:
inputs.append(int(request.args.get(f['id'], 0)))
inputs.append(0)
model = load('static/models/ensemble/stacking.joblib')
predictions = np.round(model.predict_proba(
np.array(inputs).reshape(1, -1)), 6)
nonconvert, convert = predictions.tolist()[0]
return app.response_class(
response=json.dumps({
"convert": convert,
"nonconvert": nonconvert,
"input": inputs
}),
status=200,
mimetype='application/json'
)
@app.route('/personalization')
def personalization():
return render_template('personalization.html')
@app.route('/')
def index():
return render_template('index.html')
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0', port=5000, threaded=True)
| [
"tensorflow.keras.models.load_model",
"flask.request.args.get",
"flask.Flask",
"flask.json.dumps",
"numpy.array",
"flask_swagger.swagger",
"flask.render_template",
"joblib.load"
] | [((424, 439), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (429, 439), False, 'from flask import Flask, jsonify\n'), ((2815, 3070), 'flask.render_template', 'render_template', (['"""user_conversion.html"""'], {'shap_bar_graph': 'img_path', 'explanability': 'explanability', 'features': 'features', 'feature_importance': 'feature_importance', 'scores': 'scores', 'events_dist': 'events_dist', 'lstm_f1': 'lstm_f1', 'lstm_model_loss': 'lstm_model_loss'}), "('user_conversion.html', shap_bar_graph=img_path,\n explanability=explanability, features=features, feature_importance=\n feature_importance, scores=scores, events_dist=events_dist, lstm_f1=\n lstm_f1, lstm_model_loss=lstm_model_loss)\n", (2830, 3070), False, 'from flask import redirect, session, request, json, render_template\n'), ((3182, 3212), 'flask.request.args.get', 'request.args.get', (['"""seq"""', '"""000"""'], {}), "('seq', '000')\n", (3198, 3212), False, 'from flask import redirect, session, request, json, render_template\n'), ((3319, 3378), 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['"""static/models/lstm/lstm_model.h5"""'], {}), "('static/models/lstm/lstm_model.h5')\n", (3342, 3378), False, 'from tensorflow import keras\n'), ((4034, 4080), 'joblib.load', 'load', (['"""static/models/ensemble/stacking.joblib"""'], {}), "('static/models/ensemble/stacking.joblib')\n", (4038, 4080), False, 'from joblib import dump, load\n'), ((4524, 4563), 'flask.render_template', 'render_template', (['"""personalization.html"""'], {}), "('personalization.html')\n", (4539, 4563), False, 'from flask import redirect, session, request, json, render_template\n'), ((4606, 4635), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (4621, 4635), False, 'from flask import redirect, session, request, json, render_template\n'), ((2057, 2069), 'flask_swagger.swagger', 'swagger', (['app'], {}), '(app)\n', (2064, 2069), False, 'from flask_swagger import 
swagger\n'), ((4272, 4347), 'flask.json.dumps', 'json.dumps', (["{'convert': convert, 'nonconvert': nonconvert, 'input': inputs}"], {}), "({'convert': convert, 'nonconvert': nonconvert, 'input': inputs})\n", (4282, 4347), False, 'from flask import redirect, session, request, json, render_template\n'), ((3444, 3461), 'numpy.array', 'np.array', (['seq_arr'], {}), '(seq_arr)\n', (3452, 3461), True, 'import numpy as np\n'), ((3895, 3923), 'flask.request.args.get', 'request.args.get', (["f['id']", '(0)'], {}), "(f['id'], 0)\n", (3911, 3923), False, 'from flask import redirect, session, request, json, render_template\n'), ((3970, 3998), 'flask.request.args.get', 'request.args.get', (["f['id']", '(0)'], {}), "(f['id'], 0)\n", (3986, 3998), False, 'from flask import redirect, session, request, json, render_template\n'), ((4137, 4153), 'numpy.array', 'np.array', (['inputs'], {}), '(inputs)\n', (4145, 4153), True, 'import numpy as np\n')] |
from osgeo import ogr
import os
import numpy as np
from gdalhelpers.functions import create_points_at_angles_distance
PATH_DATA = os.path.join(os.path.dirname(__file__), "..", "tests", "test_data")
PATH_DATA_RESULTS = os.path.join(PATH_DATA, "results")
points = ogr.Open(os.path.join(PATH_DATA, "points.gpkg"))
angles = np.arange(0, 360, step=10).tolist()
new_points = create_points_at_angles_distance(points, angles=angles, distance=25)
ds_points_around = ogr.GetDriverByName("GPKG").CreateDataSource(os.path.join(PATH_DATA_RESULTS, "points_around.gpkg"))
ds_points_around.CopyLayer(new_points.GetLayer(), "points_around", ["OVERWRITE=YES"])
ds_points_around = None | [
"os.path.dirname",
"osgeo.ogr.GetDriverByName",
"gdalhelpers.functions.create_points_at_angles_distance",
"numpy.arange",
"os.path.join"
] | [((219, 253), 'os.path.join', 'os.path.join', (['PATH_DATA', '"""results"""'], {}), "(PATH_DATA, 'results')\n", (231, 253), False, 'import os\n'), ((373, 441), 'gdalhelpers.functions.create_points_at_angles_distance', 'create_points_at_angles_distance', (['points'], {'angles': 'angles', 'distance': '(25)'}), '(points, angles=angles, distance=25)\n', (405, 441), False, 'from gdalhelpers.functions import create_points_at_angles_distance\n'), ((144, 169), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (159, 169), False, 'import os\n'), ((273, 311), 'os.path.join', 'os.path.join', (['PATH_DATA', '"""points.gpkg"""'], {}), "(PATH_DATA, 'points.gpkg')\n", (285, 311), False, 'import os\n'), ((507, 560), 'os.path.join', 'os.path.join', (['PATH_DATA_RESULTS', '"""points_around.gpkg"""'], {}), "(PATH_DATA_RESULTS, 'points_around.gpkg')\n", (519, 560), False, 'import os\n'), ((323, 349), 'numpy.arange', 'np.arange', (['(0)', '(360)'], {'step': '(10)'}), '(0, 360, step=10)\n', (332, 349), True, 'import numpy as np\n'), ((462, 489), 'osgeo.ogr.GetDriverByName', 'ogr.GetDriverByName', (['"""GPKG"""'], {}), "('GPKG')\n", (481, 489), False, 'from osgeo import ogr\n')] |
from multiprocessing import Pool
import numpy as np
import skimage
from . import saliency, utils
try:
import tensorflow as tf
except Exception:
import warnings
warnings.warn("Could not import tensorflow. DeepGaze models will not be runnable.")
class IttyKoch:
""" Python Implementation of the Itty Koch Saliency Model
"""
def __init__(self,
mapwidth = 64,
gabor_wavelength = 3.5,
n_gabor_angles = 4,
center_bias = 1.5,
blur_radius = 0.04,
gabor_gamma = 1,
border_size = 10,
surround_sig = [1, 3],
logtransform = False,
smooting_final = 2,
top_down = 'peakiness',
n_jobs=1):
self.__dict__.update(locals())
def predict(self, img, return_chanmaps = False):
""" Compute and return the saliency map
img : The input image. Should have dimensions (W,H,C)
return_chanmaps : if True, also return the intermediate saliency maps
Returns: saliency map, [channel maps]
"""
print("processing")
batch_mode = (isinstance(img, list))
if not batch_mode: batch_mode = (img.ndim == 4)
if batch_mode:
if self.n_jobs == 1:
return [self.predict(i,return_chanmaps) for i in img]
with Pool(self.n_jobs) as p:
result = p.map(self.predict,img)
return result
if img.ndim == 2:
img = img[:,:,np.newaxis]
# compute saliency maps
maps = saliency.collect_maps(saliency.resize(img, self.mapwidth))
#salmaps = saliency.attenuate_borders(salmaps, self.border_size)
salmaps = np.stack([saliency.saliency(maps[...,i], self.surround_sig)
for i in range(maps.shape[2])], axis=-1)
if self.top_down == 'peakiness':
weights = np.array([saliency.peakiness(salmaps[...,i]) for i in range(salmaps.shape[2])])
else:
weights = np.array([1.0] + [1/24.]*24 + [1.0]*3)
weights /= weights.sum()
final_map = (salmaps*weights).sum(axis=-1)
if self.smooting_final is not None:
final_map = skimage.filters.gaussian(final_map, \
sigma=self.smooting_final, \
truncate=2, mode='reflect')
if self.center_bias is not None:
final_map = saliency.center_bias(final_map, length = self.center_bias)
if self.logtransform:
final_map = -np.log(1 + 1e-5 - final_map)
final_map = utils.minmaxnorm(final_map, axis=(0,1))
# optimize for NSS score by enforcing sharp maxima
#if self.smooting_final is not None:
# final_map = skimage.filters.gaussian(final_map, \
# sigma=2, \
#truncate=2, mode='reflect')
if return_chanmaps:
return final_map, salmaps#, maps
return final_map
class TensorflowModel:
def __init__(self,
batch_size = 10,
check_point = 'DeepGazeII.ckpt'):
self.__dict__.update(locals())
tf.reset_default_graph()
self.new_saver = tf.train.import_meta_graph('{}.meta'.format(check_point))
self.input_tensor = tf.get_collection('input_tensor')[0]
self.centerbias_tensor = tf.get_collection('centerbias_tensor')[0]
self.log_density = tf.get_collection('log_density')[0]
self.log_density_wo_centerbias = tf.get_collection('log_density_wo_centerbias')[0]
def predict(self,X, verbose=False):
""" Compute log probability density
"""
centerbias_data = np.zeros((1, X.shape[1], X.shape[2], 1))
log_density_prediction = np.zeros(X.shape[:3] + (1,))
with tf.Session() as sess:
self.new_saver.restore(sess, self.check_point)
for i in range(0, len(X), self.batch_size):
idc = slice(i, min(i+self.batch_size, len(X)))
bX = X[idc]
log_density_prediction[idc] = sess.run(self.log_density, {
self.input_tensor: bX,
self.centerbias_tensor: centerbias_data,
})
return log_density_prediction
class DeepGazeII(TensorflowModel):
""" Implementation of the Deep Gaze II model
Adapted from https://deepgaze.bethgelab.org/
"""
def __init__(self, *args, **kwargs):
super(self.__class__).__init__(self, *args,
check_point = 'DeepGazeII.ckpt',
**kwargs)
class ICF(TensorflowModel):
""" Implementation of the Deep Gaze II model
Adapted from https://deepgaze.bethgelab.org/
"""
def __init__(self, *args, **kwargs):
super(self.__class__).__init__(self, *args,
check_point = 'ICF.ckpt',
**kwargs)
| [
"numpy.log",
"tensorflow.get_collection",
"tensorflow.reset_default_graph",
"numpy.zeros",
"tensorflow.Session",
"numpy.array",
"multiprocessing.Pool",
"warnings.warn",
"skimage.filters.gaussian"
] | [((174, 262), 'warnings.warn', 'warnings.warn', (['"""Could not import tensorflow. DeepGaze models will not be runnable."""'], {}), "(\n 'Could not import tensorflow. DeepGaze models will not be runnable.')\n", (187, 262), False, 'import warnings\n'), ((3328, 3352), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3350, 3352), True, 'import tensorflow as tf\n'), ((3854, 3894), 'numpy.zeros', 'np.zeros', (['(1, X.shape[1], X.shape[2], 1)'], {}), '((1, X.shape[1], X.shape[2], 1))\n', (3862, 3894), True, 'import numpy as np\n'), ((3928, 3956), 'numpy.zeros', 'np.zeros', (['(X.shape[:3] + (1,))'], {}), '(X.shape[:3] + (1,))\n', (3936, 3956), True, 'import numpy as np\n'), ((2093, 2138), 'numpy.array', 'np.array', (['([1.0] + [1 / 24.0] * 24 + [1.0] * 3)'], {}), '([1.0] + [1 / 24.0] * 24 + [1.0] * 3)\n', (2101, 2138), True, 'import numpy as np\n'), ((2290, 2384), 'skimage.filters.gaussian', 'skimage.filters.gaussian', (['final_map'], {'sigma': 'self.smooting_final', 'truncate': '(2)', 'mode': '"""reflect"""'}), "(final_map, sigma=self.smooting_final, truncate=2,\n mode='reflect')\n", (2314, 2384), False, 'import skimage\n'), ((3465, 3498), 'tensorflow.get_collection', 'tf.get_collection', (['"""input_tensor"""'], {}), "('input_tensor')\n", (3482, 3498), True, 'import tensorflow as tf\n'), ((3535, 3573), 'tensorflow.get_collection', 'tf.get_collection', (['"""centerbias_tensor"""'], {}), "('centerbias_tensor')\n", (3552, 3573), True, 'import tensorflow as tf\n'), ((3604, 3636), 'tensorflow.get_collection', 'tf.get_collection', (['"""log_density"""'], {}), "('log_density')\n", (3621, 3636), True, 'import tensorflow as tf\n'), ((3681, 3727), 'tensorflow.get_collection', 'tf.get_collection', (['"""log_density_wo_centerbias"""'], {}), "('log_density_wo_centerbias')\n", (3698, 3727), True, 'import tensorflow as tf\n'), ((3971, 3983), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3981, 3983), True, 'import tensorflow as tf\n'), 
((1422, 1439), 'multiprocessing.Pool', 'Pool', (['self.n_jobs'], {}), '(self.n_jobs)\n', (1426, 1439), False, 'from multiprocessing import Pool\n'), ((2656, 2685), 'numpy.log', 'np.log', (['(1 + 1e-05 - final_map)'], {}), '(1 + 1e-05 - final_map)\n', (2662, 2685), True, 'import numpy as np\n')] |
from deepSI.systems.system import System, System_deriv, System_data
import numpy as np
import jax.numpy as jnp
def f_double_pendulum(state, t=0, m1=1, m2=1, l1=1, l2=1, g=9.8):
t1, t2, w1, w2 = state
a1 = (l2 / l1) * (m2 / (m1 + m2)) * np.cos(t1 - t2)
a2 = (l1 / l2) * np.cos(t1 - t2)
f1 = -(l2 / l1) * (m2 / (m1 + m2)) * (w2**2) * np.sin(t1 - t2) - \
(g / l1) * np.sin(t1)
f2 = (l1 / l2) * (w1**2) * np.sin(t1 - t2) - (g / l2) * np.sin(t2)
g1 = (f1 - a1 * f2) / (1 - a1 * a2)
g2 = (f2 - a2 * f1) / (1 - a1 * a2)
return np.stack([w1, w2, g1, g2])
def normalize_double_pendulum(state):
# wrap generalized coordinates to [-pi, pi]
return jnp.concatenate([(state[:2] + np.pi) % (2 * np.pi) - np.pi, state[2:]])
class DoublePendulum(System_deriv):
def __init__(self, x0, m1=1, m2=1, l1=1, l2=1, g=9.8, dt=0.01):
super(DoublePendulum, self).__init__(dt=dt, nx=4, nu=2, ny=4)
self.x0 = x0
self.x = x0
self.m1 = m1
self.m2 = m2
self.l1 = l1
self.l2 = l2
self.g = g
self.dt = dt
def reset(self):
self.x = self.x0
return self.h(self.x) # return position
def get_state(self):
return self.x
def deriv(self,x,u):
return f_double_pendulum(x, m1=self.m1, m2=self.m2, l1=self.l1, l2=self.l2, g=self.g) + u
def h(self,x):
t1, t2, w1, w2 = normalize_double_pendulum(x)
return t1, t2, w1, w2
def double_pendulum_to_video(system_data: System_data, system: DoublePendulum = None):
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from moviepy.editor import ImageSequenceClip
from functools import partial
import proglog
from PIL import Image
if system is not None:
L1, L2 = system.l1, system.l2
dt = system.dt
else:
L1, L2 = 1,1
dt = 0.04 # conforms 25 fps
def make_plot(i, cart_coords, l1, l2, max_trail=30, trail_segments=20, r=0.05):
# Plot and save an image of the double pendulum configuration for time step i.
plt.cla()
x1, y1, x2, y2 = cart_coords
ax.plot([0, x1[i], x2[i]], [0, y1[i], y2[i]], lw=2, c='k') # rods
c0 = Circle((0, 0), r / 2, fc='k', zorder=10) # anchor point
c1 = Circle((x1[i], y1[i]), r, fc='b', ec='b', zorder=10) # mass 1
c2 = Circle((x2[i], y2[i]), r, fc='r', ec='r', zorder=10) # mass 2
ax.add_patch(c0)
ax.add_patch(c1)
ax.add_patch(c2)
# plot the pendulum trail (ns = number of segments)
s = max_trail // trail_segments
for j in range(trail_segments):
imin = i - (trail_segments - j) * s
if imin < 0: continue
imax = imin + s + 1
alpha = (j / trail_segments) ** 2 # fade the trail into alpha
ax.plot(x2[imin:imax], y2[imin:imax], c='r', solid_capstyle='butt',
lw=2, alpha=alpha)
# Center the image on the fixed anchor point. Make axes equal.
ax.set_xlim(-l1 - l2 - r, l1 + l2 + r)
ax.set_ylim(-l1 - l2 - r, l1 + l2 + r)
ax.set_aspect('equal', adjustable='box')
plt.axis('off')
# plt.savefig('./frames/_img{:04d}.png'.format(i//di), dpi=72)
def radial2cartesian(t1, t2, l1, l2):
# Convert from radial to Cartesian coordinates.
x1 = l1 * np.sin(t1)
y1 = -l1 * np.cos(t1)
x2 = x1 + l2 * np.sin(t2)
y2 = y1 - l2 * np.cos(t2)
return x1, y1, x2, y2
def fig2image(fig):
fig.canvas.draw()
data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
image = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return image
theta1, theta2 = system_data.x[:, 0], system_data.x[:, 1]
cart_coords = radial2cartesian(theta1, theta2, L1, L2)
fig = plt.figure(figsize=(8.3333, 6.25), dpi=72)
ax = fig.add_subplot(111)
import warnings
warnings.filterwarnings("ignore")
images = []
di = 1
N = 300
for i in range(0, N, di):
print("{}/{}".format(i // di, N // di), end='\n' if i // di % 20 == 0 else ' ')
make_plot(i, cart_coords, L1, L2)
images.append(fig2image(fig))
import importlib
importlib.reload(proglog)
proglog.default_bar_logger = partial(proglog.default_bar_logger, None)
return ImageSequenceClip(images, fps=25)
if __name__=='__main__':
from matplotlib import pyplot as plt
sys = DoublePendulum(x0=np.array([3*np.pi/7, 3*np.pi/4, 0, 0], dtype=np.float32))
exp = System_data(u=np.zeros(shape=(3000, 4), dtype=np.float32))
train_data = sys.apply_experiment(exp, save_state=True)
train_data.plot(show=False)
plt.show()
videoclip = double_pendulum_to_video(train_data)
videoclip.write_videofile('double_pendulum.mp4')
| [
"numpy.stack",
"functools.partial",
"matplotlib.pyplot.show",
"warnings.filterwarnings",
"jax.numpy.concatenate",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"matplotlib.pyplot.figure",
"importlib.reload",
"matplotlib.pyplot.cla",
"numpy.sin",
"numpy.cos",
"moviep... | [((561, 587), 'numpy.stack', 'np.stack', (['[w1, w2, g1, g2]'], {}), '([w1, w2, g1, g2])\n', (569, 587), True, 'import numpy as np\n'), ((687, 758), 'jax.numpy.concatenate', 'jnp.concatenate', (['[(state[:2] + np.pi) % (2 * np.pi) - np.pi, state[2:]]'], {}), '([(state[:2] + np.pi) % (2 * np.pi) - np.pi, state[2:]])\n', (702, 758), True, 'import jax.numpy as jnp\n'), ((3898, 3940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8.3333, 6.25)', 'dpi': '(72)'}), '(figsize=(8.3333, 6.25), dpi=72)\n', (3908, 3940), True, 'from matplotlib import pyplot as plt\n'), ((3996, 4029), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (4019, 4029), False, 'import warnings\n'), ((4294, 4319), 'importlib.reload', 'importlib.reload', (['proglog'], {}), '(proglog)\n', (4310, 4319), False, 'import importlib\n'), ((4353, 4394), 'functools.partial', 'partial', (['proglog.default_bar_logger', 'None'], {}), '(proglog.default_bar_logger, None)\n', (4360, 4394), False, 'from functools import partial\n'), ((4406, 4439), 'moviepy.editor.ImageSequenceClip', 'ImageSequenceClip', (['images'], {'fps': '(25)'}), '(images, fps=25)\n', (4423, 4439), False, 'from moviepy.editor import ImageSequenceClip\n'), ((4765, 4775), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4773, 4775), True, 'from matplotlib import pyplot as plt\n'), ((247, 262), 'numpy.cos', 'np.cos', (['(t1 - t2)'], {}), '(t1 - t2)\n', (253, 262), True, 'import numpy as np\n'), ((284, 299), 'numpy.cos', 'np.cos', (['(t1 - t2)'], {}), '(t1 - t2)\n', (290, 299), True, 'import numpy as np\n'), ((2105, 2114), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2112, 2114), True, 'from matplotlib import pyplot as plt\n'), ((2241, 2281), 'matplotlib.patches.Circle', 'Circle', (['(0, 0)', '(r / 2)'], {'fc': '"""k"""', 'zorder': '(10)'}), "((0, 0), r / 2, fc='k', zorder=10)\n", (2247, 2281), False, 'from matplotlib.patches import Circle\n'), ((2311, 2363), 
'matplotlib.patches.Circle', 'Circle', (['(x1[i], y1[i])', 'r'], {'fc': '"""b"""', 'ec': '"""b"""', 'zorder': '(10)'}), "((x1[i], y1[i]), r, fc='b', ec='b', zorder=10)\n", (2317, 2363), False, 'from matplotlib.patches import Circle\n'), ((2387, 2439), 'matplotlib.patches.Circle', 'Circle', (['(x2[i], y2[i])', 'r'], {'fc': '"""r"""', 'ec': '"""r"""', 'zorder': '(10)'}), "((x2[i], y2[i]), r, fc='r', ec='r', zorder=10)\n", (2393, 2439), False, 'from matplotlib.patches import Circle\n'), ((3197, 3212), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3205, 3212), True, 'from matplotlib import pyplot as plt\n'), ((351, 366), 'numpy.sin', 'np.sin', (['(t1 - t2)'], {}), '(t1 - t2)\n', (357, 366), True, 'import numpy as np\n'), ((388, 398), 'numpy.sin', 'np.sin', (['t1'], {}), '(t1)\n', (394, 398), True, 'import numpy as np\n'), ((430, 445), 'numpy.sin', 'np.sin', (['(t1 - t2)'], {}), '(t1 - t2)\n', (436, 445), True, 'import numpy as np\n'), ((459, 469), 'numpy.sin', 'np.sin', (['t2'], {}), '(t2)\n', (465, 469), True, 'import numpy as np\n'), ((3401, 3411), 'numpy.sin', 'np.sin', (['t1'], {}), '(t1)\n', (3407, 3411), True, 'import numpy as np\n'), ((3431, 3441), 'numpy.cos', 'np.cos', (['t1'], {}), '(t1)\n', (3437, 3441), True, 'import numpy as np\n'), ((4538, 4602), 'numpy.array', 'np.array', (['[3 * np.pi / 7, 3 * np.pi / 4, 0, 0]'], {'dtype': 'np.float32'}), '([3 * np.pi / 7, 3 * np.pi / 4, 0, 0], dtype=np.float32)\n', (4546, 4602), True, 'import numpy as np\n'), ((4621, 4664), 'numpy.zeros', 'np.zeros', ([], {'shape': '(3000, 4)', 'dtype': 'np.float32'}), '(shape=(3000, 4), dtype=np.float32)\n', (4629, 4664), True, 'import numpy as np\n'), ((3465, 3475), 'numpy.sin', 'np.sin', (['t2'], {}), '(t2)\n', (3471, 3475), True, 'import numpy as np\n'), ((3499, 3509), 'numpy.cos', 'np.cos', (['t2'], {}), '(t2)\n', (3505, 3509), True, 'import numpy as np\n')] |
"""Otto product classification: LinearSVC baseline whose decision margins are
squashed through a sigmoid and row-normalised into pseudo-probabilities."""
import pandas as pd
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.preprocessing import LabelEncoder

train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
submission_template = pd.read_csv("../input/sampleSubmission.csv")

# Map the string class names onto integer labels for the classifier.
encoded_labels = LabelEncoder().fit_transform(train_df['target'])

# SVMs tend to like features that look similar to ~ N(0,1), so clip the long
# right tails of the count features at 4.
feature_matrix = train_df.drop('target', axis=1)
feature_matrix[feature_matrix > 4] = 4

classifier = LinearSVC().fit(feature_matrix, encoded_labels)

# NOTE(review): the test set is scored without the same >4 clipping applied to
# the training features — confirm that is intentional.
margins = classifier.decision_function(test_df)

# Sigmoid-calibrate the margins, then normalise each row to sum to one.
calibrated = 1.0 / (1.0 + np.exp(-margins))
row_totals = calibrated.sum(axis=1)
normalised = calibrated / row_totals[:, np.newaxis]

# create submission file
submission = pd.DataFrame(normalised, index=submission_template.id.values,
                          columns=submission_template.columns[1:])
submission.to_csv('svc_submission.csv', index_label='id')
| [
"pandas.DataFrame",
"pandas.read_csv",
"sklearn.preprocessing.LabelEncoder",
"numpy.exp",
"sklearn.svm.LinearSVC"
] | [((129, 162), 'pandas.read_csv', 'pd.read_csv', (['"""../input/train.csv"""'], {}), "('../input/train.csv')\n", (140, 162), True, 'import pandas as pd\n'), ((170, 202), 'pandas.read_csv', 'pd.read_csv', (['"""../input/test.csv"""'], {}), "('../input/test.csv')\n", (181, 202), True, 'import pandas as pd\n'), ((223, 267), 'pandas.read_csv', 'pd.read_csv', (['"""../input/sampleSubmission.csv"""'], {}), "('../input/sampleSubmission.csv')\n", (234, 267), True, 'import pandas as pd\n'), ((796, 910), 'pandas.DataFrame', 'pd.DataFrame', (['predictions_normalised'], {'index': 'sample_submission.id.values', 'columns': 'sample_submission.columns[1:]'}), '(predictions_normalised, index=sample_submission.id.values,\n columns=sample_submission.columns[1:])\n', (808, 910), True, 'import pandas as pd\n'), ((286, 300), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (298, 300), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((523, 534), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {}), '()\n', (532, 534), False, 'from sklearn.svm import LinearSVC\n'), ((639, 654), 'numpy.exp', 'np.exp', (['(-scores)'], {}), '(-scores)\n', (645, 654), True, 'import numpy as np\n')] |
#TwoLayerNet
import sys, os,pickle
sys.path.append(os.pardir) # 親ディレクトリのファイルをインポートするための設定
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image
def sigmoid(x):
    """Element-wise logistic function 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))
def softmax(a):
    """Numerically stable softmax of the score vector ``a``.

    The original implementation exponentiated ``a`` directly and its own
    comment noted that large scores were a problem: ``np.exp`` overflows and
    the result becomes NaN.  Subtracting ``max(a)`` before exponentiating
    leaves the ratio — and therefore the returned probabilities — unchanged
    while keeping every exponent <= 0.
    """
    shifted = a - np.max(a)  # shift cancels in the ratio; prevents overflow
    exp_a = np.exp(shifted)
    return exp_a / np.sum(exp_a)
def cross_entropy_error(y, t):
    """Cross-entropy loss between predictions ``y`` and one-hot targets ``t``.

    ``t`` is reshaped into a column vector before the element-wise product.
    (The original comment labelled this "least squares", but the computation
    is cross entropy.)
    """
    eps = 1e-7  # keeps np.log away from log(0) = -inf
    targets = np.array(t).reshape(1, t.size).T
    return -np.sum(targets * np.log(y + eps))
def numerical_gradient(f, x):
    """Numerically differentiate ``f`` at ``x`` by central differences.

    Bug fixed: the original indexed through ``x.flatten()``, but ``flatten``
    returns a *copy*, so neither the perturbation of ``x`` nor the write into
    ``grad`` ever took effect — the gradient always came back all zeros (and
    the training loop below could never learn).  ``ndarray.flat`` indexes the
    array itself, so the perturbation is visible to ``f`` and the result is
    actually stored.  The ``astype(float)`` copy is also dropped so that
    callers whose ``f`` closes over ``x`` — as ``TwoLayerNet`` does via
    ``self.params`` — see the perturbed values.

    Args:
        f: scalar-valued function; called as ``f(x)`` after each perturbation.
        x: float ndarray of any shape; perturbed in place one component at a
           time and restored before returning.

    Returns:
        ndarray of the same shape as ``x`` holding df/dx.
    """
    h = 1e-4
    grad = np.zeros_like(x, dtype=float)
    for idx in range(x.size):
        original = x.flat[idx]
        x.flat[idx] = original + h
        fxh1 = f(x)  # f(x + h) along this coordinate
        x.flat[idx] = original - h
        fxh2 = f(x)  # f(x - h)
        grad.flat[idx] = (fxh1 - fxh2) / (2 * h)
        x.flat[idx] = original  # restore before moving on
    return grad
class TwoLayerNet:
    """Fully connected two-layer net: affine -> sigmoid -> affine -> softmax."""

    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        # Small Gaussian weights, zero biases.
        self.params = {
            "W1": weight_init_std * np.random.randn(input_size, hidden_size),
            "b1": np.zeros(hidden_size),
            "W2": weight_init_std * np.random.randn(hidden_size, output_size),
            "b2": np.zeros(output_size),
        }

    def predict(self, x):
        """Forward pass: softmax class scores for the input batch ``x``."""
        hidden = sigmoid(np.dot(x, self.params["W1"]) + self.params["b1"])
        return softmax(np.dot(hidden, self.params["W2"]) + self.params["b2"])

    def loss(self, x, t):
        """Cross-entropy loss of the predictions for ``x`` against targets ``t``."""
        return cross_entropy_error(self.predict(x), t)

    def accuracy(self, x, t):
        """Fraction of samples whose arg-max prediction matches the target."""
        predicted = np.argmax(self.predict(x), axis=1)
        actual = np.argmax(t, axis=1)
        return np.sum(predicted == actual) / float(x.shape[0])

    def numerical_gradient(self, x, t):
        """Numeric gradient of the loss w.r.t. every parameter array."""
        def loss_W(W):
            # W is ignored: the loss reads the (perturbed) params directly.
            return self.loss(x, t)

        # Resolves to the module-level numerical_gradient, as in the original.
        return {
            name: numerical_gradient(loss_W, self.params[name])
            for name in ("W1", "b1", "W2", "b2")
        }
# Mini-batch training driven by numerical gradients.
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=False)

train_loss_list = []

# Hyperparameters.
iters_num = 10000
train_size = x_train.shape[0]
batch_size = int(input("batch_size="))
learning_rate = 0.1

network = TwoLayerNet(input_size=x_train.shape[1], hidden_size=50, output_size=10)

for step in range(iters_num):
    # Draw a random mini-batch (with replacement) from the training set.
    sample_idx = np.random.choice(train_size, batch_size)
    x_batch = x_train[sample_idx]
    t_batch = t_train[sample_idx]

    # Gradient of the loss w.r.t. every parameter, by finite differences.
    grad = network.numerical_gradient(x_batch, t_batch)

    # Plain SGD parameter update.
    for key in ("W1", "b1", "W2", "b2"):
        network.params[key] -= learning_rate * grad[key]

    # Track the training loss on the current batch.
    train_loss_list.append(network.loss(x_batch, t_batch))
| [
"sys.path.append",
"numpy.zeros_like",
"numpy.sum",
"numpy.log",
"numpy.argmax",
"numpy.random.randn",
"numpy.zeros",
"dataset.mnist.load_mnist",
"numpy.array",
"numpy.exp",
"numpy.random.choice",
"numpy.dot"
] | [((35, 61), 'sys.path.append', 'sys.path.append', (['os.pardir'], {}), '(os.pardir)\n', (50, 61), False, 'import sys, os, pickle\n'), ((2393, 2434), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'flatten': '(True)', 'normalize': '(False)'}), '(flatten=True, normalize=False)\n', (2403, 2434), False, 'from dataset.mnist import load_mnist\n'), ((246, 255), 'numpy.exp', 'np.exp', (['a'], {}), '(a)\n', (252, 255), True, 'import numpy as np\n'), ((268, 281), 'numpy.sum', 'np.sum', (['exp_a'], {}), '(exp_a)\n', (274, 281), True, 'import numpy as np\n'), ((596, 612), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (609, 612), True, 'import numpy as np\n'), ((2707, 2747), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (2723, 2747), True, 'import numpy as np\n'), ((1226, 1247), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1234, 1247), True, 'import numpy as np\n'), ((1363, 1384), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1371, 1384), True, 'import numpy as np\n'), ((1807, 1827), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (1816, 1827), True, 'import numpy as np\n'), ((1841, 1861), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (1850, 1861), True, 'import numpy as np\n'), ((205, 215), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (211, 215), True, 'import numpy as np\n'), ((1126, 1166), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1141, 1166), True, 'import numpy as np\n'), ((1294, 1335), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1309, 1335), True, 'import numpy as np\n'), ((1528, 1541), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (1534, 1541), True, 'import numpy as np\n'), ((1584, 1598), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', 
(1590, 1598), True, 'import numpy as np\n'), ((1882, 1896), 'numpy.sum', 'np.sum', (['(y == t)'], {}), '(y == t)\n', (1888, 1896), True, 'import numpy as np\n'), ((379, 390), 'numpy.array', 'np.array', (['t'], {}), '(t)\n', (387, 390), True, 'import numpy as np\n'), ((455, 472), 'numpy.log', 'np.log', (['(y + delta)'], {}), '(y + delta)\n', (461, 472), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Plots of models over changes in parameters.
Creates plots to be joined by plots_join.sh into PDFs.
"""
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from empirical.util.classdef import Site, Fault, TectType, GMM
from empirical.util.empirical_factory import compute_gmm
from openquake.hazardlib import gsim
# Shared site / fault objects mutated by the plotting loops below.
site = Site()
fault = Fault()
# IMs to loop through for magnitude and rrup scaling plots
ims = ["PGA", "PGV", 0.1, 0.5, 0.75, 1, 3, 5]
# rrup scaling plots fixed magnitudes
mws = [8, 8.25, 8.5, 8.75, 9, 9.2]
# magnitude scaling plots fixed rrups
rrs = [25, 50, 75, 100, 300, 600]
# set of subduction interface models: model -> (plot label, tectonic type)
gmms_if = {
    gsim.parker_2020.ParkerEtAl2020SInter: (
        "Parker 2020",
        TectType.SUBDUCTION_INTERFACE,
    ),
    gsim.phung_2020.PhungEtAl2020SInter: ("Phung 2020", TectType.SUBDUCTION_INTERFACE),
    gsim.chao_2020.ChaoEtAl2020SInter: ("Chao 2020", TectType.SUBDUCTION_INTERFACE),
    gsim.hassani_atkinson_2020.HassaniAtkinson2020SInter: (
        "Hassani Atkinson 2020",
        TectType.SUBDUCTION_INTERFACE,
    ),
    GMM.ZA_06: ("Zhao 2006", TectType.SUBDUCTION_INTERFACE),
    GMM.BCH_16: ("BC Hydro 2016", TectType.SUBDUCTION_INTERFACE),
}
# set of subduction slab models
gmms_sl = {
    gsim.parker_2020.ParkerEtAl2020SSlab: ("Parker 2020", TectType.SUBDUCTION_SLAB),
    gsim.phung_2020.PhungEtAl2020SSlab: ("Phung 2020", TectType.SUBDUCTION_SLAB),
    gsim.chao_2020.ChaoEtAl2020SSlab: ("Chao 2020", TectType.SUBDUCTION_SLAB),
    gsim.hassani_atkinson_2020.HassaniAtkinson2020SSlab: (
        # restored from the anonymisation artefact "H<NAME> 2020" so the
        # label matches the interface entry and the GSIM class name
        "Hassani Atkinson 2020",
        TectType.SUBDUCTION_SLAB,
    ),
    GMM.ZA_06: ("Zhao 2006", TectType.SUBDUCTION_SLAB),
    GMM.BCH_16: ("BC Hydro 2016", TectType.SUBDUCTION_SLAB),
}
# control parameters (unchanging)
site.z1p0 = 80
site.fpeak = np.array([12])
site.z2p5 = 245
fault.hdepth = 20
fault.ztor = 4
fault.rake = 30
fault.dip = 45
fault.width = 20
# fixed magnitude, rrup range
# One figure per (IM, magnitude, model-set): median prediction vs rupture
# distance on log-log axes, for every model that supports the IM.
for i, im in enumerate(ims):
    for m, mag in enumerate(mws):
        fault.Mw = mag
        # Numeric entries in ims are pseudo-spectral-acceleration periods.
        if type(im).__name__ in ["int", "float"]:
            imt = "SA"
        else:
            imt = im
        for gi, gmms in enumerate([gmms_if, gmms_sl]):
            for g in gmms:
                # OpenQuake GSIM classes advertise their supported IMs;
                # the empirical-engine enums are assumed SA/PGA only.
                if type(g).__name__ == "MetaGSIM":
                    if imt not in [
                        x.__name__ for x in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES
                    ]:
                        # model does not support IM
                        continue
                else:
                    if imt not in ["SA", "PGA"]:
                        # empirical engine ones don't have PGV etc...
                        # need to update as required
                        continue
                fault.tect_type = gmms[g][1]
                x = np.logspace(1, 3)
                y = []
                for rrup in x:
                    site.Rrup = rrup
                    site.Rjb = rrup * 0.9
                    period = None if imt != "SA" else im
                    # but zhao expects a list?
                    if g == GMM.ZA_06:
                        v = compute_gmm(fault, site, g, imt, period=[period])
                        if imt == "PGA":
                            # result
                            v, stdvs = v
                        else:
                            # list of result for each period
                            v, stdvs = v[0]
                    else:
                        v, stdvs = compute_gmm(fault, site, g, imt, period=period)
                    y.append(v[0] if hasattr(v, "__len__") else v)
                y = np.array(y)
                plt.loglog(x, y, label=gmms[g][0])
                # NOTE(review): stdvs is left over from the *last* rrup only but
                # is applied across the whole curve — confirm sigma is intended
                # to be distance-independent here.
                plt.fill_between(x, y * np.exp(-stdvs[0]), y * np.exp(stdvs[0]), alpha=0.1)
            plt.legend()
            plt.xlabel("rrup")
            # y is reused below as the axis-label string, not the data vector.
            y = imt
            if imt == "SA":
                y += " " + str(im)
            plt.ylabel(y)
            plt.title("Mw = " + str(mag))
            plt.savefig(f"r{gi}{i}{m}.png")
            plt.close()
# fixed rrup, magnitude range
# One figure per (IM, rupture distance, model-set): median prediction vs
# moment magnitude, for every model that supports the IM.
for i, im in enumerate(ims):
    for r, rrup in enumerate(rrs):
        site.Rrup = rrup
        site.Rjb = rrup * 0.9
        # Numeric entries in ims are pseudo-spectral-acceleration periods.
        if type(im).__name__ in ["int", "float"]:
            imt = "SA"
        else:
            imt = im
        for gi, gmms in enumerate([gmms_if, gmms_sl]):
            for g in gmms:
                # OpenQuake GSIM classes advertise their supported IMs;
                # the empirical-engine enums are assumed SA/PGA only.
                if type(g).__name__ == "MetaGSIM":
                    if imt not in [
                        x.__name__ for x in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES
                    ]:
                        # model does not support IM
                        continue
                else:
                    if imt not in ["SA", "PGA"]:
                        # empirical engine ones don't have PGV etc...
                        # need to update as required
                        continue
                fault.tect_type = gmms[g][1]
                x = np.linspace(6, 9)
                y = []
                for mag in x:
                    fault.Mw = mag
                    period = None if imt != "SA" else im
                    # but zhao expects a list?
                    if g == GMM.ZA_06:
                        v = compute_gmm(fault, site, g, imt, period=[period])
                        if imt == "PGA":
                            # result
                            v, stdvs = v
                        else:
                            # list of result for each period
                            v, stdvs = v[0]
                    else:
                        v, stdvs = compute_gmm(fault, site, g, imt, period=period)
                    y.append(v[0] if hasattr(v, "__len__") else v)
                y = np.array(y)
                plt.loglog(x, y, label=gmms[g][0])
                # NOTE(review): stdvs is from the *last* magnitude only but is
                # applied across the whole curve — confirm this is intended.
                plt.fill_between(x, y * np.exp(-stdvs[0]), y * np.exp(stdvs[0]), alpha=0.1)
            plt.legend()
            plt.xlabel("Moment magnitude, Mw")
            # y is reused below as the axis-label string, not the data vector.
            y = imt
            if imt == "SA":
                y += " " + str(im)
            plt.ylabel(y)
            plt.title("Rrup = " + str(rrup) + " km")
            plt.savefig(f"m{gi}{i}{r}.png")
            plt.close()
# spectra with fixed rrup / magnitude
# Response spectra: median SA vs oscillator period at fixed (Mw, Rrup) pairs.
imt = "SA"
for m, mag in enumerate([8, 9]):
    fault.Mw = mag
    for r, rrup in enumerate([25, 50, 100]):
        site.Rrup = rrup
        site.Rjb = rrup * 0.9
        for gi, gmms in enumerate([gmms_if, gmms_sl]):
            for g in gmms:
                # Only OpenQuake GSIM classes are filtered by supported IM
                # here; empirical-engine enums always pass (imt is "SA").
                if type(g).__name__ == "MetaGSIM":
                    if imt not in [
                        x.__name__ for x in g.DEFINED_FOR_INTENSITY_MEASURE_TYPES
                    ]:
                        # model does not support IM
                        continue
                fault.tect_type = gmms[g][1]
                x = np.logspace(-2, 1)
                y = []
                for period in x:
                    # but zhao must have a list input
                    if g == GMM.ZA_06:
                        v = compute_gmm(fault, site, g, imt, period=[period])
                        if imt == "PGA":
                            # result
                            v, stdvs = v
                        else:
                            # list of result for each period
                            v, stdvs = v[0]
                    else:
                        v, stdvs = compute_gmm(fault, site, g, imt, period=period)
                    y.append(v[0] if hasattr(v, "__len__") else v)
                y = np.array(y)
                plt.loglog(x, y, label=gmms[g][0])
                # NOTE(review): stdvs is from the *last* period only but is
                # applied across the whole spectrum — confirm intended.
                plt.fill_between(x, y * np.exp(-stdvs[0]), y * np.exp(stdvs[0]), alpha=0.1)
            plt.legend()
            plt.xlabel("Oscillator period (s)")
            # y is reused below as the axis-label string, not the data vector.
            y = imt
            if imt == "SA":
                # NOTE(review): 'im' here is a leftover from the earlier
                # scaling loops (its last value), so the label is the same for
                # every spectrum — likely a bug; confirm intended y-label.
                y += " " + str(im)
            plt.ylabel(y)
            plt.title("Mw = " + str(mag) + " Rrup = " + str(rrup) + " km")
            plt.savefig(f"s{gi}{m}{r}.png")
            plt.close()
| [
"matplotlib.pyplot.loglog",
"empirical.util.empirical_factory.compute_gmm",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.logspace",
"empirical.util.classdef.Site",
"empirical.util.classdef.Fault",
"matplotlib.use",
"numpy.array",
"numpy.exp",
"numpy.linspace",
"matplotlib.pypl... | [((167, 188), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (181, 188), False, 'import matplotlib\n'), ((388, 394), 'empirical.util.classdef.Site', 'Site', ([], {}), '()\n', (392, 394), False, 'from empirical.util.classdef import Site, Fault, TectType, GMM\n'), ((403, 410), 'empirical.util.classdef.Fault', 'Fault', ([], {}), '()\n', (408, 410), False, 'from empirical.util.classdef import Site, Fault, TectType, GMM\n'), ((1865, 1879), 'numpy.array', 'np.array', (['[12]'], {}), '([12])\n', (1873, 1879), True, 'import numpy as np\n'), ((3847, 3859), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3857, 3859), True, 'import matplotlib.pyplot as plt\n'), ((3872, 3890), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rrup"""'], {}), "('rrup')\n", (3882, 3890), True, 'import matplotlib.pyplot as plt\n'), ((3986, 3999), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (3996, 3999), True, 'import matplotlib.pyplot as plt\n'), ((4054, 4085), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""r{gi}{i}{m}.png"""'], {}), "(f'r{gi}{i}{m}.png')\n", (4065, 4085), True, 'import matplotlib.pyplot as plt\n'), ((4098, 4109), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4107, 4109), True, 'import matplotlib.pyplot as plt\n'), ((5968, 5980), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5978, 5980), True, 'import matplotlib.pyplot as plt\n'), ((5993, 6027), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Moment magnitude, Mw"""'], {}), "('Moment magnitude, Mw')\n", (6003, 6027), True, 'import matplotlib.pyplot as plt\n'), ((6123, 6136), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (6133, 6136), True, 'import matplotlib.pyplot as plt\n'), ((6202, 6233), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""m{gi}{i}{r}.png"""'], {}), "(f'm{gi}{i}{r}.png')\n", (6213, 6233), True, 'import matplotlib.pyplot as plt\n'), ((6246, 6257), 'matplotlib.pyplot.close', 'plt.close', 
([], {}), '()\n', (6255, 6257), True, 'import matplotlib.pyplot as plt\n'), ((7752, 7764), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7762, 7764), True, 'import matplotlib.pyplot as plt\n'), ((7777, 7812), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Oscillator period (s)"""'], {}), "('Oscillator period (s)')\n", (7787, 7812), True, 'import matplotlib.pyplot as plt\n'), ((7908, 7921), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['y'], {}), '(y)\n', (7918, 7921), True, 'import matplotlib.pyplot as plt\n'), ((8010, 8041), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""s{gi}{m}{r}.png"""'], {}), "(f's{gi}{m}{r}.png')\n", (8021, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8054, 8065), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8063, 8065), True, 'import matplotlib.pyplot as plt\n'), ((2853, 2870), 'numpy.logspace', 'np.logspace', (['(1)', '(3)'], {}), '(1, 3)\n', (2864, 2870), True, 'import numpy as np\n'), ((3680, 3691), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (3688, 3691), True, 'import numpy as np\n'), ((3708, 3742), 'matplotlib.pyplot.loglog', 'plt.loglog', (['x', 'y'], {'label': 'gmms[g][0]'}), '(x, y, label=gmms[g][0])\n', (3718, 3742), True, 'import matplotlib.pyplot as plt\n'), ((5019, 5036), 'numpy.linspace', 'np.linspace', (['(6)', '(9)'], {}), '(6, 9)\n', (5030, 5036), True, 'import numpy as np\n'), ((5801, 5812), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5809, 5812), True, 'import numpy as np\n'), ((5829, 5863), 'matplotlib.pyplot.loglog', 'plt.loglog', (['x', 'y'], {'label': 'gmms[g][0]'}), '(x, y, label=gmms[g][0])\n', (5839, 5863), True, 'import matplotlib.pyplot as plt\n'), ((6884, 6902), 'numpy.logspace', 'np.logspace', (['(-2)', '(1)'], {}), '(-2, 1)\n', (6895, 6902), True, 'import numpy as np\n'), ((7585, 7596), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7593, 7596), True, 'import numpy as np\n'), ((7613, 7647), 'matplotlib.pyplot.loglog', 'plt.loglog', (['x', 'y'], {'label': 
'gmms[g][0]'}), '(x, y, label=gmms[g][0])\n', (7623, 7647), True, 'import matplotlib.pyplot as plt\n'), ((3175, 3224), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 'imt'], {'period': '[period]'}), '(fault, site, g, imt, period=[period])\n', (3186, 3224), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((3540, 3587), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 'imt'], {'period': 'period'}), '(fault, site, g, imt, period=period)\n', (3551, 3587), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((3783, 3800), 'numpy.exp', 'np.exp', (['(-stdvs[0])'], {}), '(-stdvs[0])\n', (3789, 3800), True, 'import numpy as np\n'), ((3806, 3822), 'numpy.exp', 'np.exp', (['stdvs[0]'], {}), '(stdvs[0])\n', (3812, 3822), True, 'import numpy as np\n'), ((5296, 5345), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 'imt'], {'period': '[period]'}), '(fault, site, g, imt, period=[period])\n', (5307, 5345), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((5661, 5708), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 'imt'], {'period': 'period'}), '(fault, site, g, imt, period=period)\n', (5672, 5708), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((5904, 5921), 'numpy.exp', 'np.exp', (['(-stdvs[0])'], {}), '(-stdvs[0])\n', (5910, 5921), True, 'import numpy as np\n'), ((5927, 5943), 'numpy.exp', 'np.exp', (['stdvs[0]'], {}), '(stdvs[0])\n', (5933, 5943), True, 'import numpy as np\n'), ((7080, 7129), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 'imt'], {'period': '[period]'}), '(fault, site, g, imt, period=[period])\n', (7091, 7129), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((7445, 7492), 'empirical.util.empirical_factory.compute_gmm', 'compute_gmm', (['fault', 'site', 'g', 
'imt'], {'period': 'period'}), '(fault, site, g, imt, period=period)\n', (7456, 7492), False, 'from empirical.util.empirical_factory import compute_gmm\n'), ((7688, 7705), 'numpy.exp', 'np.exp', (['(-stdvs[0])'], {}), '(-stdvs[0])\n', (7694, 7705), True, 'import numpy as np\n'), ((7711, 7727), 'numpy.exp', 'np.exp', (['stdvs[0]'], {}), '(stdvs[0])\n', (7717, 7727), True, 'import numpy as np\n')] |
from pathlib import Path
from unittest import TestCase
import numpy as np
from dicom_parser.image import Image
from dicom_parser.series import Series
from tests.fixtures import (
SERIES_SPATIAL_RESOLUTION,
TEST_IMAGE_PATH,
TEST_RSFMRI_SERIES_PATH,
TEST_RSFMRI_SERIES_PIXEL_ARRAY,
TEST_SERIES_PATH,
TEST_UTILS_DIRECTORY,
)
class SeriesTestCase(TestCase):
    """Tests for dicom_parser.series.Series.

    Covers construction from str/Path and the associated error cases,
    instance-number ordering, pixel-data stacking (3D localizer and 4D
    mosaic fMRI), header queries via get() / the indexing operator, and
    spatial-resolution calculation.
    """

    def setUp(self):
        # An 11-image localizer series shared by most tests below.
        self.localizer = Series(TEST_SERIES_PATH)

    def test_initialization_with_string_path(self):
        series = Series(TEST_SERIES_PATH)
        self.assertIsInstance(series, Series)
        self.assertIsInstance(series.path, Path)
        self.assertIsInstance(series.images, tuple)

    def test_initialization_with_pathlib_path(self):
        series = Series(Path(TEST_SERIES_PATH))
        self.assertIsInstance(series, Series)
        self.assertIsInstance(series.path, Path)
        self.assertIsInstance(series.images, tuple)

    def test_initialization_with_file_path_raises_value_error(self):
        # A single-file path is not a series directory.
        with self.assertRaises(ValueError):
            Series(TEST_IMAGE_PATH)

    def test_initialization_with_invalid_path_raises_value_error(self):
        with self.assertRaises(ValueError):
            Series("/some/invalid_path/at/nowhere")

    def test_initialization_with_no_dcms_in_path_raises_file_not_found_error(
        self,
    ):
        # Existing directory, but no .dcm files inside it.
        with self.assertRaises(FileNotFoundError):
            Series(TEST_UTILS_DIRECTORY)

    def test_get_images_got_correct_number_of_images(self):
        series = Series(TEST_SERIES_PATH)
        self.assertEqual(len(series.images), 11)

    def test_images_are_ordered_by_instance_number(self):
        series = Series(TEST_SERIES_PATH)
        instance_numbers = tuple(
            [image.header.get("InstanceNumber") for image in series.images]
        )
        expected = tuple(range(1, 12))
        self.assertTupleEqual(instance_numbers, expected)

    def test_data_property(self):
        # Localizer stacks to a rows x cols x n_images volume.
        series = Series(TEST_SERIES_PATH)
        self.assertIsInstance(series.data, np.ndarray)
        self.assertTupleEqual(series.data.shape, (512, 512, 11))

    def test_mosaic_series_returns_as_4d(self):
        # Mosaic-encoded fMRI: 96x96 in-plane, 64 slices, 3 volumes.
        series = Series(TEST_RSFMRI_SERIES_PATH)
        data = series.data
        expected_shape = 96, 96, 64, 3
        self.assertTupleEqual(data.shape, expected_shape)

    def test_mosaic_series_data_same_as_nifti(self):
        # Pixel data must match the pre-converted NIfTI reference array.
        series = Series(TEST_RSFMRI_SERIES_PATH)
        nii_data = np.load(TEST_RSFMRI_SERIES_PIXEL_ARRAY)
        self.assertTrue(np.array_equal(series.data, nii_data))

    def test_len(self):
        rsfmri = Series(TEST_RSFMRI_SERIES_PATH)
        self.assertEqual(len(self.localizer), 11)
        self.assertEqual(len(rsfmri), 3)

    def test_get_method_with_single_value_keyword(self):
        # A header value shared by all images collapses to a single value.
        result = self.localizer.get("EchoTime")
        expected = 3.04
        self.assertEqual(result, expected)

    def test_get_method_with_single_value_tuple(self):
        # (0018, 0080) is RepetitionTime.
        result = self.localizer.get(("0018", "0080"))
        expected = 7.6
        self.assertEqual(result, expected)

    def test_get_method_with_multiple_values_keyword(self):
        # Per-image values come back as a list in instance order.
        result = self.localizer.get("InstanceNumber")
        expected = list(range(1, 12))
        self.assertListEqual(result, expected)

    def test_get_method_with_multiple_values_tuple(self):
        # (0008, 0018) is SOPInstanceUID — unique per image.
        result = self.localizer.get(("0008", "0018"))
        expected = [
            "1.3.12.2.1107.5.2.43.66024.2018012410454373581200543",
            "1.3.12.2.1107.5.2.43.66024.201801241046013643300561",
            "1.3.12.2.1107.5.2.43.66024.2018012410454348504800541",
            "1.3.12.2.1107.5.2.43.66024.2018012410460458489400565",
            "1.3.12.2.1107.5.2.43.66024.2018012410454687305600545",
            "1.3.12.2.1107.5.2.43.66024.2018012410460815771100569",
            "1.3.12.2.1107.5.2.43.66024.2018012410455043640800549",
            "1.3.12.2.1107.5.2.43.66024.2018012410461190213200575",
            "1.3.12.2.1107.5.2.43.66024.2018012410455394762200553",
            "1.3.12.2.1107.5.2.43.66024.2018012410461517027500577",
            "1.3.12.2.1107.5.2.43.66024.2018012410455755378100557",
        ]
        self.assertListEqual(result, expected)

    def test_get_method_with_missing_keyword(self):
        result = self.localizer.get("MissingKey")
        self.assertIsNone(result)

    def test_get_method_with_missing_keyword_and_default(self):
        result = self.localizer.get("MissingKey", "default_value")
        expected = "default_value"
        self.assertEqual(result, expected)

    def test_indexing_operator_with_string(self):
        result = self.localizer["EchoTime"]
        expected = 3.04
        self.assertEqual(result, expected)

    def test_indexing_operator_with_tag_and_multiple_values(self):
        result = self.localizer[("0020", "0013")]
        expected = list(range(1, 12))
        self.assertListEqual(result, expected)

    def test_indexing_operator_with_int_returns_image_instance(self):
        # Integer indices select images rather than header values.
        result = self.localizer[3]
        self.assertIsInstance(result, Image)

    def test_indexing_operator_with_int_returns_correct_instance(self):
        result = self.localizer[3].header.get("InstanceNumber")
        self.assertEqual(result, 4)

    def test_indexing_operator_with_slice_returns_multiple_images(self):
        result = self.localizer[3:6]
        self.assertIsInstance(result, tuple)
        self.assertEqual(len(result), 3)

    def test_indexing_operator_with_invalid_key_raises_key_error(self):
        # Unlike get(), __getitem__ raises for unknown header keywords.
        with self.assertRaises(KeyError):
            self.localizer["MissingKey"]

    def test_indexing_operator_with_invalid_type_raises_type_error(self):
        invalid_types = True, 4.20, b"bytes", [1, 2, 3]
        for value_type in invalid_types:
            with self.assertRaises(TypeError):
                self.localizer[value_type]

    def test_get_spatial_resolution(self):
        series = Series(TEST_SERIES_PATH)
        value = series.get_spatial_resolution()
        self.assertTupleEqual(value, SERIES_SPATIAL_RESOLUTION)

    def test_get_spatial_resolution_without_slice_thickness(self):
        # Without SliceThickness only the in-plane resolution is returned.
        series = Series(TEST_SERIES_PATH)
        del series[0].header.raw.SliceThickness
        value = series.get_spatial_resolution()
        expected = SERIES_SPATIAL_RESOLUTION[:-1]
        self.assertTupleEqual(value, expected)

    def test_spatial_resolution(self):
        # Property form of get_spatial_resolution().
        series = Series(TEST_SERIES_PATH)
        value = series.spatial_resolution
        self.assertTupleEqual(value, SERIES_SPATIAL_RESOLUTION)
| [
"numpy.array_equal",
"dicom_parser.series.Series",
"numpy.load",
"pathlib.Path"
] | [((428, 452), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (434, 452), False, 'from dicom_parser.series import Series\n'), ((523, 547), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (529, 547), False, 'from dicom_parser.series import Series\n'), ((1533, 1557), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (1539, 1557), False, 'from dicom_parser.series import Series\n'), ((1683, 1707), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (1689, 1707), False, 'from dicom_parser.series import Series\n'), ((1977, 2001), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (1983, 2001), False, 'from dicom_parser.series import Series\n'), ((2188, 2219), 'dicom_parser.series.Series', 'Series', (['TEST_RSFMRI_SERIES_PATH'], {}), '(TEST_RSFMRI_SERIES_PATH)\n', (2194, 2219), False, 'from dicom_parser.series import Series\n'), ((2415, 2446), 'dicom_parser.series.Series', 'Series', (['TEST_RSFMRI_SERIES_PATH'], {}), '(TEST_RSFMRI_SERIES_PATH)\n', (2421, 2446), False, 'from dicom_parser.series import Series\n'), ((2466, 2505), 'numpy.load', 'np.load', (['TEST_RSFMRI_SERIES_PIXEL_ARRAY'], {}), '(TEST_RSFMRI_SERIES_PIXEL_ARRAY)\n', (2473, 2505), True, 'import numpy as np\n'), ((2611, 2642), 'dicom_parser.series.Series', 'Series', (['TEST_RSFMRI_SERIES_PATH'], {}), '(TEST_RSFMRI_SERIES_PATH)\n', (2617, 2642), False, 'from dicom_parser.series import Series\n'), ((5933, 5957), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (5939, 5957), False, 'from dicom_parser.series import Series\n'), ((6155, 6179), 'dicom_parser.series.Series', 'Series', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (6161, 6179), False, 'from dicom_parser.series import Series\n'), ((6430, 6454), 'dicom_parser.series.Series', 'Series', 
(['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (6436, 6454), False, 'from dicom_parser.series import Series\n'), ((773, 795), 'pathlib.Path', 'Path', (['TEST_SERIES_PATH'], {}), '(TEST_SERIES_PATH)\n', (777, 795), False, 'from pathlib import Path\n'), ((1070, 1093), 'dicom_parser.series.Series', 'Series', (['TEST_IMAGE_PATH'], {}), '(TEST_IMAGE_PATH)\n', (1076, 1093), False, 'from dicom_parser.series import Series\n'), ((1223, 1262), 'dicom_parser.series.Series', 'Series', (['"""/some/invalid_path/at/nowhere"""'], {}), "('/some/invalid_path/at/nowhere')\n", (1229, 1262), False, 'from dicom_parser.series import Series\n'), ((1426, 1454), 'dicom_parser.series.Series', 'Series', (['TEST_UTILS_DIRECTORY'], {}), '(TEST_UTILS_DIRECTORY)\n', (1432, 1454), False, 'from dicom_parser.series import Series\n'), ((2530, 2567), 'numpy.array_equal', 'np.array_equal', (['series.data', 'nii_data'], {}), '(series.data, nii_data)\n', (2544, 2567), True, 'import numpy as np\n')] |
"""
Extracts features for the ImageNet dataset provided by torchvision using the
pre-trained resnet specified in `resnet.py`
"""
import logging
import os
import argparse
import numpy as np
from torchvision import transforms
from imagenet_dataset import ImageNet
from resnet import resnet50
import torch
from torch.nn import DataParallel
from torch.utils.data import DataLoader, Dataset
import time
import pickle
import subprocess
import pdb
import torch.backends.cudnn as cudnn
# cuDNN autotuner: benchmark conv algorithms for the fixed input shape.
cudnn.benchmark = True
parser = argparse.ArgumentParser(
    description='Apply pretrained network on ImageNet dataset')
parser.add_argument('output_dir', type=str,
                    help='directory where datasets are located')
parser.add_argument('--batch_size', type=int, default=1000, metavar='N',
                    help='input batch size for training')
parser.add_argument('--data_dir', default='data/imagenet', type=str,
                    help='directory where datasets are located')
parser.add_argument('--model', default='resnet50', type=str,
                    help='name of the model')
parser.add_argument('--num_workers', type=int, default=30,
                    help='Number of workers for data loading')
parser.add_argument('--device', type=str, default='cuda',
                    choices=('cpu', 'cuda'),
                    help='Where to do the computation')
parser.add_argument('--overwrite', action='store_true', default=False,
                    help='Whether to overwrite existing output files')
# parse args, etc.
args = parser.parse_args()
# NOTE(review): this module-level batch_size appears unused —
# extract_features() reads args.batch_size directly.
batch_size = args.batch_size
if not os.path.exists(args.output_dir):
    os.makedirs(args.output_dir)
# Log to both a file inside output_dir and to the console.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(message)s",
    handlers=[
        logging.FileHandler(os.path.join(args.output_dir,
                                         'feature_extraction.log')),
        logging.StreamHandler()
    ])
logger = logging.getLogger()
logging.info('ImageNet feature extraction')
logging.info('Args: %s', args)
# Record the current git commit for reproducibility of the extracted features.
hash_cmd = subprocess.run('git rev-parse --short HEAD', shell=True, check=True,
                          stdout=subprocess.PIPE)
git_hash = hash_cmd.stdout.decode('utf-8').strip()
logging.info(f'Git commit: {git_hash}')
# get model
if args.model == 'resnet50':
    model = resnet50(pretrained=True).to(args.device)
else:
    # NOTE(review): 'Unkown' is a typo in this error message (left unchanged
    # in this documentation-only pass).
    raise ValueError('Unkown model %s' % args.model)
if args.device == 'cuda':
    model = DataParallel(model)
# get input transform: resize/center-crop to 224 and normalise with the
# standard ImageNet channel statistics.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])
def extract_features(split='val'):
    """Run the pretrained model over one ImageNet split and save features.

    Writes a dict of ``features`` / ``predictions`` / ``targets`` tensors to
    ``<output_dir>/<split>.pt``.  Skips the split when the output file already
    exists and ``--overwrite`` was not given.  Uses the module-level ``args``,
    ``model`` and ``transform`` configured at import time.
    """
    # Destination for this split's tensors.
    output_path = os.path.join(args.output_dir, split + '.pt')
    logging.info(f'Saving {split} set features to {output_path}')
    if not args.overwrite and os.path.exists(output_path):
        logging.info('Output file exists, skipping!')
        return

    # Deterministic order (shuffle=False) so rows line up with dataset.targets.
    dataset = ImageNet(args.data_dir, split=split, transform=transform)
    loader = DataLoader(dataset,
                       batch_size=args.batch_size, shuffle=False,
                       num_workers=args.num_workers, pin_memory=True)

    model.eval()
    feature_chunks = []
    prediction_chunks = []
    seen = top1_hits = 0
    batch_start = time.time()
    for images, labels in loader:
        images, labels = images.to(args.device), labels.to(args.device)
        with torch.no_grad():
            scores, batch_features = model(images)
        batch_predictions = scores.argmax(1)
        batch_correct = (batch_predictions == labels).float().sum()
        seen += scores.shape[0]
        top1_hits += batch_correct.item()
        feature_chunks.append(batch_features.cpu().numpy())
        prediction_chunks.append(batch_predictions.cpu().numpy())
        elapsed = time.time() - batch_start
        logging.info('Processed %d/%d images (%4.1f%%), %-4.0f images/sec, '
                     'Top-1 accuracy = %.3g%%' %
                     (seen, len(dataset), 100 * seen / len(dataset),
                      scores.shape[0] / elapsed, 100 * top1_hits / seen))
        batch_start = time.time()

    # Concatenate the per-batch chunks and write a single .pt file.
    logging.info('Saving features to file')
    all_features = np.concatenate(feature_chunks, axis=0)
    all_predictions = np.concatenate(prediction_chunks, axis=0)
    torch.save(dict(features=torch.Tensor(all_features),
                    predictions=torch.Tensor(all_predictions),
                    targets=torch.LongTensor(dataset.targets)),
               output_path)
extract_features('val')
extract_features('train')
| [
"argparse.ArgumentParser",
"imagenet_dataset.ImageNet",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.Tensor",
"torchvision.transforms.CenterCrop",
"logging.StreamHandler",
"numpy.concatenate",
"torchvision.transfor... | [((521, 609), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Apply pretrained network on ImageNet dataset"""'}), "(description=\n 'Apply pretrained network on ImageNet dataset')\n", (544, 609), False, 'import argparse\n'), ((1942, 1961), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1959, 1961), False, 'import logging\n'), ((1963, 2006), 'logging.info', 'logging.info', (['"""ImageNet feature extraction"""'], {}), "('ImageNet feature extraction')\n", (1975, 2006), False, 'import logging\n'), ((2007, 2037), 'logging.info', 'logging.info', (['"""Args: %s"""', 'args'], {}), "('Args: %s', args)\n", (2019, 2037), False, 'import logging\n'), ((2050, 2147), 'subprocess.run', 'subprocess.run', (['"""git rev-parse --short HEAD"""'], {'shell': '(True)', 'check': '(True)', 'stdout': 'subprocess.PIPE'}), "('git rev-parse --short HEAD', shell=True, check=True, stdout\n =subprocess.PIPE)\n", (2064, 2147), False, 'import subprocess\n'), ((2198, 2237), 'logging.info', 'logging.info', (['f"""Git commit: {git_hash}"""'], {}), "(f'Git commit: {git_hash}')\n", (2210, 2237), False, 'import logging\n'), ((1600, 1631), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (1614, 1631), False, 'import os\n'), ((1637, 1665), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {}), '(args.output_dir)\n', (1648, 1665), False, 'import os\n'), ((2431, 2450), 'torch.nn.DataParallel', 'DataParallel', (['model'], {}), '(model)\n', (2443, 2450), False, 'from torch.nn import DataParallel\n'), ((2804, 2848), 'os.path.join', 'os.path.join', (['args.output_dir', "(split + '.pt')"], {}), "(args.output_dir, split + '.pt')\n", (2816, 2848), False, 'import os\n'), ((2853, 2914), 'logging.info', 'logging.info', (['f"""Saving {split} set features to {output_path}"""'], {}), "(f'Saving {split} set features to {output_path}')\n", (2865, 2914), False, 'import logging\n'), ((3102, 3159), 
'imagenet_dataset.ImageNet', 'ImageNet', (['args.data_dir'], {'split': 'split', 'transform': 'transform'}), '(args.data_dir, split=split, transform=transform)\n', (3110, 3159), False, 'from imagenet_dataset import ImageNet\n'), ((3178, 3292), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': 'args.num_workers', 'pin_memory': '(True)'}), '(dataset, batch_size=args.batch_size, shuffle=False, num_workers=\n args.num_workers, pin_memory=True)\n', (3188, 3292), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3471, 3482), 'time.time', 'time.time', ([], {}), '()\n', (3480, 3482), False, 'import time\n'), ((4339, 4378), 'logging.info', 'logging.info', (['"""Saving features to file"""'], {}), "('Saving features to file')\n", (4351, 4378), False, 'import logging\n'), ((4394, 4426), 'numpy.concatenate', 'np.concatenate', (['features'], {'axis': '(0)'}), '(features, axis=0)\n', (4408, 4426), True, 'import numpy as np\n'), ((4445, 4480), 'numpy.concatenate', 'np.concatenate', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (4459, 4480), True, 'import numpy as np\n'), ((2515, 2537), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256)'], {}), '(256)\n', (2532, 2537), False, 'from torchvision import transforms\n'), ((2547, 2573), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2568, 2573), False, 'from torchvision import transforms\n'), ((2583, 2604), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2602, 2604), False, 'from torchvision import transforms\n'), ((2614, 2689), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2634, 2689), False, 'from torchvision import transforms\n'), ((2945, 2972), 'os.path.exists', 'os.path.exists', 
(['output_path'], {}), '(output_path)\n', (2959, 2972), False, 'import os\n'), ((2982, 3027), 'logging.info', 'logging.info', (['"""Output file exists, skipping!"""'], {}), "('Output file exists, skipping!')\n", (2994, 3027), False, 'import logging\n'), ((4296, 4307), 'time.time', 'time.time', ([], {}), '()\n', (4305, 4307), False, 'import time\n'), ((1902, 1925), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1923, 1925), False, 'import logging\n'), ((2292, 2317), 'resnet.resnet50', 'resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (2300, 2317), False, 'from resnet import resnet50\n'), ((3593, 3608), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3606, 3608), False, 'import torch\n'), ((3979, 3990), 'time.time', 'time.time', ([], {}), '()\n', (3988, 3990), False, 'import time\n'), ((1795, 1850), 'os.path.join', 'os.path.join', (['args.output_dir', '"""feature_extraction.log"""'], {}), "(args.output_dir, 'feature_extraction.log')\n", (1807, 1850), False, 'import os\n'), ((4510, 4532), 'torch.Tensor', 'torch.Tensor', (['features'], {}), '(features)\n', (4522, 4532), False, 'import torch\n'), ((4566, 4591), 'torch.Tensor', 'torch.Tensor', (['predictions'], {}), '(predictions)\n', (4578, 4591), False, 'import torch\n'), ((4621, 4654), 'torch.LongTensor', 'torch.LongTensor', (['dataset.targets'], {}), '(dataset.targets)\n', (4637, 4654), False, 'import torch\n')] |
import numpy as np
from tqdm import tqdm
def get_pixel_value(img, c_pixel):
    """Return the mean of the central ``(2*c_pixel) x (2*c_pixel)`` patch of a
    square image.

    ``c_pixel`` is the half-width of the central patch:
    c_pixel=1 -> the 4 centre pixels; c_pixel=2 -> the 16 innermost pixels;
    c_pixel=3 -> the 36 innermost pixels, i.e. ``(2*c_pixel)**2`` values.
    When ``c_pixel`` equals half the image size, the whole image is averaged.

    :param img: square 2-D numpy array with an even side length
    :param c_pixel: half-width of the central patch, ``1 <= c_pixel <= len(img)/2``
    :return: mean of the selected central pixel values
    """
    img_dim = len(img)
    assert c_pixel > 0
    assert c_pixel <= img_dim / 2
    assert img_dim % 2 == 0
    assert img.shape == (img_dim, img_dim)

    if c_pixel == img_dim / 2:
        # The patch covers the whole image.
        return img.mean()

    # Vectorised slice of the central patch; replaces the original
    # per-pixel Python double loop with a single numpy operation.
    dimension = c_pixel * 2
    base_index = (img_dim - dimension) // 2
    patch = img[base_index:base_index + dimension,
                base_index:base_index + dimension]
    assert patch.size == c_pixel * c_pixel * 4
    return patch.mean()
"numpy.asarray"
] | [((936, 954), 'numpy.asarray', 'np.asarray', (['values'], {}), '(values)\n', (946, 954), True, 'import numpy as np\n')] |
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
try:
import pyproxima2 as proxima
except ImportError: # pragma: no cover
proxima = None
from ... import tensor as mt
# Numpy dtypes that can be mapped onto proxima index feature types.
available_numpy_dtypes = [
    np.dtype(np.float16),
    np.dtype(np.float32),
    np.dtype(np.int8),
    np.dtype(np.int16),
]

if proxima:
    # Proxima feature types, kept in the same order as
    # ``available_numpy_dtypes`` so the two lists can be zipped together.
    _proxima_types = [
        proxima.IndexMeta.FT_FP16,
        proxima.IndexMeta.FT_FP32,
        proxima.IndexMeta.FT_INT8,
        proxima.IndexMeta.FT_INT16,
    ]

    assert len(_proxima_types) == len(available_numpy_dtypes)

    # numpy dtype -> proxima feature type lookup table
    # (only defined when pyproxima2 is importable).
    _type_mapping = {numpy_dtype: proxima_type
                     for numpy_dtype, proxima_type
                     in zip(available_numpy_dtypes, _proxima_types)}
def validate_tensor(tensor):
    """Coerce ``tensor`` into a tensor object and check that it is 2-d.

    Objects exposing a ``to_tensor()`` method are converted via that
    method; anything else is wrapped with ``mt.tensor``.

    :raises ValueError: if the resulting tensor is not 2-dimensional.
    """
    converter = getattr(tensor, 'to_tensor', None)
    tensor = converter() if converter is not None else mt.tensor(tensor)
    if tensor.ndim != 2:
        raise ValueError('Input tensor should be 2-d')
    return tensor
def get_proxima_type(np_dtype):
    """Map a numpy dtype onto the matching proxima feature type.

    :param np_dtype: a ``np.dtype`` from ``available_numpy_dtypes``
    :raises TypeError: if ``np_dtype`` has no proxima equivalent.
    """
    if np_dtype in _type_mapping:
        return _type_mapping[np_dtype]
    supported = ', '.join(t.name for t in _type_mapping)
    raise TypeError(f"Does not support {np_dtype}, available types include "
                    f"{supported}")
| [
"numpy.dtype"
] | [((776, 796), 'numpy.dtype', 'np.dtype', (['np.float16'], {}), '(np.float16)\n', (784, 796), True, 'import numpy as np\n'), ((802, 822), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (810, 822), True, 'import numpy as np\n'), ((828, 845), 'numpy.dtype', 'np.dtype', (['np.int8'], {}), '(np.int8)\n', (836, 845), True, 'import numpy as np\n'), ((851, 869), 'numpy.dtype', 'np.dtype', (['np.int16'], {}), '(np.int16)\n', (859, 869), True, 'import numpy as np\n')] |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import json
import os
import h5py
import numpy as np
class MicrosoftCocoDataset:
    """
    Wrapper around the Microsoft COCO image-captioning dataset; all access
    to the dataset goes through this class.

    After initialisation the data lives in ``self.dataset`` (a dict):

        Training split:
            key                 shape          value
            train_captions      (400135, 17)   caption token ids
            train_image_idxs    (400135,)      caption -> image index mapping,
                                               used to find the feature vector
                                               of the image a caption describes
            train_features      (82783, 512)   encoded images, feature dim 512
            train_urls          (82783,)       image URLs
        Validation split:
            val_captions        (195954, 17)
            val_image_idxs      (195954,)
            val_features        (40504, 512)
            val_urls            (40504,)
        Vocabulary:
            idx_to_word         1004           word id -> word
            word_to_idx         1004           word -> word id

    Author: xrh
    Date: 2021-9-30
    """

    def __init__(self, base_dir='../../dataset/ImageCaption/microsoft_coco', sample_N=None, use_pca_features=True):
        """
        :param base_dir: root directory of the dataset
        :param sample_N: number of training samples kept after subsampling
                         (None keeps the full training set)
        :param use_pca_features: whether to use the PCA-reduced image features
        """
        self.base_dir = base_dir
        self.sample_N = sample_N
        self.use_pca_features = use_pca_features

        self.dataset = {}
        self.__load_data()  # read the preprocessed dataset from disk

        self.vocab_obj = Vocab(id_to_word=self.dataset['idx_to_word'], word_to_id=self.dataset['word_to_idx'])

        if sample_N is not None:
            # Subsample the training set.
            np.random.seed(231)  # pin the RNG for the current context

            N_train = np.shape(self.dataset['train_captions'])[0]  # total training samples
            mask = np.random.randint(N_train, size=self.sample_N)
            self.dataset['train_captions'] = self.dataset['train_captions'][mask]
            self.dataset['train_image_idxs'] = self.dataset['train_image_idxs'][mask]

            # Subsample the validation (test) set to half the training size.
            # Bug fix: draw indices from the *validation* split; the original
            # used the already-subsampled train_captions here, so only the
            # first sample_N validation rows could ever be selected.
            N_val = np.shape(self.dataset['val_captions'])[0]  # total validation samples
            mask = np.random.randint(N_val, size=self.sample_N // 2)
            self.dataset['val_captions'] = self.dataset['val_captions'][mask]
            self.dataset['val_image_idxs'] = self.dataset['val_image_idxs'][mask]

            self.N = sample_N
        else:
            self.N = np.shape(self.dataset['train_captions'])[0]

        self.feature_dim = self.dataset['train_features'].shape[1]  # dimension of the image feature vectors
        self.caption_length = self.dataset['train_captions'].shape[1]  # length of an image caption

    def __load_data(self):
        """
        Load the preprocessed dataset files into ``self.dataset``.
        """
        caption_file = os.path.join(self.base_dir, 'coco2014_captions.h5')
        with h5py.File(caption_file, 'r') as f:
            for k, v in f.items():
                self.dataset[k] = np.asarray(v)

        if self.use_pca_features:
            train_feat_file = os.path.join(self.base_dir, 'train2014_vgg16_fc7_pca.h5')
        else:
            train_feat_file = os.path.join(self.base_dir, 'train2014_vgg16_fc7.h5')
        with h5py.File(train_feat_file, 'r') as f:
            self.dataset['train_features'] = np.asarray(f['features'])

        if self.use_pca_features:
            val_feat_file = os.path.join(self.base_dir, 'val2014_vgg16_fc7_pca.h5')
        else:
            val_feat_file = os.path.join(self.base_dir, 'val2014_vgg16_fc7.h5')
        with h5py.File(val_feat_file, 'r') as f:
            self.dataset['val_features'] = np.asarray(f['features'])

        dict_file = os.path.join(self.base_dir, 'coco2014_vocab.json')
        with open(dict_file, 'r') as f:
            dict_data = json.load(f)
            for k, v in dict_data.items():
                self.dataset[k] = v

        train_url_file = os.path.join(self.base_dir, 'train2014_urls.txt')
        with open(train_url_file, 'r') as f:
            train_urls = np.asarray([line.strip() for line in f])
        self.dataset['train_urls'] = train_urls

        val_url_file = os.path.join(self.base_dir, 'val2014_urls.txt')
        with open(val_url_file, 'r') as f:
            val_urls = np.asarray([line.strip() for line in f])
        self.dataset['val_urls'] = val_urls

    def decode_captions(self, captions):
        """
        Decode captions made of word ids back into plain-text sentences.

        :param captions: one caption (1-D array) or a batch of captions (2-D array)
        :return: a string for one caption, or a list of strings for a batch
        """
        singleton = False
        if captions.ndim == 1:  # captions holds a single sentence
            singleton = True
            captions = captions[None]
        decoded = []
        N, T = captions.shape
        for i in range(N):
            words = []
            for t in range(T):
                word = self.dataset['idx_to_word'][captions[i, t]]
                if word != '<NULL>':
                    words.append(word)
                if word == '<END>':
                    break
            decoded.append(' '.join(words))
        if singleton:
            decoded = decoded[0]
        return decoded

    def sample_minibatch(self, batch_size=128, Type='train', return_url=False):
        """
        Sample one batch of (caption, image feature) pairs for training.

        :param batch_size: number of samples in the batch
        :param Type: dataset split to sample from; 'train' is the training split
        :param return_url: whether to also return the image URLs
        :return:
            captions, image_features (and urls when ``return_url`` is True)
        """
        split_size = self.dataset['%s_captions' % Type].shape[0]
        # NOTE: the np.random.seed set in __init__ only pins the first call;
        # subsequent calls draw fresh random batches.
        mask = np.random.choice(split_size, batch_size)
        captions = self.dataset['%s_captions' % Type][mask]
        image_idxs = self.dataset['%s_image_idxs' % Type][mask]
        image_features = self.dataset['%s_features' % Type][image_idxs]
        urls = self.dataset['%s_urls' % Type][image_idxs]
        if return_url:
            return captions, image_features, urls
        return captions, image_features
class Vocab:
    """Bidirectional vocabulary with unknown-word handling.

    Maps words to integer ids and back; words not present in the
    dictionary resolve to the id of ``_unk_str`` (default '<UNK>').
    """

    def __init__(self, word_to_id, id_to_word, _unk_str='<UNK>'):
        self.word_to_id = word_to_id
        self.id_to_word = id_to_word
        self._unk_str = _unk_str

    def map_id_to_word(self, id):
        """Return the word for a given word id."""
        return self.id_to_word[id]

    def map_word_to_id(self, word):
        """Return the id of ``word``; unknown words map to the '<UNK>' id."""
        if word in self.word_to_id:
            return self.word_to_id[word]
        # out-of-vocabulary word -> id of the unknown-word token
        return self.word_to_id[self._unk_str]
| [
"h5py.File",
"json.load",
"numpy.random.seed",
"numpy.asarray",
"numpy.shape",
"numpy.random.randint",
"numpy.random.choice",
"os.path.join"
] | [((2521, 2572), 'os.path.join', 'os.path.join', (['self.base_dir', '"""coco2014_captions.h5"""'], {}), "(self.base_dir, 'coco2014_captions.h5')\n", (2533, 2572), False, 'import os\n'), ((3399, 3449), 'os.path.join', 'os.path.join', (['self.base_dir', '"""coco2014_vocab.json"""'], {}), "(self.base_dir, 'coco2014_vocab.json')\n", (3411, 3449), False, 'import os\n'), ((3632, 3681), 'os.path.join', 'os.path.join', (['self.base_dir', '"""train2014_urls.txt"""'], {}), "(self.base_dir, 'train2014_urls.txt')\n", (3644, 3681), False, 'import os\n'), ((3865, 3912), 'os.path.join', 'os.path.join', (['self.base_dir', '"""val2014_urls.txt"""'], {}), "(self.base_dir, 'val2014_urls.txt')\n", (3877, 3912), False, 'import os\n'), ((5228, 5268), 'numpy.random.choice', 'np.random.choice', (['split_size', 'batch_size'], {}), '(split_size, batch_size)\n', (5244, 5268), True, 'import numpy as np\n'), ((1406, 1425), 'numpy.random.seed', 'np.random.seed', (['(231)'], {}), '(231)\n', (1420, 1425), True, 'import numpy as np\n'), ((1546, 1592), 'numpy.random.randint', 'np.random.randint', (['N_train'], {'size': 'self.sample_N'}), '(N_train, size=self.sample_N)\n', (1563, 1592), True, 'import numpy as np\n'), ((1903, 1952), 'numpy.random.randint', 'np.random.randint', (['N_val'], {'size': '(self.sample_N // 2)'}), '(N_val, size=self.sample_N // 2)\n', (1920, 1952), True, 'import numpy as np\n'), ((2586, 2614), 'h5py.File', 'h5py.File', (['caption_file', '"""r"""'], {}), "(caption_file, 'r')\n", (2595, 2614), False, 'import h5py\n'), ((2769, 2826), 'os.path.join', 'os.path.join', (['self.base_dir', '"""train2014_vgg16_fc7_pca.h5"""'], {}), "(self.base_dir, 'train2014_vgg16_fc7_pca.h5')\n", (2781, 2826), False, 'import os\n'), ((2871, 2924), 'os.path.join', 'os.path.join', (['self.base_dir', '"""train2014_vgg16_fc7.h5"""'], {}), "(self.base_dir, 'train2014_vgg16_fc7.h5')\n", (2883, 2924), False, 'import os\n'), ((2938, 2969), 'h5py.File', 'h5py.File', (['train_feat_file', '"""r"""'], {}), 
"(train_feat_file, 'r')\n", (2947, 2969), False, 'import h5py\n'), ((3021, 3046), 'numpy.asarray', 'np.asarray', (["f['features']"], {}), "(f['features'])\n", (3031, 3046), True, 'import numpy as np\n'), ((3110, 3165), 'os.path.join', 'os.path.join', (['self.base_dir', '"""val2014_vgg16_fc7_pca.h5"""'], {}), "(self.base_dir, 'val2014_vgg16_fc7_pca.h5')\n", (3122, 3165), False, 'import os\n'), ((3208, 3259), 'os.path.join', 'os.path.join', (['self.base_dir', '"""val2014_vgg16_fc7.h5"""'], {}), "(self.base_dir, 'val2014_vgg16_fc7.h5')\n", (3220, 3259), False, 'import os\n'), ((3273, 3302), 'h5py.File', 'h5py.File', (['val_feat_file', '"""r"""'], {}), "(val_feat_file, 'r')\n", (3282, 3302), False, 'import h5py\n'), ((3352, 3377), 'numpy.asarray', 'np.asarray', (["f['features']"], {}), "(f['features'])\n", (3362, 3377), True, 'import numpy as np\n'), ((3514, 3526), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3523, 3526), False, 'import json\n'), ((1471, 1511), 'numpy.shape', 'np.shape', (["self.dataset['train_captions']"], {}), "(self.dataset['train_captions'])\n", (1479, 1511), True, 'import numpy as np\n'), ((1828, 1868), 'numpy.shape', 'np.shape', (["self.dataset['train_captions']"], {}), "(self.dataset['train_captions'])\n", (1836, 1868), True, 'import numpy as np\n'), ((2178, 2218), 'numpy.shape', 'np.shape', (["self.dataset['train_captions']"], {}), "(self.dataset['train_captions'])\n", (2186, 2218), True, 'import numpy as np\n'), ((2690, 2703), 'numpy.asarray', 'np.asarray', (['v'], {}), '(v)\n', (2700, 2703), True, 'import numpy as np\n')] |
import tensorflow as tf
from helpers import ndc_rays, get_rays
import numpy as np
import imageio
import os
import time
def raw2outputs(raw, z_vals, rays_d, raw_noise_std=0., white_bkgd=False):
    """Convert raw network predictions into rendered ray values.

    :param raw: [N_rays, N_samples, 4] raw (rgb, sigma) model output
    :param z_vals: [N_rays, N_samples] sample depths along each ray
    :param rays_d: [N_rays, 3] ray directions
    :param raw_noise_std: std of noise added to sigma before activation
        (density regularisation during training)
    :param white_bkgd: composite the result onto a white background
    :return: (rgb_map, disp_map, acc_map, weights, depth_map)

    Bug fixes: a stray line-continuation backslash after the ``def`` header
    made the module unparsable, and ``raw_noise_std`` / ``white_bkgd`` were
    referenced as undefined free variables -- they are now keyword
    parameters with backward-compatible defaults.
    """
    def raw2alpha(raw, dists, act_fn=tf.nn.relu):
        # alpha = 1 - exp(-sigma * dist)
        return 1.0 - tf.exp(-act_fn(raw) * dists)

    # Distances between consecutive samples; the last sample gets a huge
    # distance so the final segment absorbs any remaining transmittance.
    dists = z_vals[..., 1:] - z_vals[..., :-1]
    dists = tf.concat(
        [dists, tf.broadcast_to([1e10], dists[..., :1].shape)],
        axis=-1)
    # scale by the (non-normalised) ray direction length
    dists = dists * tf.linalg.norm(rays_d[..., None, :], axis=-1)

    # [N_rays, N_samples, 3]
    rgb = tf.math.sigmoid(raw[..., :3])
    noise = 0.
    if raw_noise_std > 0.:
        noise = tf.random.normal(
            raw[..., 3].shape
        ) * raw_noise_std
    # This is how you get alpha
    # sigma = tf.nn.relu(raw + noise)
    # alpha = 1 - exp(-sigma * dists)
    alpha = raw2alpha(raw[..., 3] + noise, dists)
    # T[0] = 1
    # T[i] = exp(-sum((sigma * dists)[1:i])
    #      = prod(exp(-sigma * dists)[1:i])
    #      = prod((1 - alpha)[1:i])
    # => T = cumprod(1 - alpha, exclusive=True)
    weights = alpha * tf.math.cumprod(1 - alpha + 1e-10, axis=-1, exclusive=True)

    # [N_rays, 3]
    rgb_map = tf.reduce_sum(rgb * weights[..., None], axis=-2)
    depth_map = tf.reduce_sum(weights * z_vals, axis=-1)
    disp_map = 1. / tf.maximum(1e-10, depth_map / tf.reduce_sum(weights, axis=-1))
    acc_map = tf.reduce_sum(weights, -1)

    if white_bkgd:
        # remaining transmittance goes to white
        rgb_map = rgb_map + (1 - acc_map[..., None])

    return rgb_map, disp_map, acc_map, weights, depth_map
def render_rays(ray_batch,
                network_fn,
                network_query_fn,
                N_samples,
                retraw=False,
                lindisp=False,
                perturb=0.,
                N_importance=0,
                network_fine=None,
                white_bkgd=False,
                raw_noise_std=0.,
                verbose=False):
    """Volumetric rendering for a batch of rays.

    :param ray_batch: [N_rays, 8 or 11] packed rays: origin (3), direction (3),
        near/far bounds (2), and optionally normalised view directions (3)
    :param network_fn: coarse NeRF model
    :param network_query_fn: function that queries a model at sample points
    :param N_samples: number of coarse samples per ray
    :param retraw: include the raw model output in the result dict
    :param lindisp: sample linearly in inverse depth instead of depth
    :param perturb: if nonzero, jitter sample positions along each ray
    :param N_importance: number of extra fine samples per ray
    :param network_fine: optional fine model for hierarchical sampling
    :param white_bkgd: composite onto a white background
    :param raw_noise_std: density-noise std
    :param verbose: unused here; kept for interface compatibility
    :return: dict with 'rgb_map', 'disp_map', 'acc_map' plus optional extras

    NOTE(review): ``raw_noise_std`` and ``white_bkgd`` are accepted but not
    forwarded to ``raw2outputs``; ``sample_pdf`` is not defined/imported in
    this module -- confirm both against the reference implementation.
    """
    N_rays = tf.shape(ray_batch)[0]
    # [N_rays, 3], [N_rays, 3]
    rays_o, rays_d = ray_batch[:, 0:3], ray_batch[:, 3:6]
    # view directions are only present when rows have more than 8 entries
    viewdirs = ray_batch[:, -3:] if ray_batch.shape[-1] > 8 else None
    # [N_rays, 1], [N_rays, 1]
    near, far = tf.split(ray_batch[..., 6:8], axis=-1, num_or_size_splits=2)

    # [N_samples]
    t_vals = tf.linspace(0., 1., N_samples)
    # Interpolate between near and far, either with equal spacing along the
    # depth or linearly in inverse depth.  [N_rays, N_samples]
    if not lindisp:
        z_vals = near * (1. - t_vals) + far * t_vals
    else:
        z_vals = 1. / (1. / near * (1 - t_vals) + 1. / far * t_vals)

    if perturb:
        # stratified sampling: jitter each sample within its interval
        mid = (z_vals[..., 1:] + z_vals[..., :-1]) / 2.
        upper = tf.concat([mid, z_vals[..., 1:]], axis=-1)
        lower = tf.concat([z_vals[..., :-1], mid], axis=-1)
        t_rand = tf.random.uniform(tf.shape(z_vals))
        z_vals = lower + (upper - lower) * t_rand

    # sample points: o + t*d -> [N_rays, N_samples, 3]
    pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., None]

    raw = network_query_fn(pts, viewdirs, network_fn)
    rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d)

    if N_importance > 0:
        # hierarchical sampling: draw extra samples from the coarse weights
        rgb_map_0, disp_map_0, acc_map_0 = rgb_map, disp_map, acc_map

        z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])
        z_samples = sample_pdf(
            z_vals_mid, weights[..., 1:-1], N_importance,
            det=(perturb==0.)
        )
        # do not backprop through the sample placement
        z_samples = tf.stop_gradient(z_samples)

        z_vals = tf.sort(
            tf.concat([z_vals, z_samples], -1), -1)
        # Bug fix: the fine-pass points used z_vals[..., None, :], which
        # broadcasts to the wrong shape; it must mirror the coarse pass
        # with the new axis appended last -> [N_rays, N_total, 3].
        pts = rays_o[..., None, :] + rays_d[..., None, :] * z_vals[..., :, None]

        # query the fine network (fall back to the coarse one if absent)
        run_fn = network_fn if network_fine is None else network_fine
        raw = network_query_fn(pts, viewdirs, run_fn)
        rgb_map, disp_map, acc_map, weights, depth_map = raw2outputs(raw, z_vals, rays_d)

    ret = {'rgb_map': rgb_map,
           'disp_map': disp_map,
           'acc_map': acc_map}
    if retraw:
        ret['raw'] = raw
    if N_importance > 0:
        ret.update(
            {'rgb0': rgb_map_0,
             'disp0': disp_map_0,
             'acc0': acc_map_0,
             'z_std': tf.math.reduce_std(z_samples, -1)}
        )

    for k in ret:
        # fail fast on NaN/Inf in any rendered output
        tf.debugging.check_numerics(ret[k], 'output {}'.format(k))

    return ret
def batchify_rays(rays_flat, chunk=1024*32, **kwargs):
    """Render rays in chunks of ``chunk`` to bound peak memory usage.

    :param rays_flat: [N_rays, D] flattened ray descriptors
    :param chunk: number of rays rendered per forward pass
    :param kwargs: forwarded to :func:`render_rays`
    :return: dict mapping output name -> tensor concatenated over all chunks
    """
    rays_dt = tf.data.Dataset.from_tensor_slices(
        rays_flat
    ).batch(chunk)
    all_ret = {}
    for rays_batch in rays_dt:
        ret = render_rays(rays_batch, **kwargs)
        for k, v in ret.items():
            all_ret.setdefault(k, []).append(v)
    # Bug fix: tf.concat requires an explicit axis; join chunk results
    # along the ray (batch) dimension.
    all_ret = {k: tf.concat(v, 0) for k, v in all_ret.items()}
    return all_ret
def render(H, W, focal, chunk=1024 * 32, rays=None, c2w=None,
           ndc=True, near=0., far=1.,
           use_viewdirs=False, c2w_staticcam=None,
           **kwargs):
    """Volume-render a set of rays through the scene.

    :param H, W, focal: image height/width and camera focal length
    :param chunk: max rays rendered per forward pass (memory bound)
    :param rays: optional precomputed (rays_o, rays_d) pair
    :param c2w: camera-to-world matrix; if given, rays for the full image
        are generated from it (overrides ``rays``)
    :param ndc: map rays into normalised device coordinates
    :param near, far: scalar near/far bounds, broadcast per ray
    :param use_viewdirs: feed viewing directions to the network
    :param c2w_staticcam: optional fixed camera to visualise the effect of
        view directions
    :return: [rgb_map, disp_map, acc_map, extras_dict]
    """
    if c2w is not None:
        # render all pixels of a full image
        rays_o, rays_d = get_rays(H, W, focal, c2w)
    else:
        rays_o, rays_d = rays

    if use_viewdirs:
        viewdirs = rays_d
        if c2w_staticcam is not None:
            # special case: keep view dirs, recompute rays from a fixed camera
            rays_o, rays_d = get_rays(H, W, focal, c2w_staticcam)
        viewdirs = viewdirs / tf.linalg.norm(viewdirs, axis=-1, keepdims=True)
        viewdirs = tf.cast(tf.reshape(viewdirs, [-1, 3]), dtype=tf.float32)

    sh = rays_d.shape
    if ndc:
        rays_o, rays_d = ndc_rays(
            H, W, focal, tf.cast(1., tf.float32), rays_o, rays_d
        )
    rays_o = tf.cast(tf.reshape(rays_o, [-1, 3]), dtype=tf.float32)
    # Bug fix: the original reshaped rays_o a second time here, silently
    # discarding the ray directions.
    rays_d = tf.cast(tf.reshape(rays_d, [-1, 3]), dtype=tf.float32)

    # Bug fix: TensorFlow has no ``tf.empty``; broadcast the scalar
    # near/far bounds to one value per ray instead.
    near = near * tf.ones_like(rays_d[..., :1])
    far = far * tf.ones_like(rays_d[..., :1])

    rays = tf.concat([rays_o, rays_d, near, far], axis=-1)
    if use_viewdirs:
        rays = tf.concat([rays, viewdirs], axis=-1)

    all_ret = batchify_rays(rays, chunk, **kwargs)
    for k in all_ret:
        # restore the original image/batch shape ahead of the per-ray dims
        # (bug fix: list(sh[:-1]) + list(...) -- the original concatenated a
        # TensorShape with a list inside a single ``+``)
        k_sh = list(sh[:-1]) + list(all_ret[k].shape[1:])
        all_ret[k] = tf.reshape(all_ret[k], k_sh)

    k_extract = ['rgb_map', 'disp_map', 'acc_map']
    ret_list = [all_ret[k] for k in k_extract]
    ret_dict = {k: v for k, v in all_ret.items() if k not in k_extract}
    return ret_list + [ret_dict]
def render_path(render_poses, hwf, chunk, render_kwargs,
                gt_imgs=None, savedir=None, render_factor=0):
    """Render a sequence of camera poses and collect the resulting frames.

    :param render_poses: iterable of camera-to-world matrices
    :param hwf: (height, width, focal) of the render camera
    :param chunk: max rays per forward pass
    :param render_kwargs: forwarded to :func:`render`
    :param gt_imgs: optional ground-truth images for PSNR reporting
    :param savedir: optional directory for per-frame PNG output
    :param render_factor: downscale factor (0 = full resolution)
    :return: (rgbs, disps) numpy arrays stacked over all poses
    """
    H, W, focal = hwf

    if render_factor != 0:
        # render at reduced resolution for speed
        H = H // render_factor
        W = W // render_factor
        focal = focal / render_factor

    rgbs = []
    disps = []
    t = time.time()
    for i, c2w in enumerate(render_poses):
        print(i, time.time() - t)
        rgb, disp, acc, _ = render(
            H, W, focal, chunk=chunk, c2w=c2w[:3, :4],
            **render_kwargs)
        rgbs.append(rgb.numpy())
        # Bug fix: disparity maps were never collected, so the final
        # np.stack(disps, 0) failed on an empty list.
        disps.append(disp.numpy())
        if i == 0:
            print(rgb.shape, disp.shape)

        if gt_imgs is not None and render_factor == 0:
            # PSNR against the ground-truth frame
            p = -10. * np.log10(np.mean(np.square(rgb - gt_imgs[i])))
            print(p)

        if savedir is not None:
            # NOTE(review): ``to8b`` is not defined/imported in this module;
            # presumably a float->uint8 conversion helper -- confirm.
            rgb8 = to8b(rgbs[-1])
            filename = os.path.join(savedir, '{:03d}.png'.format(i))
            imageio.imwrite(filename, rgb8)

    rgbs = np.stack(rgbs, 0)
    disps = np.stack(disps, 0)
    return rgbs, disps
| [
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.math.reduce_std",
"tensorflow.split",
"tensorflow.math.sigmoid",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.broadcast_to",
"numpy.stack",
"tensorflow.random.normal",
"tensorflow.linspace",
"tensorflow.stop_gradient",
"numpy.... | [((520, 549), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['raw[..., :3]'], {}), '(raw[..., :3])\n', (535, 549), True, 'import tensorflow as tf\n'), ((1135, 1183), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(rgb * weights[..., None])'], {'axis': '(-2)'}), '(rgb * weights[..., None], axis=-2)\n', (1148, 1183), True, 'import tensorflow as tf\n'), ((1201, 1241), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(weights * z_vals)'], {'axis': '(-1)'}), '(weights * z_vals, axis=-1)\n', (1214, 1241), True, 'import tensorflow as tf\n'), ((1341, 1367), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights', '(-1)'], {}), '(weights, -1)\n', (1354, 1367), True, 'import tensorflow as tf\n'), ((2119, 2179), 'tensorflow.split', 'tf.split', (['ray_batch[..., 6:8]'], {'axis': '(-1)', 'num_or_size_splits': '(2)'}), '(ray_batch[..., 6:8], axis=-1, num_or_size_splits=2)\n', (2127, 2179), True, 'import tensorflow as tf\n'), ((2212, 2244), 'tensorflow.linspace', 'tf.linspace', (['(0.0)', '(1.0)', 'N_samples'], {}), '(0.0, 1.0, N_samples)\n', (2223, 2244), True, 'import tensorflow as tf\n'), ((5668, 5699), 'tensorflow.empty', 'tf.empty', (['rays_d[..., :1]', 'near'], {}), '(rays_d[..., :1], near)\n', (5676, 5699), True, 'import tensorflow as tf\n'), ((5710, 5740), 'tensorflow.empty', 'tf.empty', (['rays_d[..., :1]', 'far'], {}), '(rays_d[..., :1], far)\n', (5718, 5740), True, 'import tensorflow as tf\n'), ((5753, 5800), 'tensorflow.concat', 'tf.concat', (['[rays_o, rays_d, near, far]'], {'axis': '(-1)'}), '([rays_o, rays_d, near, far], axis=-1)\n', (5762, 5800), True, 'import tensorflow as tf\n'), ((6573, 6584), 'time.time', 'time.time', ([], {}), '()\n', (6582, 6584), False, 'import time\n'), ((7215, 7232), 'numpy.stack', 'np.stack', (['rgbs', '(0)'], {}), '(rgbs, 0)\n', (7223, 7232), True, 'import numpy as np\n'), ((7245, 7263), 'numpy.stack', 'np.stack', (['disps', '(0)'], {}), '(disps, 0)\n', (7253, 7263), True, 'import numpy as np\n'), ((434, 479), 
'tensorflow.linalg.norm', 'tf.linalg.norm', (['rays_d[..., None, :]'], {'axis': '(-1)'}), '(rays_d[..., None, :], axis=-1)\n', (448, 479), True, 'import tensorflow as tf\n'), ((1042, 1101), 'tensorflow.math.cumprod', 'tf.math.cumprod', (['(1 - alpha + 1e-10)'], {'axis': '(-1)', 'exclusive': '(True)'}), '(1 - alpha + 1e-10, axis=-1, exclusive=True)\n', (1057, 1101), True, 'import tensorflow as tf\n'), ((1887, 1906), 'tensorflow.shape', 'tf.shape', (['ray_batch'], {}), '(ray_batch)\n', (1895, 1906), True, 'import tensorflow as tf\n'), ((2790, 2832), 'tensorflow.concat', 'tf.concat', (['[mid, z_vals[..., 1:]]'], {'axis': '(-1)'}), '([mid, z_vals[..., 1:]], axis=-1)\n', (2799, 2832), True, 'import tensorflow as tf\n'), ((2849, 2892), 'tensorflow.concat', 'tf.concat', (['[z_vals[..., :-1], mid]'], {'axis': '(-1)'}), '([z_vals[..., :-1], mid], axis=-1)\n', (2858, 2892), True, 'import tensorflow as tf\n'), ((3522, 3549), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['z_samples'], {}), '(z_samples)\n', (3538, 3549), True, 'import tensorflow as tf\n'), ((4713, 4725), 'tensorflow.concat', 'tf.concat', (['v'], {}), '(v)\n', (4722, 4725), True, 'import tensorflow as tf\n'), ((5000, 5026), 'helpers.get_rays', 'get_rays', (['H', 'W', 'focal', 'c2w'], {}), '(H, W, focal, c2w)\n', (5008, 5026), False, 'from helpers import ndc_rays, get_rays\n'), ((5541, 5568), 'tensorflow.reshape', 'tf.reshape', (['rays_o', '[-1, 3]'], {}), '(rays_o, [-1, 3])\n', (5551, 5568), True, 'import tensorflow as tf\n'), ((5609, 5636), 'tensorflow.reshape', 'tf.reshape', (['rays_o', '[-1, 3]'], {}), '(rays_o, [-1, 3])\n', (5619, 5636), True, 'import tensorflow as tf\n'), ((5838, 5874), 'tensorflow.concat', 'tf.concat', (['[rays, viewdirs]'], {'axis': '(-1)'}), '([rays, viewdirs], axis=-1)\n', (5847, 5874), True, 'import tensorflow as tf\n'), ((6028, 6056), 'tensorflow.reshape', 'tf.reshape', (['all_ret[k]', 'k_sh'], {}), '(all_ret[k], k_sh)\n', (6038, 6056), True, 'import tensorflow as tf\n'), ((348, 
402), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['[10000000000.0]', 'dists[..., :1].shape'], {}), '([10000000000.0], dists[..., :1].shape)\n', (363, 402), True, 'import tensorflow as tf\n'), ((609, 644), 'tensorflow.random.normal', 'tf.random.normal', (['raw[..., 3].shape'], {}), '(raw[..., 3].shape)\n', (625, 644), True, 'import tensorflow as tf\n'), ((2928, 2944), 'tensorflow.shape', 'tf.shape', (['z_vals'], {}), '(z_vals)\n', (2936, 2944), True, 'import tensorflow as tf\n'), ((3589, 3623), 'tensorflow.concat', 'tf.concat', (['[z_vals, z_samples]', '(-1)'], {}), '([z_vals, z_samples], -1)\n', (3598, 3623), True, 'import tensorflow as tf\n'), ((4442, 4487), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['rays_flat'], {}), '(rays_flat)\n', (4476, 4487), True, 'import tensorflow as tf\n'), ((5182, 5218), 'helpers.get_rays', 'get_rays', (['H', 'W', 'focal', 'c2w_staticcam'], {}), '(H, W, focal, c2w_staticcam)\n', (5190, 5218), False, 'from helpers import ndc_rays, get_rays\n'), ((5250, 5298), 'tensorflow.linalg.norm', 'tf.linalg.norm', (['viewdirs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(viewdirs, axis=-1, keepdims=True)\n', (5264, 5298), True, 'import tensorflow as tf\n'), ((5326, 5355), 'tensorflow.reshape', 'tf.reshape', (['viewdirs', '[-1, 3]'], {}), '(viewdirs, [-1, 3])\n', (5336, 5355), True, 'import tensorflow as tf\n'), ((5470, 5494), 'tensorflow.cast', 'tf.cast', (['(1.0)', 'tf.float32'], {}), '(1.0, tf.float32)\n', (5477, 5494), True, 'import tensorflow as tf\n'), ((7171, 7202), 'imageio.imwrite', 'imageio.imwrite', (['filename', 'rgb8'], {}), '(filename, rgb8)\n', (7186, 7202), False, 'import imageio\n'), ((1293, 1324), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['weights'], {'axis': '(-1)'}), '(weights, axis=-1)\n', (1306, 1324), True, 'import tensorflow as tf\n'), ((4216, 4249), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['z_samples', '(-1)'], {}), '(z_samples, -1)\n', (4234, 4249), True, 'import 
tensorflow as tf\n'), ((6645, 6656), 'time.time', 'time.time', ([], {}), '()\n', (6654, 6656), False, 'import time\n'), ((6972, 6999), 'numpy.square', 'np.square', (['(rgb - gt_imgs[i])'], {}), '(rgb - gt_imgs[i])\n', (6981, 6999), True, 'import numpy as np\n')] |
__author__ = 'indiquant'
from datetime import datetime
import numpy as np
class Option(object):
    """One listed option quote: underlying, call/put flag, maturity, strike
    and the observed bid/ask/last prices plus traded volume."""

    def __init__(self, undl, cp, mat, strike, bidpx, askpx, lastpx, volume):
        self._undl = undl
        self._cp = cp
        self._mat = mat
        self._strike = strike
        self._bidpx = bidpx
        self._askpx = askpx
        self._lastpx = lastpx
        self._volume = volume

    @property
    def bidpx(self):
        """Best bid price (may be None)."""
        return self._bidpx

    @property
    def askpx(self):
        """Best ask price (may be None)."""
        return self._askpx

    @property
    def midpx(self):
        """Mid price: bid/ask average, or whichever side exists, else None."""
        bid, ask = self._bidpx, self._askpx
        if bid is None:
            return ask  # may itself be None when both sides are missing
        if ask is None:
            return bid
        return (bid + ask) / 2.0

    @property
    def volume(self):
        """Traded volume."""
        return self._volume
class PutCallSurface(object):
    """Call/put option quotes for one underlying, organised by maturity and
    strike, exposed as dense maturity-by-strike price and size grids."""

    def __init__(self, undl):
        self._undl = undl
        # cp -> maturity date -> strike -> Option
        self._surface = {'C': {}, 'P': {}}
        self._mats = np.array([])
        self._strikes = {}
        self._issorted = True

    def add(self, cp, mat, k, bidpx, askpx, lastpx, volume):
        """Register one option quote; ``mat`` is a yyyymmdd integer."""
        mat = self._datefint(mat)
        self._issorted = False
        self._mats = np.append(self._mats, mat)
        known = self._strikes.get(mat)
        self._strikes[mat] = np.array([k]) if known is None else np.append(known, k)
        option = Option(self._undl, cp, mat, k, bidpx, askpx, lastpx, volume)
        self._surface[cp].setdefault(mat, {})[k] = option

    def get_grid(self, cp='C'):
        """Return (mid-price grid, volume grid) as maturity x strike arrays;
        cells without a quote hold NaN."""
        self._sort()
        all_strikes = np.array([])
        for per_mat in self._strikes.values():
            all_strikes = np.append(all_strikes, per_mat)
        all_strikes = np.sort(np.unique(all_strikes))

        px_rows, sz_rows = [], []
        for mat in self._mats:
            px_row, sz_row = [], []
            for strike in all_strikes:
                try:
                    option = self._surface[cp][mat][strike]
                except KeyError:
                    px_row.append(np.nan)
                    sz_row.append(np.nan)
                else:
                    px_row.append(option.midpx)
                    sz_row.append(option.volume)
            px_rows.append(px_row)
            sz_rows.append(sz_row)
        return np.array(px_rows), np.array(sz_rows)

    def _datefint(self, dt):
        """Parse a yyyymmdd integer (or string) into a ``datetime.date``.

        :type dt: int
        """
        return datetime.strptime(str(dt), '%Y%m%d').date()

    def _sort(self):
        """Lazily sort and deduplicate maturities/strikes after adds."""
        if self._issorted:
            return
        self._mats = np.sort(np.unique(self._mats))
        for mat in self._strikes:
            self._strikes[mat] = np.sort(np.unique(self._strikes[mat]))
        self._issorted = True
| [
"numpy.append",
"numpy.array",
"numpy.unique"
] | [((1075, 1087), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1083, 1087), True, 'import numpy as np\n'), ((1294, 1320), 'numpy.append', 'np.append', (['self._mats', 'mat'], {}), '(self._mats, mat)\n', (1303, 1320), True, 'import numpy as np\n'), ((1774, 1786), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1782, 1786), True, 'import numpy as np\n'), ((1391, 1404), 'numpy.array', 'np.array', (['[k]'], {}), '([k])\n', (1399, 1404), True, 'import numpy as np\n'), ((1453, 1485), 'numpy.append', 'np.append', (['self._strikes[mat]', 'k'], {}), '(self._strikes[mat], k)\n', (1462, 1485), True, 'import numpy as np\n'), ((1859, 1887), 'numpy.append', 'np.append', (['strikes', '_strikes'], {}), '(strikes, _strikes)\n', (1868, 1887), True, 'import numpy as np\n'), ((1914, 1932), 'numpy.unique', 'np.unique', (['strikes'], {}), '(strikes)\n', (1923, 1932), True, 'import numpy as np\n'), ((2430, 2448), 'numpy.array', 'np.array', (['px_array'], {}), '(px_array)\n', (2438, 2448), True, 'import numpy as np\n'), ((2450, 2468), 'numpy.array', 'np.array', (['sz_array'], {}), '(sz_array)\n', (2458, 2468), True, 'import numpy as np\n'), ((2690, 2711), 'numpy.unique', 'np.unique', (['self._mats'], {}), '(self._mats)\n', (2699, 2711), True, 'import numpy as np\n'), ((2792, 2819), 'numpy.unique', 'np.unique', (['self._strikes[m]'], {}), '(self._strikes[m])\n', (2801, 2819), True, 'import numpy as np\n')] |
# ##### BEGIN GPL LICENSE BLOCK #####
# KeenTools for blender is a blender addon for using KeenTools in Blender.
# Copyright (C) 2019 KeenTools
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# ##### END GPL LICENSE BLOCK #####
import numpy as np
import bpy
import gpu
import bgl
from gpu_extras.batch import batch_for_shader
from . shaders import (simple_fill_vertex_shader,
black_fill_fragment_shader, residual_vertex_shader,
residual_fragment_shader, raster_image_vertex_shader,
raster_image_fragment_shader)
from ..config import Config
from ..utils.images import (check_bpy_image_has_same_size,
find_bpy_image_by_name,
remove_bpy_image, add_alpha_channel)
class FBEdgeShaderBase:
""" Wireframe drawing class """
handler_list = []
@classmethod
def add_handler_list(cls, handler):
cls.handler_list.append(handler)
@classmethod
def remove_handler_list(cls, handler):
if handler in cls.handler_list:
cls.handler_list.remove(handler)
@classmethod
def is_handler_list_empty(cls):
return len(cls.handler_list) == 0
def __init__(self):
self.draw_handler = None # for handler storage
self.fill_shader = None
self.line_shader = None
self.fill_batch = None
self.line_batch = None
# Triangle vertices & indices
self.vertices = []
self.indices = []
# Edge vertices
self.edges_vertices = []
self.edges_indices = []
self.edges_colors = []
self.vertices_colors = []
# Check if blender started in background mode
if not bpy.app.background:
self.init_shaders()
def is_working(self):
return not (self.draw_handler is None)
def init_color_data(self, color=(0.5, 0.0, 0.7, 0.2)):
self.edges_colors = np.full(
(len(self.edges_vertices), 4), color).tolist()
def init_special_areas(self, mesh, pairs, color=(0.5, 0.0, 0.7, 0.2)):
for i, edge in enumerate(mesh.edges):
vv = edge.vertices
if ((vv[0], vv[1]) in pairs) or ((vv[1], vv[0]) in pairs):
self.edges_colors[i * 2] = color
self.edges_colors[i * 2 + 1] = color
def register_handler(self, args):
if self.draw_handler is not None:
self.unregister_handler()
self.draw_handler = bpy.types.SpaceView3D.draw_handler_add(
self.draw_callback, args, "WINDOW", "POST_VIEW")
self.add_handler_list(self.draw_handler)
def unregister_handler(self):
if self.draw_handler is not None:
bpy.types.SpaceView3D.draw_handler_remove(
self.draw_handler, "WINDOW")
self.remove_handler_list(self.draw_handler)
self.draw_handler = None
def add_color_vertices(self, color, verts):
for i, v in enumerate(verts):
self.vertices.append(verts[i])
self.vertices_colors.append(color)
def add_vertices_colors(self, verts, colors):
for i, v in enumerate(verts):
self.vertices.append(verts[i])
self.vertices_colors.append(colors[i])
def set_color_vertices(self, color, verts):
self.clear_vertices()
self.add_color_vertices(color, verts)
def set_vertices_colors(self, verts, colors):
self.clear_vertices()
self.add_vertices_colors(verts, colors)
def clear_vertices(self):
self.vertices = []
self.vertices_colors = []
def init_shaders(self):
pass
def draw_callback(self, op, context):
pass
class FBEdgeShader2D(FBEdgeShaderBase):
def __init__(self):
self.edge_lengths = []
super().__init__()
def init_shaders(self):
self.line_shader = gpu.types.GPUShader(
residual_vertex_shader(), residual_fragment_shader())
def draw_callback(self, op, context):
# Force Stop
if self.is_handler_list_empty():
self.unregister_handler()
return
if self.line_shader is None or self.line_batch is None:
return
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
self.line_shader.bind()
self.line_batch.draw(self.line_shader)
def create_batch(self):
# Our shader batch
self.line_batch = batch_for_shader(
self.line_shader, 'LINES',
{"pos": self.vertices, "color": self.vertices_colors,
"lineLength": self.edge_lengths}
)
def register_handler(self, args):
if self.draw_handler is not None:
self.unregister_handler()
self.draw_handler = bpy.types.SpaceView3D.draw_handler_add(
self.draw_callback, args, "WINDOW", "POST_PIXEL")
self.add_handler_list(self.draw_handler)
class FBRasterEdgeShader3D(FBEdgeShaderBase):
@staticmethod
def _gamma_color(col, power=2.2):
return [x ** power for x in col]
@staticmethod
def _inverse_gamma_color(col, power=2.2):
return [x ** (1.0 / power) for x in col]
def __init__(self):
self._edges_indices = np.array([], dtype=np.int)
self._edges_uvs = []
self._colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
self._opacity = 0.3
self._use_simple_shader = False
super().__init__()
def init_colors(self, colors, opacity):
self._colors = [self._inverse_gamma_color(color[:3]) for color in colors]
self._opacity = opacity
def switch_to_simple_shader(self):
self._use_simple_shader = True
def switch_to_complex_shader(self):
self._use_simple_shader = False
def init_wireframe_image(self, fb, show_specials):
if not show_specials or not fb.face_texture_available():
self.switch_to_simple_shader()
return False
fb.set_face_texture_colors(self._colors)
image_data = fb.face_texture()[::2, ::2, :] # sample down x0.5
size = image_data.shape[:2]
assert size[0] > 0 and size[1] > 0
image_name = Config.coloring_texture_name
wireframe_image = find_bpy_image_by_name(image_name)
if wireframe_image is None or \
not check_bpy_image_has_same_size(wireframe_image, size):
remove_bpy_image(wireframe_image)
wireframe_image = bpy.data.images.new(image_name,
width=size[1],
height=size[0],
alpha=True,
float_buffer=False)
if wireframe_image:
rgba = add_alpha_channel(image_data)
wireframe_image.pixels[:] = rgba.ravel()
wireframe_image.pack()
self.switch_to_complex_shader()
return True
self.switch_to_simple_shader()
return False
def _activate_coloring_image(self, image):
if image.gl_load():
raise Exception()
image.gl_touch()
def _deactivate_coloring_image(self, image):
if image is not None:
image.gl_free()
def _check_coloring_image(self, image):
if self._use_simple_shader:
return True
if image is None:
return False
if image.bindcode == 0:
self._activate_coloring_image(image)
return True
def draw_callback(self, op, context):
# Force Stop
wireframe_image = find_bpy_image_by_name(Config.coloring_texture_name)
if self.is_handler_list_empty() or \
not self._check_coloring_image(wireframe_image):
self.unregister_handler()
return
bgl.glEnable(bgl.GL_BLEND)
bgl.glEnable(bgl.GL_LINE_SMOOTH)
bgl.glHint(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)
bgl.glBlendFunc(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)
bgl.glEnable(bgl.GL_DEPTH_TEST)
bgl.glEnable(bgl.GL_POLYGON_OFFSET_FILL)
bgl.glPolygonOffset(1.0, 1.0)
bgl.glColorMask(bgl.GL_FALSE, bgl.GL_FALSE, bgl.GL_FALSE, bgl.GL_FALSE)
bgl.glPolygonMode(bgl.GL_FRONT_AND_BACK, bgl.GL_FILL)
self.fill_batch.draw(self.fill_shader)
bgl.glColorMask(bgl.GL_TRUE, bgl.GL_TRUE, bgl.GL_TRUE, bgl.GL_TRUE)
bgl.glDisable(bgl.GL_POLYGON_OFFSET_FILL)
bgl.glDepthMask(bgl.GL_FALSE)
bgl.glPolygonMode(bgl.GL_FRONT_AND_BACK, bgl.GL_LINE)
bgl.glEnable(bgl.GL_DEPTH_TEST)
if not self._use_simple_shader:
# coloring_image.bindcode should not be zero
# if we don't want to destroy video driver in Blender
if not wireframe_image or wireframe_image.bindcode == 0:
self.switch_to_simple_shader()
else:
bgl.glActiveTexture(bgl.GL_TEXTURE0)
bgl.glBindTexture(bgl.GL_TEXTURE_2D,
wireframe_image.bindcode)
self.line_shader.bind()
self.line_shader.uniform_int('image', 0)
self.line_shader.uniform_float('opacity', self._opacity)
self.line_batch.draw(self.line_shader)
if self._use_simple_shader:
self.simple_line_shader.bind()
self.simple_line_shader.uniform_float(
'color', ((*self._colors[0][:3], self._opacity)))
self.simple_line_batch.draw(self.simple_line_shader)
bgl.glPolygonMode(bgl.GL_FRONT_AND_BACK, bgl.GL_FILL)
bgl.glDepthMask(bgl.GL_TRUE)
bgl.glDisable(bgl.GL_DEPTH_TEST)
def create_batches(self):
if bpy.app.background:
return
self.fill_batch = batch_for_shader(
self.fill_shader, 'TRIS',
{'pos': self.vertices},
indices=self.indices,
)
self.simple_line_batch = batch_for_shader(
self.simple_line_shader, 'LINES',
{'pos': self.edges_vertices},
)
self.line_batch = batch_for_shader(
self.line_shader, 'LINES',
{'pos': self.edges_vertices, 'texCoord': self._edges_uvs}
)
def init_shaders(self):
self.fill_shader = gpu.types.GPUShader(
simple_fill_vertex_shader(), black_fill_fragment_shader())
self.line_shader = gpu.types.GPUShader(
raster_image_vertex_shader(), raster_image_fragment_shader())
self.simple_line_shader = gpu.shader.from_builtin('3D_UNIFORM_COLOR')
def init_geom_data(self, obj):
mesh = obj.data
mesh.calc_loop_triangles()
verts = np.empty((len(mesh.vertices), 3), 'f')
indices = np.empty((len(mesh.loop_triangles), 3), 'i')
mesh.vertices.foreach_get(
"co", np.reshape(verts, len(mesh.vertices) * 3))
mesh.loop_triangles.foreach_get(
"vertices", np.reshape(indices, len(mesh.loop_triangles) * 3))
# Object matrix usage
m = np.array(obj.matrix_world, dtype=np.float32).transpose()
vv = np.ones((len(mesh.vertices), 4), dtype=np.float32)
vv[:, :-1] = verts
vv = vv @ m
self.vertices = vv[:, :3] # Transformed vertices
self.indices = indices
def _clear_edge_indices(self):
self._edges_indices = np.array([], dtype=np.int)
self._edges_uvs = []
def init_edge_indices(self, builder):
if not builder.face_texture_available():
self._clear_edge_indices()
return
keyframes = builder.keyframes()
if len(keyframes) == 0:
return
geo = builder.applied_args_replaced_uvs_model_at(keyframes[0])
me = geo.mesh(0)
face_counts = [me.face_size(x) for x in range(me.faces_count())]
indices = np.empty((sum(face_counts), 2), 'i')
tex_coords = np.empty((sum(face_counts) * 2, 2), 'f')
i = 0
for face, count in enumerate(face_counts):
tex_coords[i * 2] = me.uv(face, count - 1)
tex_coords[i * 2 + 1] = me.uv(face, 0)
indices[i] = (me.face_point(face, count - 1),
me.face_point(face, 0))
i += 1
for k in range(1, count):
tex_coords[i * 2] = me.uv(face, k - 1)
tex_coords[i * 2 +1] = me.uv(face, k)
indices[i] = (me.face_point(face, k - 1),
me.face_point(face, k))
i += 1
self._edges_indices = indices
self._edges_uvs = tex_coords
self.update_edges_vertices()
def update_edges_vertices(self):
self.edges_vertices = self.vertices[self._edges_indices.ravel()]
| [
"gpu.shader.from_builtin",
"bpy.types.SpaceView3D.draw_handler_remove",
"bgl.glPolygonMode",
"bgl.glEnable",
"bgl.glActiveTexture",
"bgl.glHint",
"bgl.glBindTexture",
"bpy.types.SpaceView3D.draw_handler_add",
"bgl.glColorMask",
"bpy.data.images.new",
"numpy.array",
"bgl.glPolygonOffset",
"bg... | [((3074, 3165), 'bpy.types.SpaceView3D.draw_handler_add', 'bpy.types.SpaceView3D.draw_handler_add', (['self.draw_callback', 'args', '"""WINDOW"""', '"""POST_VIEW"""'], {}), "(self.draw_callback, args, 'WINDOW',\n 'POST_VIEW')\n", (3112, 3165), False, 'import bpy\n'), ((4816, 4842), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_BLEND'], {}), '(bgl.GL_BLEND)\n', (4828, 4842), False, 'import bgl\n'), ((4851, 4883), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_LINE_SMOOTH'], {}), '(bgl.GL_LINE_SMOOTH)\n', (4863, 4883), False, 'import bgl\n'), ((4892, 4942), 'bgl.glHint', 'bgl.glHint', (['bgl.GL_LINE_SMOOTH_HINT', 'bgl.GL_NICEST'], {}), '(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)\n', (4902, 4942), False, 'import bgl\n'), ((4951, 5012), 'bgl.glBlendFunc', 'bgl.glBlendFunc', (['bgl.GL_SRC_ALPHA', 'bgl.GL_ONE_MINUS_SRC_ALPHA'], {}), '(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)\n', (4966, 5012), False, 'import bgl\n'), ((5175, 5310), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['self.line_shader', '"""LINES"""', "{'pos': self.vertices, 'color': self.vertices_colors, 'lineLength': self.\n edge_lengths}"], {}), "(self.line_shader, 'LINES', {'pos': self.vertices, 'color':\n self.vertices_colors, 'lineLength': self.edge_lengths})\n", (5191, 5310), False, 'from gpu_extras.batch import batch_for_shader\n'), ((5501, 5593), 'bpy.types.SpaceView3D.draw_handler_add', 'bpy.types.SpaceView3D.draw_handler_add', (['self.draw_callback', 'args', '"""WINDOW"""', '"""POST_PIXEL"""'], {}), "(self.draw_callback, args, 'WINDOW',\n 'POST_PIXEL')\n", (5539, 5593), False, 'import bpy\n'), ((5966, 5992), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (5974, 5992), True, 'import numpy as np\n'), ((8588, 8614), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_BLEND'], {}), '(bgl.GL_BLEND)\n', (8600, 8614), False, 'import bgl\n'), ((8623, 8655), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_LINE_SMOOTH'], {}), '(bgl.GL_LINE_SMOOTH)\n', (8635, 8655), False, 
'import bgl\n'), ((8664, 8714), 'bgl.glHint', 'bgl.glHint', (['bgl.GL_LINE_SMOOTH_HINT', 'bgl.GL_NICEST'], {}), '(bgl.GL_LINE_SMOOTH_HINT, bgl.GL_NICEST)\n', (8674, 8714), False, 'import bgl\n'), ((8723, 8784), 'bgl.glBlendFunc', 'bgl.glBlendFunc', (['bgl.GL_SRC_ALPHA', 'bgl.GL_ONE_MINUS_SRC_ALPHA'], {}), '(bgl.GL_SRC_ALPHA, bgl.GL_ONE_MINUS_SRC_ALPHA)\n', (8738, 8784), False, 'import bgl\n'), ((8794, 8825), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_DEPTH_TEST'], {}), '(bgl.GL_DEPTH_TEST)\n', (8806, 8825), False, 'import bgl\n'), ((8834, 8874), 'bgl.glEnable', 'bgl.glEnable', (['bgl.GL_POLYGON_OFFSET_FILL'], {}), '(bgl.GL_POLYGON_OFFSET_FILL)\n', (8846, 8874), False, 'import bgl\n'), ((8883, 8912), 'bgl.glPolygonOffset', 'bgl.glPolygonOffset', (['(1.0)', '(1.0)'], {}), '(1.0, 1.0)\n', (8902, 8912), False, 'import bgl\n'), ((8922, 8993), 'bgl.glColorMask', 'bgl.glColorMask', (['bgl.GL_FALSE', 'bgl.GL_FALSE', 'bgl.GL_FALSE', 'bgl.GL_FALSE'], {}), '(bgl.GL_FALSE, bgl.GL_FALSE, bgl.GL_FALSE, bgl.GL_FALSE)\n', (8937, 8993), False, 'import bgl\n'), ((9002, 9055), 'bgl.glPolygonMode', 'bgl.glPolygonMode', (['bgl.GL_FRONT_AND_BACK', 'bgl.GL_FILL'], {}), '(bgl.GL_FRONT_AND_BACK, bgl.GL_FILL)\n', (9019, 9055), False, 'import bgl\n'), ((9113, 9180), 'bgl.glColorMask', 'bgl.glColorMask', (['bgl.GL_TRUE', 'bgl.GL_TRUE', 'bgl.GL_TRUE', 'bgl.GL_TRUE'], {}), '(bgl.GL_TRUE, bgl.GL_TRUE, bgl.GL_TRUE, bgl.GL_TRUE)\n', (9128, 9180), False, 'import bgl\n'), ((9189, 9230), 'bgl.glDisable', 'bgl.glDisable', (['bgl.GL_POLYGON_OFFSET_FILL'], {}), '(bgl.GL_POLYGON_OFFSET_FILL)\n', (9202, 9230), False, 'import bgl\n'), ((9240, 9269), 'bgl.glDepthMask', 'bgl.glDepthMask', (['bgl.GL_FALSE'], {}), '(bgl.GL_FALSE)\n', (9255, 9269), False, 'import bgl\n'), ((9278, 9331), 'bgl.glPolygonMode', 'bgl.glPolygonMode', (['bgl.GL_FRONT_AND_BACK', 'bgl.GL_LINE'], {}), '(bgl.GL_FRONT_AND_BACK, bgl.GL_LINE)\n', (9295, 9331), False, 'import bgl\n'), ((9340, 9371), 'bgl.glEnable', 'bgl.glEnable', 
(['bgl.GL_DEPTH_TEST'], {}), '(bgl.GL_DEPTH_TEST)\n', (9352, 9371), False, 'import bgl\n'), ((10332, 10385), 'bgl.glPolygonMode', 'bgl.glPolygonMode', (['bgl.GL_FRONT_AND_BACK', 'bgl.GL_FILL'], {}), '(bgl.GL_FRONT_AND_BACK, bgl.GL_FILL)\n', (10349, 10385), False, 'import bgl\n'), ((10394, 10422), 'bgl.glDepthMask', 'bgl.glDepthMask', (['bgl.GL_TRUE'], {}), '(bgl.GL_TRUE)\n', (10409, 10422), False, 'import bgl\n'), ((10431, 10463), 'bgl.glDisable', 'bgl.glDisable', (['bgl.GL_DEPTH_TEST'], {}), '(bgl.GL_DEPTH_TEST)\n', (10444, 10463), False, 'import bgl\n'), ((10571, 10664), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['self.fill_shader', '"""TRIS"""', "{'pos': self.vertices}"], {'indices': 'self.indices'}), "(self.fill_shader, 'TRIS', {'pos': self.vertices}, indices=\n self.indices)\n", (10587, 10664), False, 'from gpu_extras.batch import batch_for_shader\n'), ((10741, 10826), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['self.simple_line_shader', '"""LINES"""', "{'pos': self.edges_vertices}"], {}), "(self.simple_line_shader, 'LINES', {'pos': self.edges_vertices}\n )\n", (10757, 10826), False, 'from gpu_extras.batch import batch_for_shader\n'), ((10884, 10990), 'gpu_extras.batch.batch_for_shader', 'batch_for_shader', (['self.line_shader', '"""LINES"""', "{'pos': self.edges_vertices, 'texCoord': self._edges_uvs}"], {}), "(self.line_shader, 'LINES', {'pos': self.edges_vertices,\n 'texCoord': self._edges_uvs})\n", (10900, 10990), False, 'from gpu_extras.batch import batch_for_shader\n'), ((11327, 11370), 'gpu.shader.from_builtin', 'gpu.shader.from_builtin', (['"""3D_UNIFORM_COLOR"""'], {}), "('3D_UNIFORM_COLOR')\n", (11350, 11370), False, 'import gpu\n'), ((12165, 12191), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int'}), '([], dtype=np.int)\n', (12173, 12191), True, 'import numpy as np\n'), ((3313, 3383), 'bpy.types.SpaceView3D.draw_handler_remove', 'bpy.types.SpaceView3D.draw_handler_remove', (['self.draw_handler', '"""WINDOW"""'], 
{}), "(self.draw_handler, 'WINDOW')\n", (3354, 3383), False, 'import bpy\n'), ((7184, 7282), 'bpy.data.images.new', 'bpy.data.images.new', (['image_name'], {'width': 'size[1]', 'height': 'size[0]', 'alpha': '(True)', 'float_buffer': '(False)'}), '(image_name, width=size[1], height=size[0], alpha=True,\n float_buffer=False)\n', (7203, 7282), False, 'import bpy\n'), ((9686, 9722), 'bgl.glActiveTexture', 'bgl.glActiveTexture', (['bgl.GL_TEXTURE0'], {}), '(bgl.GL_TEXTURE0)\n', (9705, 9722), False, 'import bgl\n'), ((9739, 9801), 'bgl.glBindTexture', 'bgl.glBindTexture', (['bgl.GL_TEXTURE_2D', 'wireframe_image.bindcode'], {}), '(bgl.GL_TEXTURE_2D, wireframe_image.bindcode)\n', (9756, 9801), False, 'import bgl\n'), ((11841, 11885), 'numpy.array', 'np.array', (['obj.matrix_world'], {'dtype': 'np.float32'}), '(obj.matrix_world, dtype=np.float32)\n', (11849, 11885), True, 'import numpy as np\n')] |
"""
Module containing visibility-related classes.
This contains classes to hold general visibilities and specialised classes to hold visibilities
from certain spacecraft or instruments.
"""
from datetime import datetime
import astropy.units as u
import numpy as np
from astropy.table import Table
from sunpy.io.fits import fits
from sunpy.map import Map
from sunpy.time import parse_time
from .transform import dft_map, idft_map
__all__ = ['Visibility', 'RHESSIVisibility']
class Visibility(object):
    r"""
    Hold a set of related visibilities and information.

    Attributes
    ----------
    uv : `numpy.ndarray`
        Array of 2xN u, v coordinates where visibilities will be evaluated
    vis : `numpy.ndarray`
        Array of N complex visibilities at coordinates in `uv`
    xyoffset : `float` (x, y), optional
        The x, y offset of the phase center
    pixel_size : `float` (dx, dy), optional
        Pixel size in x and y directions

    """

    # TODO should really ensure vis has units e.g. photons cm^-2 s^-1
    # NOTE(review): the decorator checks a parameter named `center`, which does
    # not exist in this signature, so `xyoffset` is effectively not
    # unit-checked here -- confirm whether `center=u.arcsec` was meant to be
    # `xyoffset=u.arcsec` (changing it would break unitless subclass defaults).
    @u.quantity_input(uv=1/u.arcsec, center=u.arcsec, pixel_size=u.arcsec)
    def __init__(self, uv, vis, xyoffset=(0., 0.) * u.arcsec, pixel_size=(1., 1.) * u.arcsec):
        r"""
        Initialise a new Visibility object.

        Parameters
        ----------
        uv : `numpy.ndarray`
            Array of 2xN u, v coordinates where visibilities will be evaluated
        vis : `numpy.ndarray`
            The complex visibilities
        xyoffset : `tuple` (x-center, y-center), optional
            The x, y offset of the phase center
        pixel_size : `tuple` (x-size, y-size)
            Pixel size in the given direction (x, y)

        """
        self.uv = uv
        # Store as a complex array regardless of the input dtype
        self.vis = np.array(vis, dtype=complex)
        self.xyoffset = xyoffset
        self.pixel_size = pixel_size

    def __repr__(self):
        r"""
        Return a printable representation of the visibility.

        Returns
        -------
        `str`

        """
        return f"{self.uv.size}, {self.vis}"

    def __eq__(self, other):
        r"""
        Equality for Visibility class.

        Parameters
        ----------
        other : `Visibility`
            The other visibility to compare

        Returns
        -------
        `boolean`

        """
        # Previously, comparing against an object with a different attribute
        # set raised KeyError; now non-Visibility comparisons defer via
        # NotImplemented and mismatched attribute sets compare unequal.
        if not isinstance(other, Visibility):
            return NotImplemented
        if self.__dict__.keys() != other.__dict__.keys():
            return False
        # array_equal handles both array-valued and scalar attributes
        return all(np.array_equal(value, other.__dict__[key])
                   for key, value in self.__dict__.items())

    @classmethod
    def from_fits_file(cls, filename):
        r"""
        Create a new visibility object from a fits file.

        Parameters
        ----------
        filename : `basestring`
            The path/filename of the the fits file to read

        Returns
        -------
        `Visibility`
            The new visibility object

        Raises
        ------
        TypeError
            If the fits file is not from a supported instrument

        """
        with fits.open(filename) as hdu_list:
            primary_header = hdu_list[0].header
            # Files written by this module tag themselves via the 'source' key
            if primary_header.get('source') == 'xrayvision':
                return Visibility.from_fits(hdu_list)
            elif primary_header.get('TELESCOP') == 'RHESSI' and \
                    primary_header.get('INSTRUME') == 'RHESSI':
                return RHESSIVisibility.from_fits_old(hdu_list=hdu_list)
            else:
                raise TypeError("This type of fits visibility file is not supported")

    @classmethod
    def from_fits(cls, hdu_list):
        """
        Create a visibility from fits HDUs written by `to_fits_file`.

        Parameters
        ----------
        hdu_list : `astropy.io.fits.HDUList`
            HDU list whose second HDU is the visibility binary table

        Returns
        -------
        `Visibility`

        """
        vis_hdu = hdu_list[1]
        # Fall back to arcsec if no unit was recorded in the header
        spatial_unit = u.Unit(vis_hdu.header.get('unit', 'arcsec'))
        # xyoffset/pixel_size are repeated on every table row (see
        # to_fits_file); collapse them back to single values
        xyoffset = np.unique(vis_hdu.data['xyoffset'], axis=0)
        pixel_size = np.unique(vis_hdu.data['pixel_size'], axis=0)
        return Visibility(vis_hdu.data['uv'].T / spatial_unit, vis_hdu.data['vis'].T,
                          xyoffset.flatten() * spatial_unit, pixel_size.flatten() * spatial_unit)

    @classmethod
    @u.quantity_input(center=u.arcsec, pixel_size=u.arcsec)
    def from_image(cls, image, uv, center=(0.0, 0.0) * u.arcsec, pixel_size=(1.0, 1.0) * u.arcsec):
        r"""
        Create a new Visibility object from the given image array.

        Parameters
        ----------
        image : `numpy.ndarray`
            The 2D input image
        uv : `numpy.ndarray`
            Array of 2xN u, v coordinates where the visibilities will be evaluated
        center : `float` (x, y)
            The coordinates of the center of the image
        pixel_size : `float` (dx, dy)
            The pixel size in x and y directions

        Returns
        -------
        `Visibility`
            The new visibility object

        """
        vis = dft_map(image, uv, center=center, pixel_size=pixel_size)
        return Visibility(uv, vis, center, pixel_size)

    @classmethod
    @u.quantity_input(uv=1 / u.arcsec)
    def from_map(cls, inmap, uv):
        r"""
        Create a new Visibility object from the given map.

        Parameters
        ----------
        inmap : `sunpy.map.Map`
            The input map
        uv : `numpy.ndarray`
            Array of 2xN u, v coordinates where the visibilities will be evaluated

        Returns
        -------
        `Visibility`
            The new visibility object

        """
        meta = inmap.meta
        # Read map center and pixel scale from the FITS-style metadata,
        # falling back to (0, 0) and (1, 1) when keys are absent
        new_pos = np.array([0., 0.])
        if "crval1" in meta:
            new_pos[0] = float(meta["crval1"])
        if "crval2" in meta:
            new_pos[1] = float(meta["crval2"])

        new_psize = np.array([1., 1.])
        if "cdelt1" in meta:
            new_psize[0] = float(meta["cdelt1"])
        if "cdelt2" in meta:
            new_psize[1] = float(meta["cdelt2"])

        return cls.from_image(inmap.data, uv, center=new_pos * u.arcsec,
                              pixel_size=new_psize * u.arcsec)

    @u.quantity_input(center=u.arcsec, pixel_size=u.arcsec)
    def to_image(self, shape, center=[0., 0.]*u.arcsec, pixel_size=None):
        r"""
        Create a image by performing a back projection or inverse transform on the visibilities.

        Parameters
        ----------
        shape : `int`
            Shape of the output image to create (m, n)
        center : `float`, (x, y)
            Coordinates of the map center if given will override `self.xyoffset`
        pixel_size : `float` (dx, dy), optional
            Size of the pixels in x, y if only one give assumed same in both directions will \
            override `self.pixel_size`

        Returns
        -------
        `numpy.ndarray`
            Output image

        """
        pixel = self.pixel_size
        # Bug fix: `if pixel_size:` raised "truth value of an array is
        # ambiguous" for 2-element Quantities (and skipped scalar zero);
        # test explicitly against None instead.
        if pixel_size is not None:
            if pixel_size.ndim == 0:
                # A scalar applies to both directions
                pixel = pixel_size.repeat(2)
            elif pixel_size.ndim == 1 and pixel_size.size == 2:
                pixel = pixel_size
            else:
                raise ValueError(
                    f"Pixel_size must be scalar or of length of 2 not {pixel_size.shape}")  # noqa
        return idft_map(self.vis, shape, self.uv, center=center, pixel_size=pixel)

    @u.quantity_input(center=u.arcsec, pixel_size=u.arcsec)
    def to_map(self, shape=(33, 33), center=None, pixel_size=None):
        r"""
        Create a map by performing a back projection or inverse transform on the visibilities.

        Parameters
        ----------
        shape : `int` (m, n)
            Shape of the output map in pixels
        center : `float` (x, y)
            Coordinates of the map center if given will override `self.xyoffset`
        pixel_size : `float` (dx, dy), optional
            Size of the pixels in x, y if only one give assumed same in both directions

        Returns
        -------
        `sunpy.map.Map`
            Map object with the map created from the visibilities and the meta data will contain the
            offset and the pixel size

        """
        header = {'crval1': self.xyoffset[0, 0].value if self.xyoffset.ndim == 2
                  else self.xyoffset[0].value,
                  'crval2': self.xyoffset[0, 1].value if self.xyoffset.ndim == 2
                  else self.xyoffset[1].value,
                  'cdelt1': self.pixel_size[0].value,
                  'cdelt2': self.pixel_size[1].value,
                  'ctype1': 'HPLN-TAN',
                  'ctype2': 'HPLT-TAN',
                  'naxis': 2,
                  'naxis1': shape[0],
                  'naxis2': shape[1]}
        # Bug fix: `if center:` / `if pixel_size:` raised ValueError for
        # multi-element Quantities; compare against None explicitly.
        if center is not None:
            header['crval1'] = center[0].value
            header['crval2'] = center[1].value
        if pixel_size is not None:
            if pixel_size.ndim == 0:
                header['cdelt1'] = pixel_size.value
                header['cdelt2'] = pixel_size.value
            elif pixel_size.ndim == 1 and pixel_size.size == 2:
                header['cdelt1'] = pixel_size[0].value
                header['cdelt2'] = pixel_size[1].value
            else:
                raise ValueError(f"pixel_size can have a length of 1 or 2 not {pixel_size.shape}")
        # NOTE(review): `center` is not forwarded to `to_image`, so the image
        # is always back-projected about the origin even though the header
        # records `center` -- confirm this is intentional.
        data = self.to_image(shape, pixel_size=pixel_size)
        return Map((data, header))

    def to_fits_file(self, path):
        """
        Write the visibilities to a fits file.

        Parameters
        ----------
        path : 'basestr'
            Path to fits file

        Raises
        ------
        ValueError
            If uv, xyoffset and pixel_size do not share the same base unit.

        """
        primary_hdu = fits.PrimaryHDU()
        # Tag the file so from_fits_file can recognise it
        primary_hdu.header['source'] = 'xrayvision'
        # xyoffset/pixel_size are constant, but are repeated on every row so
        # the table is rectangular
        vis_table = Table([self.uv.value.T, self.vis,
                           np.repeat([self.xyoffset.value], self.vis.shape, axis=0),
                           np.repeat([self.pixel_size.value], self.vis.shape, axis=0)],
                          names=('uv', 'vis', 'xyoffset', 'pixel_size'))
        vis_hdu = fits.BinTableHDU.from_columns(fits.ColDefs(vis_table.as_array()))
        if self.uv.unit.bases == self.xyoffset.unit.bases == self.pixel_size.unit.bases:
            vis_hdu.header.set('unit', str(self.uv.unit.bases[0]))
        else:
            raise ValueError(f'Units must have the same base unit uv: {self.uv.unit}, xyoffset: '
                             f'{self.xyoffset.unit}, pixel_size: {self.pixel_size.unit}')
        hdul = fits.HDUList([primary_hdu, vis_hdu])
        # The previous `try: ... except Exception as e: raise e` added
        # nothing; let any write error propagate directly.
        hdul.writeto(path)
class RHESSIVisibility(Visibility):
"""
A set of RHESSI visibilities.
Parameters
----------
uv : `numpy.ndarray`
The u, v coordinates of the visibilities
vis : `numpy.ndarray`
The complex visibility
isc : `int based array-like`
Related to the grid/detector
harm : `int`
Harmonic used
erange : `numpy.ndarray`
Energy range
trange : `numpy.ndarray`
Time range
totflux : `numpy.ndarray`
Total flux
sigamp : `numpy.ndarray`
Sigma or error on visibility
chi2 : `numpy.ndarray`
Chi squared from fit
xyoffset : `np.ndarray`
Offset from Sun centre
type : `str`
count, photon, electron
units : `str`
If it is in idl format it will be converted
atten_state : `int`
State of the attenuator
count : `numpy.ndarray`
detector counts
pixel_size : `array-like`
size of a pixel in arcseconds
Examples
--------
Notes
-----
"""
# For single time and energy ranges these data columns should be constant
CONSTANT_DATA_COLUMNS = ['harm', 'erange', 'trange', 'xyoffset', 'type', 'units',
'atten_state', 'norm_ph_factor']
DYANMIC_DATA_COLUMNS = ['isc', 'u', 'v', 'obsvis', 'totflux', 'sigamp', 'chi2', 'count']
COLUMN_DEFS = {'ATTEN_STATE': 'I', 'CHI2': 'E', 'COUNT': 'E', 'ERANGE': '2E', 'HARM': 'I',
'ISC': 'I', 'NORM_PH_FACTOR': 'E', 'OBSVIS': 'C', 'SIGAMP': 'E', 'TOTFLUX': 'E',
'TRANGE': '2D', 'TYPE': '6A', 'U': 'E', 'UNITS': '24A', 'V': 'E',
'XYOFFSET': '2E'}
def __init__(self, uv, vis, isc=None, harm: int = 1,
erange: np.array = np.array([0.0, 0.0]),
trange: np.array = np.array([datetime.now(), datetime.now()]),
totflux=None, sigamp=None, chi2=None,
xyoffset: np.array = np.array([0.0, 0.0]),
type: str = "photon",
units: str = "Photons cm!u-2!n s!u-1!n",
atten_state: int = 1, count=None,
pixel_size: np.array = np.array([1.0, 1.0])*u.arcsec,
norm_ph_factor=0,
*, meta):
r"""
Initialise a new RHESSI visibility.
Parameters
----------
uv
vis
isc
harm
erange
trange
totflux
sigamp
chi2
xyoffset
type
units
atten_state
count
pixel_size
norm_ph_factor
"""
super().__init__(uv=uv, vis=vis, xyoffset=xyoffset, pixel_size=pixel_size)
if isc is None:
self.isc = np.zeros(vis.shape)
else:
self.isc = isc
self.harm = harm
self.erange = erange
self.trange = trange
if totflux is None:
self.totflux = np.zeros(vis.shape)
else:
self.totflux = totflux
if sigamp is None:
self.sigamp = np.zeros(vis.shape)
else:
self.sigamp = sigamp
if chi2 is None:
self.chi2 = np.zeros(vis.shape)
else:
self.chi2 = chi2
self.type = type
self.units = units
self.atten_state = atten_state
if count is None:
self.count = np.zeros(vis.shape)
else:
self.count = count
self.norm_ph_factor = norm_ph_factor
self.meta = meta
@staticmethod
def exists_and_unique(hdu, column, indices):
"""
Check if the data column exits have the same value for all indices
Parameters
----------
hdu : `astropy.io.fits.BinTableHDU` header data unit
HDU to check
column : `str`
The data column name
indices : `list`
Returns
-------
Raises
------
"""
if column.casefold() in [name.casefold() for name in hdu.data.columns.names]:
column = column.lower()
if np.all(hdu.data[column][indices] == hdu.data[column][indices[0]]):
return hdu.data[column][indices[0]]
else:
raise ValueError(f"Column: {column} was not constant")
else:
raise ValueError(f"Column: {column} does not exist")
@staticmethod
def convert_units_to_tex(string: str):
"""
Convert from idl format to latex, if it already is there will be no conversation.
Parameters
----------
string : `str`
The IDL format string to be converted
Returns
-------
`str`
The LATEX equivalent of the IDL format string
Examples
--------
Notes
-----
"""
final_string = ""
opened = 0
check_for_instruction = False
for i in range(len(string)):
if check_for_instruction:
if string[i] == 'n':
final_string += opened * "}"
opened = 0
elif string[i] == 'u':
final_string += "^{"
opened += 1
elif string[i] == 's':
final_string += "_{"
opened += 1
check_for_instruction = False
elif string[i] == '!':
check_for_instruction = True
else:
final_string += string[i]
final_string += opened * "}"
return final_string
@classmethod
def from_fits(cls, hdu_list):
"""
Parameters
----------
hdu_list
Returns
-------
"""
for hdu in hdu_list:
if hdu.name == "VISIBILITY":
rhessi_columns = cls.COLUMN_DEFS.copy()
[rhessi_columns.pop(x) for x in ('U', 'V', 'OBSVIS')]
data = {}
for prop, _ in rhessi_columns.items():
if prop.casefold() in ['xyoffset', 'pixel_size']:
data[prop.casefold()] = hdu.data[prop] * u.arcsec
else:
data[prop.casefold()] = hdu.data[prop]
data['meta'] = hdu_list[0].header
return RHESSIVisibility(uv=np.vstack((hdu.data['u']*-1.0,
hdu.data['v']*-1.0))/u.arcsec,
vis=hdu.data['obsvis'], **data)
raise ValueError('Fits HDUs did not contain visibility extension')
@classmethod
def from_fits_old(cls, hdu_list):
    """
    Create RHESSIVisibility from compatible fits hdus.

    Parameters
    ----------
    hdu_list : `list`
        List of RHESSI visibility hdus

    Returns
    -------
    `RHESSIVisibility` or `numpy.ndarray` of `RHESSIVisibility`
        A single visibility when only one (time, energy) bin exists,
        otherwise a 2-D object array indexed by (time bin, energy bin).

    Examples
    --------

    Notes
    -----
    It separates the Visibility data based on the time and energy
    ranges.
    """
    for hdu in hdu_list:
        if hdu.name == "VISIBILITY":
            # Find the distinct energy and time intervals present in the table.
            energy_ranges = hdu.data["erange"]
            unique_energy_ranges = np.unique(energy_ranges, axis=0)
            time_ranges = hdu.data["trange"]
            unique_time_ranges = np.unique(time_ranges, axis=0)
            # One visibility object per (time, energy) bin.
            visibilities = np.zeros((unique_time_ranges.shape[0],
                                      unique_energy_ranges.shape[0]), dtype=object)
            # Creating the RHESSIVisibilities
            for i, time_range in enumerate(unique_time_ranges):
                for j, energy_range in enumerate(unique_energy_ranges):
                    # Row indices belonging to exactly this (time, energy) bin.
                    indices = np.argwhere((time_ranges[:, 0] == time_range[0]) &
                                        (time_ranges[:, 1] == time_range[1]) &
                                        (energy_ranges[:, 0] == energy_range[0]) &
                                        (energy_ranges[:, 1] == energy_range[1])).reshape(-1)
                    # Columns that must be constant within a bin collapse to scalars.
                    static = {name: cls.exists_and_unique(hdu, name, indices)
                              for name in cls.CONSTANT_DATA_COLUMNS}
                    static['meta'] = hdu_list[0].header
                    static['xyoffset'] = static['xyoffset'] * u.arcsec
                    # Per-row columns; u, v and obsvis are passed explicitly below.
                    dynamic = {name: hdu.data[name][indices] for name in
                               cls.DYANMIC_DATA_COLUMNS if name not in ['u', 'v', 'obsvis']}
                    # Sign flip converts stored u, v to the internal convention.
                    cur_vis = RHESSIVisibility(uv=np.vstack((hdu.data['u'][indices] * -1.0,
                                                               hdu.data['v'][indices] * -1.0)) / u.arcsec,
                                                vis=hdu.data['obsvis'][indices],
                                                **{**static, **dynamic})
                    visibilities[i, j] = cur_vis
            if visibilities.size == 1:
                # Unwrap the single visibility instead of a 1x1 array.
                return visibilities[0, 0]
            else:
                # return RHESSIVisibilityList(visibilities)
                return visibilities
def to_map(self, shape=(33, 33), center=None, pixel_size=None):
    """
    Create a map from the visibility and annotate it with RHESSI
    time/energy metadata.

    Parameters
    ----------
    shape : `tuple`, optional
        Output map dimensions.
    center : optional
        Map center passed through to the parent implementation.
    pixel_size : optional
        Pixel size passed through to the parent implementation.

    Returns
    -------
    The map produced by the parent class with RHESSI metadata added.
    """
    # Renamed from ``map`` so the builtin is not shadowed.
    vis_map = super().to_map(shape=shape, center=center, pixel_size=pixel_size)
    vis_map.meta['wavelnth'] = self.erange
    # Both underscore and FITS-style hyphenated keys are set on purpose.
    vis_map.meta['date_obs'] = parse_time(self.trange[0])
    vis_map.meta['date-obs'] = parse_time(self.trange[0])
    vis_map.meta['date_end'] = parse_time(self.trange[1])
    # Copy over any remaining visibility metadata without overwriting.
    for key, value in self.meta.items():
        if key.casefold() not in vis_map.meta:
            vis_map.meta[key.casefold()] = value
    return vis_map
def to_fits_file(self, path):
    """
    Write the visibility to a fits file.

    Parameters
    ----------
    path : `str` or path-like
        Destination of the fits file.

    Returns
    -------
    None
    """
    # TODO Bit hacky need a better approach: if the file originally came from
    # RHESSI we should keep all the original hdus for later writing back to
    # fits. If a new file, need to figure out the minimal required headers
    # and extensions.
    primary_hdu = fits.PrimaryHDU()
    primary_hdu.header = self.meta
    # U, V and OBSVIS are stored sign-flipped / transformed, so they are
    # appended separately after the plain columns.
    transformed_columns = ('U', 'V', 'OBSVIS')
    orig_columns = self.COLUMN_DEFS.copy()
    transformed_formats = [orig_columns.pop(x) for x in transformed_columns]
    fits_columns = []
    # ``col_format`` avoids shadowing the builtin ``format``.
    for name, col_format in orig_columns.items():
        value = getattr(self, name.casefold())
        if name.casefold() in self.CONSTANT_DATA_COLUMNS:
            # Constant metadata is repeated once per visibility row.
            value = np.tile(value, (self.vis.size, 1))
        fits_columns.append(fits.Column(name=name, array=value,
                                        format=col_format))
    # Sign flip restores the on-disk u, v convention.
    fits_columns.append(fits.Column(name='U', array=self.uv[0, :]*-1.0,
                                    format=transformed_formats[0]))
    fits_columns.append(fits.Column(name='V', array=self.uv[1, :]*-1.0,
                                    format=transformed_formats[1]))
    fits_columns.append(fits.Column(name='OBSVIS', array=self.vis,
                                    format=transformed_formats[2]))
    vis_hdu = fits.BinTableHDU.from_columns(fits.ColDefs(fits_columns))
    hdu_list = fits.HDUList([primary_hdu, vis_hdu])
    hdu_list[1].name = 'VISIBILITY'
    hdu_list.writeto(path)
| [
"sunpy.io.fits.fits.open",
"sunpy.map.Map",
"astropy.units.quantity_input",
"sunpy.io.fits.fits.Column",
"numpy.zeros",
"sunpy.io.fits.fits.PrimaryHDU",
"numpy.all",
"datetime.datetime.now",
"numpy.argwhere",
"numpy.array",
"numpy.tile",
"sunpy.io.fits.fits.ColDefs",
"numpy.array_equal",
"... | [((1117, 1188), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'uv': '(1 / u.arcsec)', 'center': 'u.arcsec', 'pixel_size': 'u.arcsec'}), '(uv=1 / u.arcsec, center=u.arcsec, pixel_size=u.arcsec)\n', (1133, 1188), True, 'import astropy.units as u\n'), ((4215, 4269), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'center': 'u.arcsec', 'pixel_size': 'u.arcsec'}), '(center=u.arcsec, pixel_size=u.arcsec)\n', (4231, 4269), True, 'import astropy.units as u\n'), ((5094, 5127), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'uv': '(1 / u.arcsec)'}), '(uv=1 / u.arcsec)\n', (5110, 5127), True, 'import astropy.units as u\n'), ((6101, 6155), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'center': 'u.arcsec', 'pixel_size': 'u.arcsec'}), '(center=u.arcsec, pixel_size=u.arcsec)\n', (6117, 6155), True, 'import astropy.units as u\n'), ((7327, 7381), 'astropy.units.quantity_input', 'u.quantity_input', ([], {'center': 'u.arcsec', 'pixel_size': 'u.arcsec'}), '(center=u.arcsec, pixel_size=u.arcsec)\n', (7343, 7381), True, 'import astropy.units as u\n'), ((1812, 1840), 'numpy.array', 'np.array', (['vis'], {'dtype': 'complex'}), '(vis, dtype=complex)\n', (1820, 1840), True, 'import numpy as np\n'), ((3897, 3940), 'numpy.unique', 'np.unique', (["vis_hdu.data['xyoffset']"], {'axis': '(0)'}), "(vis_hdu.data['xyoffset'], axis=0)\n", (3906, 3940), True, 'import numpy as np\n'), ((3962, 4007), 'numpy.unique', 'np.unique', (["vis_hdu.data['pixel_size']"], {'axis': '(0)'}), "(vis_hdu.data['pixel_size'], axis=0)\n", (3971, 4007), True, 'import numpy as np\n'), ((5591, 5611), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (5599, 5611), True, 'import numpy as np\n'), ((5783, 5803), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (5791, 5803), True, 'import numpy as np\n'), ((9326, 9345), 'sunpy.map.Map', 'Map', (['(data, header)'], {}), '((data, header))\n', (9329, 9345), False, 'from sunpy.map import Map\n'), 
((9602, 9619), 'sunpy.io.fits.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (9617, 9619), False, 'from sunpy.io.fits import fits\n'), ((10432, 10468), 'sunpy.io.fits.fits.HDUList', 'fits.HDUList', (['[primary_hdu, vis_hdu]'], {}), '([primary_hdu, vis_hdu])\n', (10444, 10468), False, 'from sunpy.io.fits import fits\n'), ((12327, 12347), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (12335, 12347), True, 'import numpy as np\n'), ((12522, 12542), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (12530, 12542), True, 'import numpy as np\n'), ((20012, 20038), 'sunpy.time.parse_time', 'parse_time', (['self.trange[0]'], {}), '(self.trange[0])\n', (20022, 20038), False, 'from sunpy.time import parse_time\n'), ((20070, 20096), 'sunpy.time.parse_time', 'parse_time', (['self.trange[0]'], {}), '(self.trange[0])\n', (20080, 20096), False, 'from sunpy.time import parse_time\n'), ((20128, 20154), 'sunpy.time.parse_time', 'parse_time', (['self.trange[1]'], {}), '(self.trange[1])\n', (20138, 20154), False, 'from sunpy.time import parse_time\n'), ((20785, 20802), 'sunpy.io.fits.fits.PrimaryHDU', 'fits.PrimaryHDU', ([], {}), '()\n', (20800, 20802), False, 'from sunpy.io.fits import fits\n'), ((21937, 21973), 'sunpy.io.fits.fits.HDUList', 'fits.HDUList', (['[primary_hdu, vis_hdu]'], {}), '([primary_hdu, vis_hdu])\n', (21949, 21973), False, 'from sunpy.io.fits import fits\n'), ((3111, 3130), 'sunpy.io.fits.fits.open', 'fits.open', (['filename'], {}), '(filename)\n', (3120, 3130), False, 'from sunpy.io.fits import fits\n'), ((12732, 12752), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (12740, 12752), True, 'import numpy as np\n'), ((13306, 13325), 'numpy.zeros', 'np.zeros', (['vis.shape'], {}), '(vis.shape)\n', (13314, 13325), True, 'import numpy as np\n'), ((13505, 13524), 'numpy.zeros', 'np.zeros', (['vis.shape'], {}), '(vis.shape)\n', (13513, 13524), True, 'import numpy as np\n'), ((13627, 13646), 'numpy.zeros', 
'np.zeros', (['vis.shape'], {}), '(vis.shape)\n', (13635, 13646), True, 'import numpy as np\n'), ((13743, 13762), 'numpy.zeros', 'np.zeros', (['vis.shape'], {}), '(vis.shape)\n', (13751, 13762), True, 'import numpy as np\n'), ((13948, 13967), 'numpy.zeros', 'np.zeros', (['vis.shape'], {}), '(vis.shape)\n', (13956, 13967), True, 'import numpy as np\n'), ((14662, 14727), 'numpy.all', 'np.all', (['(hdu.data[column][indices] == hdu.data[column][indices[0]])'], {}), '(hdu.data[column][indices] == hdu.data[column][indices[0]])\n', (14668, 14727), True, 'import numpy as np\n'), ((21421, 21508), 'sunpy.io.fits.fits.Column', 'fits.Column', ([], {'name': '"""U"""', 'array': '(self.uv[0, :] * -1.0)', 'format': 'modifed_colum_formats[0]'}), "(name='U', array=self.uv[0, :] * -1.0, format=\n modifed_colum_formats[0])\n", (21432, 21508), False, 'from sunpy.io.fits import fits\n'), ((21572, 21659), 'sunpy.io.fits.fits.Column', 'fits.Column', ([], {'name': '"""V"""', 'array': '(self.uv[1, :] * -1.0)', 'format': 'modifed_colum_formats[1]'}), "(name='V', array=self.uv[1, :] * -1.0, format=\n modifed_colum_formats[1])\n", (21583, 21659), False, 'from sunpy.io.fits import fits\n'), ((21723, 21798), 'sunpy.io.fits.fits.Column', 'fits.Column', ([], {'name': '"""OBSVIS"""', 'array': 'self.vis', 'format': 'modifed_colum_formats[2]'}), "(name='OBSVIS', array=self.vis, format=modifed_colum_formats[2])\n", (21734, 21798), False, 'from sunpy.io.fits import fits\n'), ((21889, 21915), 'sunpy.io.fits.fits.ColDefs', 'fits.ColDefs', (['fits_columns'], {}), '(fits_columns)\n', (21901, 21915), False, 'from sunpy.io.fits import fits\n'), ((2473, 2528), 'numpy.array_equal', 'np.array_equal', (['self.__dict__[key]', 'other.__dict__[key]'], {}), '(self.__dict__[key], other.__dict__[key])\n', (2487, 2528), True, 'import numpy as np\n'), ((9753, 9809), 'numpy.repeat', 'np.repeat', (['[self.xyoffset.value]', 'self.vis.shape'], {'axis': '(0)'}), '([self.xyoffset.value], self.vis.shape, axis=0)\n', (9762, 
9809), True, 'import numpy as np\n'), ((9838, 9896), 'numpy.repeat', 'np.repeat', (['[self.pixel_size.value]', 'self.vis.shape'], {'axis': '(0)'}), '([self.pixel_size.value], self.vis.shape, axis=0)\n', (9847, 9896), True, 'import numpy as np\n'), ((12395, 12409), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12407, 12409), False, 'from datetime import datetime\n'), ((12411, 12425), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (12423, 12425), False, 'from datetime import datetime\n'), ((17838, 17870), 'numpy.unique', 'np.unique', (['energy_ranges'], {'axis': '(0)'}), '(energy_ranges, axis=0)\n', (17847, 17870), True, 'import numpy as np\n'), ((17957, 17987), 'numpy.unique', 'np.unique', (['time_ranges'], {'axis': '(0)'}), '(time_ranges, axis=0)\n', (17966, 17987), True, 'import numpy as np\n'), ((18020, 18108), 'numpy.zeros', 'np.zeros', (['(unique_time_ranges.shape[0], unique_energy_ranges.shape[0])'], {'dtype': 'object'}), '((unique_time_ranges.shape[0], unique_energy_ranges.shape[0]),\n dtype=object)\n', (18028, 18108), True, 'import numpy as np\n'), ((21228, 21262), 'numpy.tile', 'np.tile', (['value', '(self.vis.size, 1)'], {}), '(value, (self.vis.size, 1))\n', (21235, 21262), True, 'import numpy as np\n'), ((21296, 21346), 'sunpy.io.fits.fits.Column', 'fits.Column', ([], {'name': 'name', 'array': 'value', 'format': 'format'}), '(name=name, array=value, format=format)\n', (21307, 21346), False, 'from sunpy.io.fits import fits\n'), ((16929, 16984), 'numpy.vstack', 'np.vstack', (["(hdu.data['u'] * -1.0, hdu.data['v'] * -1.0)"], {}), "((hdu.data['u'] * -1.0, hdu.data['v'] * -1.0))\n", (16938, 16984), True, 'import numpy as np\n'), ((18375, 18558), 'numpy.argwhere', 'np.argwhere', (['((time_ranges[:, 0] == time_range[0]) & (time_ranges[:, 1] == time_range[1]\n ) & (energy_ranges[:, 0] == energy_range[0]) & (energy_ranges[:, 1] ==\n energy_range[1]))'], {}), '((time_ranges[:, 0] == time_range[0]) & (time_ranges[:, 1] ==\n time_range[1]) & 
(energy_ranges[:, 0] == energy_range[0]) & (\n energy_ranges[:, 1] == energy_range[1]))\n', (18386, 18558), True, 'import numpy as np\n'), ((19222, 19295), 'numpy.vstack', 'np.vstack', (["(hdu.data['u'][indices] * -1.0, hdu.data['v'][indices] * -1.0)"], {}), "((hdu.data['u'][indices] * -1.0, hdu.data['v'][indices] * -1.0))\n", (19231, 19295), True, 'import numpy as np\n')] |
from MCEq.misc import info
import six
import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis
class NRLMSISE00Base(object):
    """Shared state and helpers for NRLMSISE-00 atmosphere wrappers.

    Concrete subclasses must provide ``init_default_values``,
    ``set_location``, ``set_season`` and ``_retrieve_result``.
    """

    def __init__(self):
        # Cache altitude value of last call
        self.last_alt = None
        self.inp = cmsis.nrlmsise_input()
        self.output = cmsis.nrlmsise_output()
        self.flags = cmsis.nrlmsise_flags()
        # Day-of-year of the first day of each month (non-leap year).
        self.month2doy = {
            'January': 1,
            'February': 32,
            'March': 60,
            'April': 91,
            'May': 121,
            'June': 152,
            'July': 182,
            'August': 213,
            'September': 244,
            'October': 274,
            'November': 305,
            'December': 335
        }
        # Longitude (deg), latitude (deg), height (cm)
        self.locations = {
            'SouthPole': (0., -90., 2834. * 100.),
            'Karlsruhe': (8.4, 49., 110. * 100.),
            'Geneva': (6.1, 46.2, 370. * 100.),
            'Tokyo': (139., 35., 5. * 100.),
            'SanGrasso': (13.5, 42.4, 5. * 100.),
            'TelAviv': (34.8, 32.1, 5. * 100.),
            'KSC': (-80.7, 32.1, 5. * 100.),
            'SoudanMine': (-92.2, 47.8, 5. * 100.),
            'Tsukuba': (140.1, 36.2, 30. * 100.),
            'LynnLake': (-101.1, 56.9, 360. * 100.),
            'PeaceRiver': (-117.2, 56.15, 36000. * 100.),
            'FtSumner': (-104.2, 34.5, 31000. * 100.)
        }
        # Seconds of day used for "day" (noon) and "night" (midnight).
        self.daytimes = {'day': 43200., 'night': 0.}
        self.current_location = 'SouthPole'
        self.init_default_values()

    def surface_vert_depth(self, loc='SouthPole', month='June'):
        """Set the location and season used for depth calculations.

        Bug fix: the original hardcoded 'SouthPole'/'June' and silently
        ignored both arguments; they are now honoured (defaults keep the
        previous behaviour).
        """
        self.set_location(loc)
        self.set_season(month)

    def height2depth(self, altitude_cm):
        """Integrate the density above ``altitude_cm`` (slant depth in g/cm^2).

        The upper limit 112.8 km is the top of the model atmosphere used
        elsewhere in this module.
        """
        from scipy.integrate import quad
        return quad(self.get_density, altitude_cm, 112.8 * 1e5,
                    epsrel=0.001)[0]

    def _retrieve_result(self, *args, **kwargs):
        """Calls NRLMSISE library's main function"""
        # NotImplementedError (an Exception subclass) is the idiomatic
        # signal for an abstract method; callers catching Exception still work.
        raise NotImplementedError('Not implemented for the base class')

    def get_temperature(self, altitude_cm):
        """Returns temperature in K"""
        self._retrieve_result(altitude_cm)
        return self.output.t[1]

    def get_density(self, altitude_cm):
        """Returns density in g/cm^3"""
        self._retrieve_result(altitude_cm)
        return self.output.d[5]
class cNRLMSISE00(NRLMSISE00Base):
    # ctypes-based frontend to the compiled NRLMSISE-00 library (``cmsis``).

    def init_default_values(self):
        """Sets default to June at South Pole"""
        self.inp.doy = cmsis.c_int(self.month2doy['June'])  # Day of year
        self.inp.year = cmsis.c_int(0)  # No effect
        self.inp.sec = cmsis.c_double(self.daytimes['day'])  # 12:00
        self.inp.alt = cmsis.c_double(self.locations[self.current_location][2])
        self.inp.g_lat = cmsis.c_double(
            self.locations[self.current_location][1])
        self.inp.g_long = cmsis.c_double(
            self.locations[self.current_location][0])
        # Local solar time derived from UT seconds and longitude (15 deg/h).
        self.inp.lst = cmsis.c_double(self.inp.sec.value / 3600. +
                                      self.inp.g_long.value / 15.)
        # Do not touch this except you know what you are doing
        # NOTE(review): presumably the library's recommended solar-flux
        # (f107A/f107) and geomagnetic (ap) defaults — confirm against the
        # NRLMSISE-00 documentation before changing.
        self.inp.f107A = cmsis.c_double(150.)
        self.inp.f107 = cmsis.c_double(150.)
        self.inp.ap = cmsis.c_double(4.)
        self.inp.ap_a = cmsis.pointer(cmsis.ap_array())
        self.alt_surface = self.locations[self.current_location][2]
        # switches[0] = 0, switches[1..23] = 1: the library's output-unit /
        # effect toggles — see the NRLMSISE-00 flag documentation.
        self.flags.switches[0] = cmsis.c_int(0)
        for i in range(1, 24):
            self.flags.switches[i] = cmsis.c_int(1)

    def set_location(self, tag):
        """Select one of the predefined locations by name (see ``self.locations``)."""
        if tag not in list(self.locations):
            raise Exception(
                "NRLMSISE00::set_location(): Unknown location tag '{0}'.".
                format(tag))
        self.inp.alt = cmsis.c_double(self.locations[tag][2])
        self.set_location_coord(*self.locations[tag][:2])
        self.current_location = tag
        self.alt_surface = self.locations[self.current_location][2]

    def set_location_coord(self, longitude, latitude):
        """Set geodetic coordinates in degrees (|lat| <= 90, |long| <= 180)."""
        info(5, 'long={0:5.2f}, lat={1:5.2f}'.format(longitude, latitude))
        if abs(latitude) > 90 or abs(longitude) > 180:
            raise Exception("NRLMSISE00::set_location_coord(): Invalid inp.")
        self.inp.g_lat = cmsis.c_double(latitude)
        self.inp.g_long = cmsis.c_double(longitude)

    def set_season(self, tag):
        """Select the month by name; stored as day-of-year of the month's first day."""
        if tag not in self.month2doy:
            raise Exception("NRLMSISE00::set_location(): Unknown season tag.")
        info(5, 'Season', tag, 'doy=', self.month2doy[tag])
        self.inp.doy = self.month2doy[tag]

    def set_doy(self, doy):
        """Set the day of year directly (0..365)."""
        if doy < 0 or doy > 365:
            raise Exception("NRLMSISE00::set_doy(): Day of year out of range.")
        info(5, 'day of year', doy)
        self.inp.doy = cmsis.c_int(doy)

    def _retrieve_result(self, altitude_cm):
        """Run the C library for ``altitude_cm``; results land in ``self.output``.

        Calls are cached: a repeated query at the same altitude is a no-op.
        """
        if self.last_alt == altitude_cm:
            return
        inp = self.inp
        # The library expects altitude in km.
        inp.alt = cmsis.c_double(altitude_cm / 1e5)
        cmsis.msis.gtd7_py(inp.year, inp.doy, inp.sec, inp.alt, inp.g_lat,
                           inp.g_long, inp.lst, inp.f107A, inp.f107, inp.ap,
                           inp.ap_a, cmsis.byref(self.flags),
                           cmsis.byref(self.output))
        self.last_alt = altitude_cm
def test():
    """Exercise the NRLMSISE-00 wrapper and draw diagnostic plots.

    Shows density profiles for the South Pole and Karlsruhe, their
    ratio, month-to-month density ratios at the South Pole, and the
    height-to-slant-depth relation. Requires matplotlib; intended for
    interactive use.
    """
    import numpy as np
    import matplotlib.pyplot as plt

    msis = cNRLMSISE00()
    den = np.vectorize(msis.get_density)

    plt.figure(figsize=(16, 5))
    plt.suptitle('NRLMSISE-00')
    h_vec = np.linspace(0, 112.8 * 1e5, 500)
    msis.set_season('January')
    msis.set_location('SouthPole')
    den_sp_jan = den(h_vec)
    msis.set_season('January')
    msis.set_location('Karlsruhe')
    den_ka_jan = den(h_vec)

    plt.subplot(131)
    plt.semilogy(h_vec / 1e5, den_sp_jan, label='MSIS00: SP Jan.')
    plt.semilogy(h_vec / 1e5, den_ka_jan, label='MSIS00: KA Jan.')
    plt.legend()
    plt.xlabel('vertical height in km')
    plt.ylabel(r'density $\rho(h)$ in g/cm$^3$')

    plt.subplot(132)
    plt.plot(h_vec / 1e5, den_ka_jan / den_sp_jan, label='MSIS00: KA/SP')
    plt.xlabel('vertical height in km')
    plt.ylabel(r'density ratio')
    plt.legend(loc='upper left')

    plt.subplot(133)
    msis.set_location('SouthPole')
    # Bug fix: 360 / 30 is a float on Python 3 and range() rejects it;
    # use integer division (12 monthly steps).
    for i in range(360 // 30):
        msis.inp.doy = i * 30
        plt.plot(h_vec / 1e5, den(h_vec) / den_sp_jan, label=str(i + 1))
    plt.legend(ncol=2, loc=3)
    plt.title('MSIS00: SouthPole')
    plt.xlabel('vertical height in km')
    plt.ylabel(r'$\rho$(Month) / $\rho$(January)')
    plt.ylim(ymin=0.6)
    plt.tight_layout()

    plt.figure(figsize=(6, 5))
    h2d = np.vectorize(msis.height2depth)
    plt.semilogy(h_vec / 1e5, h2d(h_vec))
    plt.ylabel(r'Slant depth X [g/cm$^2$]')
    plt.xlabel(r'Atmospheric height $h$ [km]')
    plt.subplots_adjust(left=0.15, bottom=0.11)
    plt.show()
# Render the diagnostic plots when this module is executed as a script.
if __name__ == '__main__':
    test()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.suptitle",
"MCEq.misc.info",
"MCEq.geometry.nrlmsise00.nrlmsise00.ap_array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"MCEq.geometry.nrlmsise00.nrlmsise00.byref",
"MCEq.geometry.nrlmsise00.nrlmsise00.nrlmsise_output",
"numpy.linspace... | [((5362, 5392), 'numpy.vectorize', 'np.vectorize', (['msis.get_density'], {}), '(msis.get_density)\n', (5374, 5392), True, 'import numpy as np\n'), ((5398, 5425), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 5)'}), '(figsize=(16, 5))\n', (5408, 5425), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5457), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""NRLMSISE-00"""'], {}), "('NRLMSISE-00')\n", (5442, 5457), True, 'import matplotlib.pyplot as plt\n'), ((5471, 5508), 'numpy.linspace', 'np.linspace', (['(0)', '(112.8 * 100000.0)', '(500)'], {}), '(0, 112.8 * 100000.0, 500)\n', (5482, 5508), True, 'import numpy as np\n'), ((5698, 5714), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(131)'], {}), '(131)\n', (5709, 5714), True, 'import matplotlib.pyplot as plt\n'), ((5719, 5786), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['(h_vec / 100000.0)', 'den_sp_jan'], {'label': '"""MSIS00: SP Jan."""'}), "(h_vec / 100000.0, den_sp_jan, label='MSIS00: SP Jan.')\n", (5731, 5786), True, 'import matplotlib.pyplot as plt\n'), ((5786, 5853), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['(h_vec / 100000.0)', 'den_ka_jan'], {'label': '"""MSIS00: KA Jan."""'}), "(h_vec / 100000.0, den_ka_jan, label='MSIS00: KA Jan.')\n", (5798, 5853), True, 'import matplotlib.pyplot as plt\n'), ((5853, 5865), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5863, 5865), True, 'import matplotlib.pyplot as plt\n'), ((5870, 5905), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""vertical height in km"""'], {}), "('vertical height in km')\n", (5880, 5905), True, 'import matplotlib.pyplot as plt\n'), ((5910, 5954), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density $\\\\rho(h)$ in g/cm$^3$"""'], {}), "('density $\\\\rho(h)$ in g/cm$^3$')\n", (5920, 5954), True, 'import matplotlib.pyplot as plt\n'), ((5960, 5976), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(132)'], {}), '(132)\n', (5971, 5976), True, 'import matplotlib.pyplot as 
plt\n'), ((5981, 6055), 'matplotlib.pyplot.plot', 'plt.plot', (['(h_vec / 100000.0)', '(den_ka_jan / den_sp_jan)'], {'label': '"""MSIS00: KA/SP"""'}), "(h_vec / 100000.0, den_ka_jan / den_sp_jan, label='MSIS00: KA/SP')\n", (5989, 6055), True, 'import matplotlib.pyplot as plt\n'), ((6055, 6090), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""vertical height in km"""'], {}), "('vertical height in km')\n", (6065, 6090), True, 'import matplotlib.pyplot as plt\n'), ((6095, 6122), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""density ratio"""'], {}), "('density ratio')\n", (6105, 6122), True, 'import matplotlib.pyplot as plt\n'), ((6128, 6156), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6138, 6156), True, 'import matplotlib.pyplot as plt\n'), ((6162, 6178), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(133)'], {}), '(133)\n', (6173, 6178), True, 'import matplotlib.pyplot as plt\n'), ((6351, 6376), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(2)', 'loc': '(3)'}), '(ncol=2, loc=3)\n', (6361, 6376), True, 'import matplotlib.pyplot as plt\n'), ((6381, 6411), 'matplotlib.pyplot.title', 'plt.title', (['"""MSIS00: SouthPole"""'], {}), "('MSIS00: SouthPole')\n", (6390, 6411), True, 'import matplotlib.pyplot as plt\n'), ((6416, 6451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""vertical height in km"""'], {}), "('vertical height in km')\n", (6426, 6451), True, 'import matplotlib.pyplot as plt\n'), ((6456, 6503), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rho$(Month) / $\\\\rho$(January)"""'], {}), "('$\\\\rho$(Month) / $\\\\rho$(January)')\n", (6466, 6503), True, 'import matplotlib.pyplot as plt\n'), ((6507, 6525), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': '(0.6)'}), '(ymin=0.6)\n', (6515, 6525), True, 'import matplotlib.pyplot as plt\n'), ((6530, 6548), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6546, 6548), True, 'import matplotlib.pyplot as 
plt\n'), ((6554, 6580), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 5)'}), '(figsize=(6, 5))\n', (6564, 6580), True, 'import matplotlib.pyplot as plt\n'), ((6591, 6622), 'numpy.vectorize', 'np.vectorize', (['msis.height2depth'], {}), '(msis.height2depth)\n', (6603, 6622), True, 'import numpy as np\n'), ((6669, 6707), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Slant depth X [g/cm$^2$]"""'], {}), "('Slant depth X [g/cm$^2$]')\n", (6679, 6707), True, 'import matplotlib.pyplot as plt\n'), ((6713, 6754), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Atmospheric height $h$ [km]"""'], {}), "('Atmospheric height $h$ [km]')\n", (6723, 6754), True, 'import matplotlib.pyplot as plt\n'), ((6760, 6803), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'left': '(0.15)', 'bottom': '(0.11)'}), '(left=0.15, bottom=0.11)\n', (6779, 6803), True, 'import matplotlib.pyplot as plt\n'), ((6808, 6818), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6816, 6818), True, 'import matplotlib.pyplot as plt\n'), ((239, 261), 'MCEq.geometry.nrlmsise00.nrlmsise00.nrlmsise_input', 'cmsis.nrlmsise_input', ([], {}), '()\n', (259, 261), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((284, 307), 'MCEq.geometry.nrlmsise00.nrlmsise00.nrlmsise_output', 'cmsis.nrlmsise_output', ([], {}), '()\n', (305, 307), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((329, 351), 'MCEq.geometry.nrlmsise00.nrlmsise00.nrlmsise_flags', 'cmsis.nrlmsise_flags', ([], {}), '()\n', (349, 351), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2467, 2502), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_int', 'cmsis.c_int', (["self.month2doy['June']"], {}), "(self.month2doy['June'])\n", (2478, 2502), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2542, 2556), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_int', 'cmsis.c_int', (['(0)'], {}), '(0)\n', (2553, 2556), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as 
cmsis\n'), ((2593, 2629), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (["self.daytimes['day']"], {}), "(self.daytimes['day'])\n", (2607, 2629), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2662, 2718), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['self.locations[self.current_location][2]'], {}), '(self.locations[self.current_location][2])\n', (2676, 2718), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2744, 2800), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['self.locations[self.current_location][1]'], {}), '(self.locations[self.current_location][1])\n', (2758, 2800), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2840, 2896), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['self.locations[self.current_location][0]'], {}), '(self.locations[self.current_location][0])\n', (2854, 2896), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((2933, 3007), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['(self.inp.sec.value / 3600.0 + self.inp.g_long.value / 15.0)'], {}), '(self.inp.sec.value / 3600.0 + self.inp.g_long.value / 15.0)\n', (2947, 3007), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3132, 3153), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['(150.0)'], {}), '(150.0)\n', (3146, 3153), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3177, 3198), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['(150.0)'], {}), '(150.0)\n', (3191, 3198), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3220, 3239), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['(4.0)'], {}), '(4.0)\n', (3234, 3239), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3397, 3411), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_int', 'cmsis.c_int', (['(0)'], {}), '(0)\n', (3408, 3411), True, 'import 
MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3730, 3768), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['self.locations[tag][2]'], {}), '(self.locations[tag][2])\n', (3744, 3768), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((4220, 4244), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['latitude'], {}), '(latitude)\n', (4234, 4244), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((4271, 4296), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['longitude'], {}), '(longitude)\n', (4285, 4296), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((4454, 4505), 'MCEq.misc.info', 'info', (['(5)', '"""Season"""', 'tag', '"""doy="""', 'self.month2doy[tag]'], {}), "(5, 'Season', tag, 'doy=', self.month2doy[tag])\n", (4458, 4505), False, 'from MCEq.misc import info\n'), ((4699, 4726), 'MCEq.misc.info', 'info', (['(5)', '"""day of year"""', 'doy'], {}), "(5, 'day of year', doy)\n", (4703, 4726), False, 'from MCEq.misc import info\n'), ((4750, 4766), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_int', 'cmsis.c_int', (['doy'], {}), '(doy)\n', (4761, 4766), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((4915, 4953), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_double', 'cmsis.c_double', (['(altitude_cm / 100000.0)'], {}), '(altitude_cm / 100000.0)\n', (4929, 4953), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((1756, 1823), 'scipy.integrate.quad', 'quad', (['self.get_density', 'altitude_cm', '(112.8 * 100000.0)'], {'epsrel': '(0.001)'}), '(self.get_density, altitude_cm, 112.8 * 100000.0, epsrel=0.001)\n', (1760, 1823), False, 'from scipy.integrate import quad\n'), ((3277, 3293), 'MCEq.geometry.nrlmsise00.nrlmsise00.ap_array', 'cmsis.ap_array', ([], {}), '()\n', (3291, 3293), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((3480, 3494), 'MCEq.geometry.nrlmsise00.nrlmsise00.c_int', 'cmsis.c_int', (['(1)'], {}), '(1)\n', 
(3491, 3494), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((5138, 5161), 'MCEq.geometry.nrlmsise00.nrlmsise00.byref', 'cmsis.byref', (['self.flags'], {}), '(self.flags)\n', (5149, 5161), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n'), ((5190, 5214), 'MCEq.geometry.nrlmsise00.nrlmsise00.byref', 'cmsis.byref', (['self.output'], {}), '(self.output)\n', (5201, 5214), True, 'import MCEq.geometry.nrlmsise00.nrlmsise00 as cmsis\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 16 18:15:37 2021
@author: jan
"""
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import numpy as np
from sklearn.mixture import GaussianMixture
import os
import uuid
class VisualizeData:
    """Create plots (density scatter, contour, altitude) for the ATAC/ChIP
    score distributions of a transcription factor and save them as SVGs."""

    def __init__(self, path, tf_id, genome, biosource, chromosome):
        """
        Initialize variables and create output directories if necessary.

        Parameters
        ----------
        path : str
            Path to results.
        tf_id : str
            ID of the transcription factor.
        genome : str
            Genome identifier used in the output path.
        biosource : str
            Biosource identifier used in the output path.
        chromosome : str
            Chromosome identifier (stored for later use).
        """
        self.chromosome = chromosome
        self.path_plots = os.path.join(path, 'plots', genome, biosource, tf_id)
        # Resolve <repo>/visualization/src/assets/img relative to this file.
        path_scripts = os.path.dirname(__file__)
        path_bin = os.path.split(path_scripts)
        path_main = os.path.split(path_bin[0])
        self.path_visualization = os.path.join(
            path_main[0], "visualization", "src", "assets", "img")
        # exist_ok replaces the former bare try/except around makedirs,
        # which also hid real errors such as permission problems.
        os.makedirs(self.path_plots, exist_ok=True)
        os.makedirs(self.path_visualization, exist_ok=True)

    def makeArray(self, scores_array):
        """
        Split a list of (x, y) score pairs into two coordinate arrays.

        Parameters
        ----------
        scores_array : list of float64 pairs
            Distribution as (x, y) vectors.

        Returns
        -------
        x : numpy.ndarray of float64
        y : numpy.ndarray of float64

        Notes
        -----
        Bug fix: the original called ``np.array(x)`` / ``np.array(y)`` but
        discarded the results, returning plain lists despite the documented
        ndarray return type.
        """
        x = np.array([pair[0] for pair in scores_array])
        y = np.array([pair[1] for pair in scores_array])
        return x, y

    def displayDensityScatter(self, scores_array, tf_id):
        """
        Illustrate the distribution via a density scatter (heat map).

        Parameters
        ----------
        scores_array : list of float64 pairs
        tf_id : str
            ID of the transcription factor (used in the file name).

        Returns
        -------
        str
            Path to the plot directory.
        """
        x, y = self.makeArray(scores_array)
        # Colour each point by its local point density.
        xy = np.vstack([x, y])
        z = gaussian_kde(xy)(xy)
        fig, ax = plt.subplots()
        ax.scatter(x, y, c=z, s=50, edgecolors='face')
        plt.xlabel("ATAC")
        plt.ylabel("Chip")
        figure_path = os.path.join(self.path_plots,
                                   "DensityScatter_" + tf_id + ".svg")
        plt.savefig(figure_path, format="svg")
        return self.path_plots

    def contourPlot(self, scores_array, n_cgauss, tf_id):
        """
        Display the distribution via a 3D contour (trisurf) plot.

        Parameters
        ----------
        scores_array : list of float64 pairs
            Distribution to plot.
        n_cgauss : int
            Number of components for a Gaussian mixture model.
        tf_id : str
            ID of the transcription factor (used in the file names).

        Returns
        -------
        z : numpy.ndarray
            KDE density evaluated at every point.
        filename : str
            Name of the SVG written to the visualization assets directory.
        """
        gmm = GaussianMixture(n_components=n_cgauss)
        gmm.fit(scores_array)
        x, y = self.makeArray(scores_array)
        # Calculate the point density.
        xy = np.vstack([x, y])
        z = gaussian_kde(xy)(xy)
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        ax.set_ylabel('CHIP')
        ax.set_xlabel('ATAC')
        ax.plot_trisurf(x, y, z, cmap=plt.cm.coolwarm, linewidth=1,
                        antialiased=False)
        # Unique suffix so the web frontend never serves a stale image.
        filename = "Contour_" + tf_id + "_" + uuid.uuid4().hex + ".svg"
        figure_path = os.path.join(self.path_plots, "Contour_" + tf_id + ".svg")
        plt.savefig(figure_path, format="svg")
        vil_fig_path = os.path.join(self.path_visualization, filename)
        plt.savefig(vil_fig_path, format="svg")
        return z, filename

    def altitudePlot(self, data, n_cgauss, tf_id):
        """
        Display a distribution via an altitude plot (contour lines).

        Parameters
        ----------
        data : array-like of float64 pairs
            Distribution to plot; scaled by 100 for the grid below,
            so values are presumably in [0, 1] — TODO confirm.
        n_cgauss : int
            Number of components for the Gaussian mixture model.
        tf_id : str
            ID of the transcription factor (used in the file name).
        """
        dist = data * 100
        gmm = GaussianMixture(n_components=n_cgauss)
        gmm.fit(dist)
        # Evaluate the mixture log-likelihood on a regular 100x100 grid.
        grid = np.linspace(start=-1, stop=100, num=100)
        X, Y = np.meshgrid(grid, grid)
        XY = np.array([X.ravel(), Y.ravel()]).T
        Z = gmm.score_samples(XY).reshape(100, 100)
        plt.contour(X, Y, Z)
        plt.scatter(dist[:, 0], dist[:, 1])
        figure_path = os.path.join(self.path_plots, "Altitude_" + tf_id + ".svg")
        plt.savefig(figure_path, format="svg")
#FOR TESTING // EXAMPLE SEE BELOW
# if __name__ == '__main__':
# path = "/home/python/"
# tf_id = "1234"
# genome = "genom"
# biosource = "Dito"
# v = VisualizeData(path, tf_id, genome, biosource, "chr1") | [
"matplotlib.pyplot.savefig",
"uuid.uuid4",
"os.makedirs",
"matplotlib.pyplot.scatter",
"os.path.dirname",
"scipy.stats.gaussian_kde",
"sklearn.mixture.GaussianMixture",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.contour",
"numpy.linspace",
"ma... | [((837, 890), 'os.path.join', 'os.path.join', (['path', '"""plots"""', 'genome', 'biosource', 'tf_id'], {}), "(path, 'plots', genome, biosource, tf_id)\n", (849, 890), False, 'import os\n'), ((917, 942), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (932, 942), False, 'import os\n'), ((966, 993), 'os.path.split', 'os.path.split', (['path_scripts'], {}), '(path_scripts)\n', (979, 993), False, 'import os\n'), ((1018, 1044), 'os.path.split', 'os.path.split', (['path_bin[0]'], {}), '(path_bin[0])\n', (1031, 1044), False, 'import os\n'), ((1083, 1150), 'os.path.join', 'os.path.join', (['path_main[0]', '"""visualization"""', '"""src"""', '"""assets"""', '"""img"""'], {}), "(path_main[0], 'visualization', 'src', 'assets', 'img')\n", (1095, 1150), False, 'import os\n'), ((2085, 2096), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2093, 2096), True, 'import numpy as np\n'), ((2109, 2120), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2117, 2120), True, 'import numpy as np\n'), ((2719, 2736), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (2728, 2736), True, 'import numpy as np\n'), ((2808, 2822), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2820, 2822), True, 'import matplotlib.pyplot as plt\n'), ((2956, 2974), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ATAC"""'], {}), "('ATAC')\n", (2966, 2974), True, 'import matplotlib.pyplot as plt\n'), ((2987, 3005), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Chip"""'], {}), "('Chip')\n", (2997, 3005), True, 'import matplotlib.pyplot as plt\n'), ((3127, 3165), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {'format': '"""svg"""'}), "(figure_path, format='svg')\n", (3138, 3165), True, 'import matplotlib.pyplot as plt\n'), ((3756, 3794), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_cgauss'}), '(n_components=n_cgauss)\n', (3771, 3794), False, 'from sklearn.mixture import GaussianMixture\n'), ((3963, 
3980), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (3972, 3980), True, 'import numpy as np\n'), ((4076, 4088), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4086, 4088), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4530), 'os.path.join', 'os.path.join', (['self.path_plots', "('Contour_' + tf_id + '.svg')"], {}), "(self.path_plots, 'Contour_' + tf_id + '.svg')\n", (4484, 4530), False, 'import os\n'), ((4543, 4581), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {'format': '"""svg"""'}), "(figure_path, format='svg')\n", (4554, 4581), True, 'import matplotlib.pyplot as plt\n'), ((4609, 4656), 'os.path.join', 'os.path.join', (['self.path_visualization', 'filename'], {}), '(self.path_visualization, filename)\n', (4621, 4656), False, 'import os\n'), ((4669, 4708), 'matplotlib.pyplot.savefig', 'plt.savefig', (['vil_fig_path'], {'format': '"""svg"""'}), "(vil_fig_path, format='svg')\n", (4680, 4708), True, 'import matplotlib.pyplot as plt\n'), ((5337, 5375), 'sklearn.mixture.GaussianMixture', 'GaussianMixture', ([], {'n_components': 'n_cgauss'}), '(n_components=n_cgauss)\n', (5352, 5375), False, 'from sklearn.mixture import GaussianMixture\n'), ((5684, 5704), 'matplotlib.pyplot.contour', 'plt.contour', (['X', 'Y', 'Z'], {}), '(X, Y, Z)\n', (5695, 5704), True, 'import matplotlib.pyplot as plt\n'), ((5715, 5750), 'matplotlib.pyplot.scatter', 'plt.scatter', (['dist[:, 0]', 'dist[:, 1]'], {}), '(dist[:, 0], dist[:, 1])\n', (5726, 5750), True, 'import matplotlib.pyplot as plt\n'), ((5861, 5899), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figure_path'], {'format': '"""svg"""'}), "(figure_path, format='svg')\n", (5872, 5899), True, 'import matplotlib.pyplot as plt\n'), ((1181, 1209), 'os.makedirs', 'os.makedirs', (['self.path_plots'], {}), '(self.path_plots)\n', (1192, 1209), False, 'import os\n'), ((1297, 1333), 'os.makedirs', 'os.makedirs', (['self.path_visualization'], {}), '(self.path_visualization)\n', (1308, 1333), 
False, 'import os\n'), ((2752, 2768), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['xy'], {}), '(xy)\n', (2764, 2768), False, 'from scipy.stats import gaussian_kde\n'), ((3996, 4012), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['xy'], {}), '(xy)\n', (4008, 4012), False, 'from scipy.stats import gaussian_kde\n'), ((5446, 5486), 'numpy.linspace', 'np.linspace', ([], {'start': '(-1)', 'stop': '(100)', 'num': '(100)'}), '(start=-1, stop=100, num=100)\n', (5457, 5486), True, 'import numpy as np\n'), ((5494, 5534), 'numpy.linspace', 'np.linspace', ([], {'start': '(-1)', 'stop': '(100)', 'num': '(100)'}), '(start=-1, stop=100, num=100)\n', (5505, 5534), True, 'import numpy as np\n'), ((4407, 4419), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4417, 4419), False, 'import uuid\n')] |
import numpy as np
import ROOT
DIR_BOTH = 0
DIR_UP = 1
DIR_DOWN = -1
NUM_SECTORS = 68
NUM_SECTORS_Y = 14
# systematics has:
# a dict with with coordinate names "X", "Y" as keys
# - each value of these keys is a list/an array of systematic errors for each sector
# - so the list has the length of the number of sectors for that coordinate
# - these errors are quadratically added
# direction which can be DIR_DOWN, DIR_BOTH, or DIR_UP, depending on whether it adds only down, symmetric or up
# isRelative which is a flag telling whether the error is relative to the APE value or absolute
class SystematicErrors:
    """Per-sector systematic APE errors.

    Attributes
    ----------
    X, Y : numpy.ndarray
        Systematic error per sector for the X and Y coordinate
        (length NUM_SECTORS each); the listed errors are added
        quadratically.
    isRelative : numpy.ndarray of int
        1 if the error is relative to the APE value, 0 if absolute.
    direction : numpy.ndarray of int
        DIR_DOWN, DIR_BOTH, or DIR_UP depending on whether the error is
        applied only downwards, symmetrically, or only upwards.
    """
    def __init__(self):
        self.X = np.zeros(NUM_SECTORS)
        self.Y = np.zeros(NUM_SECTORS)
        # isRelative is not kept separately for X and Y. If that is wanted,
        # make two separate objects.
        self.isRelative = np.zeros(NUM_SECTORS, dtype=int)
        self.direction = np.empty(NUM_SECTORS, dtype=int)
        self.direction.fill(DIR_BOTH)  # symmetric errors by default
    def __getitem__(self, key):
        """Allow coordinate access by name, e.g. ``errs["X"]``."""
        return getattr(self, key)
    def getXFromList(self, X, startat=0):
        """Copy values from *X* into ``self.X`` starting at index *startat*."""
        for i, x in enumerate(X):
            self.X[i + startat] = x
    def getYFromList(self, Y, startat=0):
        """Copy values from *Y* into ``self.Y`` starting at index *startat*."""
        for i, y in enumerate(Y):
            self.Y[i + startat] = y
    # each line has the structure: xerr yerr isrel direction
    def write(self, fileName):
        """Write one line per sector: ``xerr yerr isRelative direction``."""
        with open(fileName, "w") as fi:
            # Fix: iterate X together with Y (the original zipped X with
            # itself) and terminate each record with a newline so that
            # read() can parse the file back line by line.
            for x, y, rel, direc in zip(self.X, self.Y, self.isRelative,
                                        self.direction):
                fi.write("{} {} {} {}\n".format(x, y, rel, direc))
    def read(self, fileName):
        """Parse a file written by :meth:`write`; returns self for chaining."""
        with open(fileName, "r") as fi:
            sector = 0
            for line in fi:
                x, y, rel, direc = line.rstrip().split(" ")
                self.X[sector] = float(x)
                self.Y[sector] = float(y)
                self.isRelative[sector] = int(rel)
                self.direction[sector] = int(direc)
                sector += 1
        return self
# difference between ape values in each sector
# returns a SystematicErrors object with values
def apeDifference(minuend, subtrahend):
    """Absolute per-sector difference between the final APE values of two
    iteration files.

    Parameters
    ----------
    minuend, subtrahend : str
        Paths of ROOT files containing ``iterTreeX`` and ``iterTreeY``
        iteration trees.

    Returns
    -------
    SystematicErrors
        Holds |APE_minuend - APE_subtrahend| per sector in X (all sectors)
        and Y (first NUM_SECTORS_Y sectors), marked as absolute errors
        applied symmetrically.
    """
    fileA = ROOT.TFile(minuend, "READ")
    fileB = ROOT.TFile(subtrahend, "READ")
    apeTreeA_X = fileA.Get("iterTreeX")
    apeTreeA_X.SetDirectory(0)
    apeTreeB_X = fileB.Get("iterTreeX")
    apeTreeB_X.SetDirectory(0)
    apeTreeA_Y = fileA.Get("iterTreeY")
    apeTreeA_Y.SetDirectory(0)
    apeTreeB_Y = fileB.Get("iterTreeY")
    apeTreeB_Y.SetDirectory(0)
    fileA.Close()
    fileB.Close()
    # get to last iteration of each tree
    apeTreeA_X.GetEntry(apeTreeA_X.GetEntries()-1)
    apeTreeB_X.GetEntry(apeTreeB_X.GetEntries()-1)
    apeTreeA_Y.GetEntry(apeTreeA_Y.GetEntries()-1)
    apeTreeB_Y.GetEntry(apeTreeB_Y.GetEntries()-1)
    difference = SystematicErrors()
    isRel = 0
    direc = 0
    for sector in range(1, NUM_SECTORS+1):
        name = "Ape_Sector_{}".format(sector)
        diffX = abs(getattr(apeTreeA_X, name) - getattr(apeTreeB_X, name))
        difference.X[sector-1] = diffX
        if sector <= NUM_SECTORS_Y:
            diffY = abs(getattr(apeTreeA_Y, name) - getattr(apeTreeB_Y, name))
            difference.Y[sector-1] = diffY
        # Fix: SystematicErrors defines ``isRelative``, not ``isRel``; the
        # original wrote to a non-existent attribute, which raised
        # AttributeError on the first loop iteration.
        difference.isRelative[sector-1] = isRel
        difference.direction[sector-1] = direc
    return difference
# inFile is allData.root, not allData_iterationApe.root
# returns two arrays with values in x and y
def numberOfHits(inFileName):
    """Count the residual-histogram entries per sector.

    Parameters
    ----------
    inFileName : str
        Path of ``allData.root`` (not ``allData_iterationApe.root``).

    Returns
    -------
    tuple of numpy.ndarray
        Integer hit counts per sector for X and for Y (only the first
        NUM_SECTORS_Y sectors have Y residuals).
    """
    rootFile = ROOT.TFile(inFileName, "READ")
    hits_x = np.zeros(NUM_SECTORS, dtype=int)
    hits_y = np.zeros(NUM_SECTORS, dtype=int)
    for idx in range(NUM_SECTORS):
        sector = idx + 1
        histX = rootFile.Get("ApeEstimator1/Sector_{}/Results/h_ResX".format(sector))
        hits_x[idx] = histX.GetEntries()
        if sector <= NUM_SECTORS_Y:
            histY = rootFile.Get("ApeEstimator1/Sector_{}/Results/h_ResY".format(sector))
            hits_y[idx] = histY.GetEntries()
    rootFile.Close()
    return hits_x, hits_y
def main():
    """Placeholder entry point; this module is meant to be imported."""
    pass
if __name__ == "__main__":
    main()
| [
"ROOT.TFile",
"numpy.zeros",
"numpy.empty"
] | [((2249, 2276), 'ROOT.TFile', 'ROOT.TFile', (['minuend', '"""READ"""'], {}), "(minuend, 'READ')\n", (2259, 2276), False, 'import ROOT\n'), ((2289, 2319), 'ROOT.TFile', 'ROOT.TFile', (['subtrahend', '"""READ"""'], {}), "(subtrahend, 'READ')\n", (2299, 2319), False, 'import ROOT\n'), ((3601, 3631), 'ROOT.TFile', 'ROOT.TFile', (['inFileName', '"""READ"""'], {}), "(inFileName, 'READ')\n", (3611, 3631), False, 'import ROOT\n'), ((3644, 3676), 'numpy.zeros', 'np.zeros', (['NUM_SECTORS'], {'dtype': 'int'}), '(NUM_SECTORS, dtype=int)\n', (3652, 3676), True, 'import numpy as np\n'), ((3689, 3721), 'numpy.zeros', 'np.zeros', (['NUM_SECTORS'], {'dtype': 'int'}), '(NUM_SECTORS, dtype=int)\n', (3697, 3721), True, 'import numpy as np\n'), ((731, 752), 'numpy.zeros', 'np.zeros', (['NUM_SECTORS'], {}), '(NUM_SECTORS)\n', (739, 752), True, 'import numpy as np\n'), ((770, 791), 'numpy.zeros', 'np.zeros', (['NUM_SECTORS'], {}), '(NUM_SECTORS)\n', (778, 791), True, 'import numpy as np\n'), ((909, 941), 'numpy.zeros', 'np.zeros', (['NUM_SECTORS'], {'dtype': 'int'}), '(NUM_SECTORS, dtype=int)\n', (917, 941), True, 'import numpy as np\n'), ((969, 1001), 'numpy.empty', 'np.empty', (['NUM_SECTORS'], {'dtype': 'int'}), '(NUM_SECTORS, dtype=int)\n', (977, 1001), True, 'import numpy as np\n')] |
import warnings
# import pathlib
from torch.utils import data
from mido.midifiles.meta import KeySignatureError
import pretty_midi as pm
import numpy as np
from utils import init_fn
# PATHLIST = list(pathlib.Path('Datasets').glob('**/*.[Mm][Ii][Dd]'))
# Load the MIDI file paths once at import time, shuffle them, and hold out
# the last 1024 files as the test split.
with open('pathlist.txt', 'r') as f:
    PATHLIST = [line.strip() for line in f]
np.random.shuffle(PATHLIST)
TRAIN_LIST = PATHLIST[:-1024]
TEST_LIST = PATHLIST[-1024:]
def midi_roll(path, input_length, output_length):
    """Convert a MIDI file into a flat event-token sequence.

    Events are encoded as 4-token groups ``[type, a, b, c]`` where type is:
      1 pad-before window / 2 pad-after window,
      3 note-on / 4 note-off for pitched instruments (a=program, b=pitch,
        c=velocity, 0 for note-off),
      6 drum-on / 7 drum-off (a=b=pitch),
      5 time-shift of ``a + b*32 + c*1024`` ticks (1 tick = 1/32768 s).

    Returns a random ``input_length`` window of the sequence as an int64
    array, plus a 256-dim multi-hot vector marking used programs (0-127)
    and drum pitches (128-255).
    """
    # pretty_midi tends to warn about odd files; silence it while parsing.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        song = pm.PrettyMIDI(str(path).replace('\\', '/'))
    event_list = []
    global_condition = np.zeros((256), dtype=np.float32)
    for inst in song.instruments:
        if inst.is_drum:
            for note in inst.notes:
                if note.velocity:
                    # Drum on/off events carry the pitch twice (types 6/7).
                    event_list.append((
                        int(note.start * 32768), 6,
                        note.pitch,
                        note.pitch,
                        note.velocity
                    ))
                    event_list.append((
                        int(note.end * 32768), 7,
                        note.pitch,
                        note.pitch,
                        0
                    ))
                    global_condition[128 + note.pitch] = 1
        else:
            global_condition[inst.program] = 1
            for note in inst.notes:
                if note.velocity:
                    # Pitched on/off events carry program and pitch (types 3/4).
                    event_list.append((
                        int(note.start * 32768), 3,
                        inst.program,
                        note.pitch,
                        note.velocity
                    ))
                    event_list.append((
                        int(note.end * 32768), 4,
                        inst.program,
                        note.pitch,
                        0
                    ))
    # Sort chronologically (tuples compare by start tick first).
    event_list.sort()
    input_list = [1] * (input_length // 4 * 4)
    current_time = event_list[0][0]
    for event in event_list:
        if event[0] > current_time:
            # Emit a time-shift token group; gaps are clamped to 32767 ticks.
            time = min(event[0] - current_time, 32767)
            input_list += [5, time % 32, time // 32 % 32, time // 1024]
            current_time = event[0]
        input_list += event[1:]
    input_list += [2] * (output_length // 4 * 4)
    # Pick a random window start aligned to a 4-token group boundary.
    num = np.random.randint(0, len(input_list) - input_length + 1) // 4 * 4
    target = np.array(input_list[num : num + input_length], dtype=np.int64)
    # NOTE(review): local_condition is built but not returned (commented out
    # below); its length is 4*(input_length // 4 * 4) + (input_length % 4),
    # which looks longer than input_length -- confirm before re-enabling.
    local_condition = [1, 2, 3, 4] * (input_length // 4 * 4) + [1, 2, 3, 4][:input_length % 4]
    local_condition = np.array(local_condition, dtype=np.float32)
    return target, global_condition#, local_condition
def piano_rolls_to_midi(roll):
    """Decode an event-token sequence (see ``midi_roll``) back into a
    :class:`pretty_midi.PrettyMIDI` object.

    Tokens are consumed in 4-token groups; type 1 groups (pad) are skipped,
    a type 2 group (end pad) stops decoding, types 3/6 open notes, types
    4/7 close them, and type 5 advances the running clock by
    ``a + b*32 + c*1024`` ticks (1 tick = 1/32768 s).
    """
    midi = pm.PrettyMIDI(resolution=960)
    # One instrument per MIDI program, plus index 128 for the drum kit.
    instruments = [pm.Instrument(i) for i in range(128)] \
        + [pm.Instrument(0, is_drum=True)]
    current_time = 0
    # start_time[instrument][pitch] -> list of (start_tick, velocity)
    # for notes that are currently sounding.
    start_time = [[[] for _ in range(128)] for _ in range(129)]
    roll = [roll[i : i + 4] for i in range(0, len(roll) // 4 * 4, 4)]
    for event in roll:
        if event[0] == 1:
            continue
        elif event[0] == 2:
            break
        elif event[0] == 3 or event[0] == 6:
            # Note-on: remember when it started and how loud.
            instrument = 128 if event[0] == 6 else event[1]
            start_time[instrument][event[2]].append((current_time, event[3]))
        elif event[0] == 4 or event[0] == 7:
            # Note-off: close every pending note of this pitch that has a
            # positive duration.
            instrument = 128 if event[0] == 7 else event[1]
            for start, velocity in start_time[instrument][event[2]]:
                if current_time > start:
                    instruments[instrument].notes.append(
                        pm.Note(
                            velocity=velocity,
                            pitch=event[2],
                            start=start / 32768,
                            end=current_time / 32768
                        )
                    )
            start_time[instrument][event[2]] = []
        elif event[0] == 5:
            # Time shift: mirrors the encoder's [t%32, t//32%32, t//1024].
            current_time = current_time + (event[1] + event[2] * 32 + event[3] * 1024)
    # Only keep instruments that actually produced notes.
    for inst in instruments:
        if inst.notes:
            midi.instruments.append(inst)
    return midi
class Dataset(data.Dataset):
    """Torch dataset that draws random MIDI files and converts them to
    event-token rolls via ``midi_roll``."""
    def __init__(self, train, input_length, output_length, dataset_length):
        super(Dataset, self).__init__()
        pool = TRAIN_LIST if train else TEST_LIST
        self.pathlist = np.array(pool)
        self.input_length = input_length
        self.output_length = output_length
        self.dataset_length = dataset_length
    def __getitem__(self, index):
        # Unreadable or corrupt MIDI files are simply re-drawn.
        while True:
            try:
                return midi_roll(
                    np.random.choice(self.pathlist),
                    self.input_length,
                    self.output_length
                )
            except (IndexError, IOError, EOFError, ValueError, KeySignatureError):
                continue
    def __len__(self):
        if self.dataset_length:
            return self.dataset_length
        return len(self.pathlist)
class DataLoader(data.DataLoader):
    """Thin wrapper around ``torch.utils.data.DataLoader`` that builds the
    MIDI ``Dataset`` internally, pins memory, and installs the shared
    worker init function."""
    def __init__(
        self,
        batch_size,
        shuffle=True,
        num_workers=16,
        train=True,
        input_length=0,
        output_length=0,
        dataset_length=0
    ):
        dataset = Dataset(train, input_length, output_length, dataset_length)
        super(DataLoader, self).__init__(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            num_workers=num_workers,
            pin_memory=True,
            worker_init_fn=init_fn
        )
| [
"warnings.simplefilter",
"pretty_midi.Note",
"numpy.zeros",
"pretty_midi.PrettyMIDI",
"numpy.array",
"warnings.catch_warnings",
"numpy.random.choice",
"pretty_midi.Instrument",
"numpy.random.shuffle"
] | [((360, 387), 'numpy.random.shuffle', 'np.random.shuffle', (['PATHLIST'], {}), '(PATHLIST)\n', (377, 387), True, 'import numpy as np\n'), ((676, 707), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'np.float32'}), '(256, dtype=np.float32)\n', (684, 707), True, 'import numpy as np\n'), ((2408, 2468), 'numpy.array', 'np.array', (['input_list[num:num + input_length]'], {'dtype': 'np.int64'}), '(input_list[num:num + input_length], dtype=np.int64)\n', (2416, 2468), True, 'import numpy as np\n'), ((2588, 2631), 'numpy.array', 'np.array', (['local_condition'], {'dtype': 'np.float32'}), '(local_condition, dtype=np.float32)\n', (2596, 2631), True, 'import numpy as np\n'), ((2729, 2758), 'pretty_midi.PrettyMIDI', 'pm.PrettyMIDI', ([], {'resolution': '(960)'}), '(resolution=960)\n', (2742, 2758), True, 'import pretty_midi as pm\n'), ((507, 532), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (530, 532), False, 'import warnings\n'), ((542, 573), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (563, 573), False, 'import warnings\n'), ((4315, 4359), 'numpy.array', 'np.array', (['(TRAIN_LIST if train else TEST_LIST)'], {}), '(TRAIN_LIST if train else TEST_LIST)\n', (4323, 4359), True, 'import numpy as np\n'), ((2778, 2794), 'pretty_midi.Instrument', 'pm.Instrument', (['i'], {}), '(i)\n', (2791, 2794), True, 'import pretty_midi as pm\n'), ((2837, 2867), 'pretty_midi.Instrument', 'pm.Instrument', (['(0)'], {'is_drum': '(True)'}), '(0, is_drum=True)\n', (2850, 2867), True, 'import pretty_midi as pm\n'), ((4615, 4646), 'numpy.random.choice', 'np.random.choice', (['self.pathlist'], {}), '(self.pathlist)\n', (4631, 4646), True, 'import numpy as np\n'), ((3620, 3714), 'pretty_midi.Note', 'pm.Note', ([], {'velocity': 'velocity', 'pitch': 'event[2]', 'start': '(start / 32768)', 'end': '(current_time / 32768)'}), '(velocity=velocity, pitch=event[2], start=start / 32768, end=\n current_time / 32768)\n', (3627, 3714), 
True, 'import pretty_midi as pm\n')] |
'''
An example of the lake problem using the ema workbench.
The model itself is adapted from the Rhodium example by <NAME>,
see https://gist.github.com/dhadka/a8d7095c98130d8f73bc
'''
import math
import numpy as np
import pandas as pd
from SALib.analyze import sobol
from scipy.optimize import brentq
from ema_workbench import (Model, RealParameter, ScalarOutcome, Constant,
ema_logging, MultiprocessingEvaluator, Policy)
from ema_workbench.em_framework import get_SALib_problem
from ema_workbench.em_framework.evaluators import SOBOL
def lake_problem(
        b=0.42,        # decay rate for P in lake (0.42 = irreversible)
        q=2.0,         # recycling exponent
        mean=0.02,     # mean of natural inflows
        stdev=0.001,   # standard deviation of natural inflows
        delta=0.98,    # future utility discount rate
        alpha=0.4,     # utility from pollution
        nsamples=100,  # Monte Carlo sampling of natural inflows
        **kwargs):
    """Simulate the shallow-lake pollution model.

    The 100 yearly release decisions are read from kwargs keys "0".."99";
    if any key is missing, a zero-release policy is used.
    (Fix: the original inline comments for ``stdev`` and ``delta`` were
    swapped -- ``stdev`` parameterizes the lognormal inflows and ``delta``
    is the discount base.)

    Returns
    -------
    tuple of float
        (max_P, utility, inertia, reliability)
    """
    try:
        decisions = [kwargs[str(i)] for i in range(100)]
    except KeyError:
        decisions = [0, ] * 100

    # Critical P level: the unstable equilibrium of the lake dynamics.
    Pcrit = brentq(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5)
    nvars = len(decisions)
    X = np.zeros((nvars,))
    average_daily_P = np.zeros((nvars,))
    decisions = np.array(decisions)
    reliability = 0.0

    for _ in range(nsamples):
        X[0] = 0.0
        # Lognormal inflows parameterized so that the arithmetic mean and
        # standard deviation equal `mean` and `stdev`.
        natural_inflows = np.random.lognormal(
            math.log(mean ** 2 / math.sqrt(stdev ** 2 + mean ** 2)),
            math.sqrt(math.log(1.0 + stdev ** 2 / mean ** 2)),
            size=nvars)
        for t in range(1, nvars):
            X[t] = (1 - b) * X[t - 1] + X[t - 1] ** q / (1 + X[t - 1] ** q) + \
                decisions[t - 1] + natural_inflows[t - 1]
            average_daily_P[t] += X[t] / float(nsamples)
        reliability += np.sum(X < Pcrit) / float(nsamples * nvars)

    max_P = np.max(average_daily_P)
    # Discounted utility of the release decisions.
    utility = np.sum(alpha * decisions * np.power(delta, np.arange(nvars)))
    # Fraction of consecutive decisions that change by less than 0.02.
    inertia = np.sum(np.absolute(np.diff(decisions)) < 0.02) / float(nvars - 1)
    return max_P, utility, inertia, reliability
def analyze(results, ooi):
    '''Compute Sobol sensitivity indices for one outcome of interest.

    Parameters
    ----------
    results : tuple
        (experiments, outcomes) as returned by perform_experiments.
    ooi : str
        Name of the outcome of interest, e.g. 'max_P'.

    Returns
    -------
    tuple of pandas.DataFrame
        First/total-order statistics (sorted by total-order index),
        second-order indices, and their confidence intervals.

    NOTE(review): relies on the module-level ``lake_model`` created in the
    ``__main__`` block below; calling this before that block runs raises
    NameError -- confirm intended usage.
    '''
    _, outcomes = results
    problem = get_SALib_problem(lake_model.uncertainties)
    y = outcomes[ooi]
    sobol_indices = sobol.analyze(problem, y)
    # Keep only the first- and total-order indices plus their confidences.
    sobol_stats = {key: sobol_indices[key] for key in ['ST', 'ST_conf', 'S1',
                                                       'S1_conf']}
    sobol_stats = pd.DataFrame(sobol_stats, index=problem['names'])
    # Fix: sort_values returns a new frame; the original discarded the
    # result, so the returned table was unsorted.
    sobol_stats = sobol_stats.sort_values(by='ST', ascending=False)
    s2 = pd.DataFrame(sobol_indices['S2'], index=problem['names'],
                      columns=problem['names'])
    s2_conf = pd.DataFrame(sobol_indices['S2_conf'], index=problem['names'],
                          columns=problem['names'])
    return sobol_stats, s2, s2_conf
if __name__ == '__main__':
    ema_logging.log_to_stderr(ema_logging.INFO)
    # instantiate the model
    lake_model = Model('lakeproblem', function=lake_problem)
    lake_model.time_horizon = 100
    # specify uncertainties
    lake_model.uncertainties = [RealParameter('b', 0.1, 0.45),
                                RealParameter('q', 2.0, 4.5),
                                RealParameter('mean', 0.01, 0.05),
                                RealParameter('stdev', 0.001, 0.005),
                                RealParameter('delta', 0.93, 0.99)]
    # set levers, one for each time step
    lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in
                         range(lake_model.time_horizon)]
    # specify outcomes
    lake_model.outcomes = [ScalarOutcome('max_P', ),
                           ScalarOutcome('utility'),
                           ScalarOutcome('inertia'),
                           ScalarOutcome('reliability')]
    # override some of the defaults of the model
    lake_model.constants = [Constant('alpha', 0.41),
                            Constant('nsamples', 150)]
    # generate a single default policy. NOTE(review): it is named
    # 'no release' but releases 0.1 at every time step -- confirm the name.
    policy = Policy('no release', **{str(i): 0.1 for i in range(100)})
    n_scenarios = 1000
    # run the experiments in parallel with Sobol sampling of uncertainties
    with MultiprocessingEvaluator(lake_model) as evaluator:
        results = evaluator.perform_experiments(n_scenarios, policy,
                                                uncertainty_sampling=SOBOL)
    # Sobol sensitivity analysis of the maximum phosphorus outcome
    sobol_stats, s2, s2_conf = analyze(results, 'max_P')
    print(sobol_stats)
    print(s2)
    print(s2_conf)
| [
"pandas.DataFrame",
"ema_workbench.RealParameter",
"ema_workbench.Model",
"scipy.optimize.brentq",
"numpy.sum",
"math.sqrt",
"ema_workbench.Constant",
"ema_workbench.ScalarOutcome",
"numpy.zeros",
"numpy.max",
"SALib.analyze.sobol.analyze",
"numpy.array",
"numpy.arange",
"numpy.diff",
"e... | [((1110, 1168), 'scipy.optimize.brentq', 'brentq', (['(lambda x: x ** q / (1 + x ** q) - b * x)', '(0.01)', '(1.5)'], {}), '(lambda x: x ** q / (1 + x ** q) - b * x, 0.01, 1.5)\n', (1116, 1168), False, 'from scipy.optimize import brentq\n'), ((1204, 1222), 'numpy.zeros', 'np.zeros', (['(nvars,)'], {}), '((nvars,))\n', (1212, 1222), True, 'import numpy as np\n'), ((1245, 1263), 'numpy.zeros', 'np.zeros', (['(nvars,)'], {}), '((nvars,))\n', (1253, 1263), True, 'import numpy as np\n'), ((1280, 1299), 'numpy.array', 'np.array', (['decisions'], {}), '(decisions)\n', (1288, 1299), True, 'import numpy as np\n'), ((1890, 1913), 'numpy.max', 'np.max', (['average_daily_P'], {}), '(average_daily_P)\n', (1896, 1913), True, 'import numpy as np\n'), ((2261, 2304), 'ema_workbench.em_framework.get_SALib_problem', 'get_SALib_problem', (['lake_model.uncertainties'], {}), '(lake_model.uncertainties)\n', (2278, 2304), False, 'from ema_workbench.em_framework import get_SALib_problem\n'), ((2347, 2372), 'SALib.analyze.sobol.analyze', 'sobol.analyze', (['problem', 'y'], {}), '(problem, y)\n', (2360, 2372), False, 'from SALib.analyze import sobol\n'), ((2536, 2585), 'pandas.DataFrame', 'pd.DataFrame', (['sobol_stats'], {'index': "problem['names']"}), "(sobol_stats, index=problem['names'])\n", (2548, 2585), True, 'import pandas as pd\n'), ((2649, 2737), 'pandas.DataFrame', 'pd.DataFrame', (["sobol_indices['S2']"], {'index': "problem['names']", 'columns': "problem['names']"}), "(sobol_indices['S2'], index=problem['names'], columns=problem[\n 'names'])\n", (2661, 2737), True, 'import pandas as pd\n'), ((2769, 2862), 'pandas.DataFrame', 'pd.DataFrame', (["sobol_indices['S2_conf']"], {'index': "problem['names']", 'columns': "problem['names']"}), "(sobol_indices['S2_conf'], index=problem['names'], columns=\n problem['names'])\n", (2781, 2862), True, 'import pandas as pd\n'), ((2955, 2998), 'ema_workbench.ema_logging.log_to_stderr', 'ema_logging.log_to_stderr', (['ema_logging.INFO'], 
{}), '(ema_logging.INFO)\n', (2980, 2998), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3045, 3088), 'ema_workbench.Model', 'Model', (['"""lakeproblem"""'], {'function': 'lake_problem'}), "('lakeproblem', function=lake_problem)\n", (3050, 3088), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3184, 3213), 'ema_workbench.RealParameter', 'RealParameter', (['"""b"""', '(0.1)', '(0.45)'], {}), "('b', 0.1, 0.45)\n", (3197, 3213), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3247, 3275), 'ema_workbench.RealParameter', 'RealParameter', (['"""q"""', '(2.0)', '(4.5)'], {}), "('q', 2.0, 4.5)\n", (3260, 3275), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3309, 3342), 'ema_workbench.RealParameter', 'RealParameter', (['"""mean"""', '(0.01)', '(0.05)'], {}), "('mean', 0.01, 0.05)\n", (3322, 3342), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3376, 3412), 'ema_workbench.RealParameter', 'RealParameter', (['"""stdev"""', '(0.001)', '(0.005)'], {}), "('stdev', 0.001, 0.005)\n", (3389, 3412), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3446, 3480), 'ema_workbench.RealParameter', 'RealParameter', (['"""delta"""', '(0.93)', '(0.99)'], {}), "('delta', 0.93, 0.99)\n", (3459, 3480), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3696, 3718), 'ema_workbench.ScalarOutcome', 'ScalarOutcome', (['"""max_P"""'], {}), "('max_P')\n", (3709, 3718), False, 'from ema_workbench import Model, 
RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3749, 3773), 'ema_workbench.ScalarOutcome', 'ScalarOutcome', (['"""utility"""'], {}), "('utility')\n", (3762, 3773), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3802, 3826), 'ema_workbench.ScalarOutcome', 'ScalarOutcome', (['"""inertia"""'], {}), "('inertia')\n", (3815, 3826), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3855, 3883), 'ema_workbench.ScalarOutcome', 'ScalarOutcome', (['"""reliability"""'], {}), "('reliability')\n", (3868, 3883), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((3963, 3986), 'ema_workbench.Constant', 'Constant', (['"""alpha"""', '(0.41)'], {}), "('alpha', 0.41)\n", (3971, 3986), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((4016, 4041), 'ema_workbench.Constant', 'Constant', (['"""nsamples"""', '(150)'], {}), "('nsamples', 150)\n", (4024, 4041), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((4200, 4236), 'ema_workbench.MultiprocessingEvaluator', 'MultiprocessingEvaluator', (['lake_model'], {}), '(lake_model)\n', (4224, 4236), False, 'from ema_workbench import Model, RealParameter, ScalarOutcome, Constant, ema_logging, MultiprocessingEvaluator, Policy\n'), ((1833, 1850), 'numpy.sum', 'np.sum', (['(X < Pcrit)'], {}), '(X < Pcrit)\n', (1839, 1850), True, 'import numpy as np\n'), ((1511, 1549), 'math.log', 'math.log', (['(1.0 + stdev ** 2 / mean ** 2)'], {}), '(1.0 + stdev ** 2 / mean ** 2)\n', (1519, 1549), False, 'import math\n'), ((1971, 1987), 'numpy.arange', 'np.arange', (['nvars'], {}), '(nvars)\n', (1980, 1987), 
True, 'import numpy as np\n'), ((1453, 1486), 'math.sqrt', 'math.sqrt', (['(stdev ** 2 + mean ** 2)'], {}), '(stdev ** 2 + mean ** 2)\n', (1462, 1486), False, 'import math\n'), ((2023, 2041), 'numpy.diff', 'np.diff', (['decisions'], {}), '(decisions)\n', (2030, 2041), True, 'import numpy as np\n')] |
from tomviz import utils
import numpy as np
from numpy.fft import fftn, fftshift, ifftn, ifftshift
import tomviz.operators
class ArtifactsTVOperator(tomviz.operators.CancelableOperator):
    # Cancelable tomviz operator that removes structured (wedge-shaped)
    # Fourier artifacts from every z-slice of an image stack.
    def transform_scalars(self, dataset, Niter=100, a=0.1,
                          wedgeSize=5, kmin=5, theta=0):
        """Remove structured artifacts with total-variation minimization.

        Each z-slice is Fourier transformed; frequencies inside an angular
        wedge of width ``wedgeSize`` (degrees) oriented at ``theta``
        (degrees) are treated as corrupted and re-estimated by ``Niter``
        rounds of data-constrained TV minimization with step weight ``a``.
        Frequencies with radius below ``kmin`` are always kept. The
        corrected stack is written back into ``dataset``."""
        #Import information from dataset
        array = utils.get_array(dataset)
        (nx, ny, nz) = array.shape
        # Convert angle from Degrees to Radians.
        theta = (theta+90)*(np.pi/180)
        dtheta = wedgeSize*(np.pi/180)
        # Create a polar coordinate grid.
        # NOTE(review): np.arange(-n/2, n/2-1) yields n-1 samples, so the
        # phi/rr grids are one sample smaller than the (nx, ny) mask they
        # index into; the last row/column of the mask is never cleared --
        # confirm this off-by-one is intended.
        x = np.arange(-nx/2, nx/2-1, dtype=np.float64)
        y = np.arange(-ny/2, ny/2-1, dtype=np.float64)
        [x, y] = np.meshgrid(x, y, indexing='ij')
        rr = (np.square(x) + np.square(y))
        phi = np.arctan2(x, y)
        # Create the angular mask: 0 marks the corrupted wedge (and its
        # point reflection), 1 marks trusted frequencies.
        mask = np.ones((nx, ny), dtype=np.int8)
        mask[np.where((phi >= (theta-dtheta/2)) &
                      (phi <= (theta+dtheta/2)))] = 0
        mask[np.where((phi >= (np.pi+theta-dtheta/2)) &
                      (phi <= (np.pi+theta+dtheta/2)))] = 0
        mask[np.where((phi >= (-np.pi+theta-dtheta/2)) &
                      (phi <= (-np.pi+theta+dtheta/2)))] = 0
        mask[np.where(rr < np.square(kmin))] = 1 # Keep values below rmin.
        mask = np.array(mask, dtype=bool)
        # Initialize Progress bar.
        self.progress.maximum = nz * Niter
        # Main loop over the slices of the stack.
        for i in range(nz):
            #FFT of the Original Image.
            FFT_image = fftshift(fftn(array[:, :, i]))
            # Reconstruction starts as random image.
            recon_init = np.random.rand(nx, ny)
            self.progress.message = 'Processing Image No.%d/%d ' % (i+1, nz)
            #TV Artifact Removal Loop
            for j in range(Niter):
                # FFT of Reconstructed Image.
                FFT_recon = fftshift(fftn(recon_init))
                # Impose the data constraint: trusted frequencies are
                # copied from the original image.
                FFT_recon[mask] = FFT_image[mask]
                #Inverse FFT
                recon_constraint = np.real(ifftn(ifftshift(FFT_recon)))
                # Positivity Constraint
                recon_constraint[recon_constraint < 0] = 0
                # TV minimization: 20 gradient-descent steps scaled by the
                # distance to the previous iterate.
                recon_minTV = recon_constraint
                d = np.linalg.norm(recon_minTV - recon_init)
                for k in range(20):
                    vst = TVDerivative(recon_minTV, nx, ny)
                    recon_minTV = recon_minTV - a*d*vst
                    if self.canceled:
                        return
                # Initialize the next loop.
                recon_init = recon_minTV
                # Update the Progress Bar.
                self.progress.value = i*Niter + j
            # Return reconstruction into stack.
            array[:, :, i] = recon_constraint
        #Set the result as the new scalars.
        utils.set_array(dataset, np.asfortranarray(array))
utils.set_array(dataset, np.asfortranarray(array))
def TVDerivative(img, nx, ny):
    """Normalized gradient of the smoothed isotropic TV norm of *img*.

    The image is zero-padded by one pixel, neighbor differences are taken
    via circular shifts of the padded array, and the result is cropped back
    to the original shape and scaled to unit Frobenius norm.
    ``nx``/``ny`` are accepted for interface compatibility but unused (the
    shape is taken from *img* itself).
    """
    eps = 1e-8  # keeps the denominators away from zero
    padded = np.pad(img, (1, 1), 'constant', constant_values=0)
    shift_up = np.roll(padded, -1, axis=0)
    shift_down = np.roll(padded, 1, axis=0)
    shift_left = np.roll(padded, -1, axis=1)
    shift_right = np.roll(padded, 1, axis=1)
    shift_right_up = np.roll(shift_right, -1, axis=0)
    shift_left_down = np.roll(shift_left, 1, axis=0)
    # Backward differences at the center pixel.
    dx = padded - shift_left
    dy = padded - shift_up
    term_center = (2*dx + 2*dy)/(np.sqrt(eps + dx**2 + dy**2))
    # Contributions of the right and lower neighbors' TV terms.
    term_right = (2*(shift_right - padded))/np.sqrt(
        eps + (shift_right - padded)**2 + (shift_right - shift_right_up)**2)
    term_down = (2*(shift_down - padded))/np.sqrt(
        eps + (shift_down - padded)**2 + (shift_down - shift_left_down)**2)
    grad = (term_center - term_right - term_down)[1:-1, 1:-1]
    return grad/np.linalg.norm(grad)
| [
"numpy.pad",
"numpy.fft.ifftshift",
"numpy.meshgrid",
"numpy.arctan2",
"numpy.roll",
"numpy.fft.fftn",
"numpy.square",
"tomviz.utils.get_array",
"numpy.ones",
"numpy.asfortranarray",
"numpy.where",
"numpy.arange",
"numpy.array",
"numpy.linalg.norm",
"numpy.random.rand",
"numpy.sqrt"
] | [((3120, 3170), 'numpy.pad', 'np.pad', (['img', '(1, 1)', '"""constant"""'], {'constant_values': '(0)'}), "(img, (1, 1), 'constant', constant_values=0)\n", (3126, 3170), True, 'import numpy as np\n'), ((3184, 3208), 'numpy.roll', 'np.roll', (['fxy', '(-1)'], {'axis': '(0)'}), '(fxy, -1, axis=0)\n', (3191, 3208), True, 'import numpy as np\n'), ((3226, 3249), 'numpy.roll', 'np.roll', (['fxy', '(1)'], {'axis': '(0)'}), '(fxy, 1, axis=0)\n', (3233, 3249), True, 'import numpy as np\n'), ((3270, 3294), 'numpy.roll', 'np.roll', (['fxy', '(-1)'], {'axis': '(1)'}), '(fxy, -1, axis=1)\n', (3277, 3294), True, 'import numpy as np\n'), ((3314, 3337), 'numpy.roll', 'np.roll', (['fxy', '(1)'], {'axis': '(1)'}), '(fxy, 1, axis=1)\n', (3321, 3337), True, 'import numpy as np\n'), ((449, 473), 'tomviz.utils.get_array', 'utils.get_array', (['dataset'], {}), '(dataset)\n', (464, 473), False, 'from tomviz import utils\n'), ((691, 739), 'numpy.arange', 'np.arange', (['(-nx / 2)', '(nx / 2 - 1)'], {'dtype': 'np.float64'}), '(-nx / 2, nx / 2 - 1, dtype=np.float64)\n', (700, 739), True, 'import numpy as np\n'), ((746, 794), 'numpy.arange', 'np.arange', (['(-ny / 2)', '(ny / 2 - 1)'], {'dtype': 'np.float64'}), '(-ny / 2, ny / 2 - 1, dtype=np.float64)\n', (755, 794), True, 'import numpy as np\n'), ((806, 838), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {'indexing': '"""ij"""'}), "(x, y, indexing='ij')\n", (817, 838), True, 'import numpy as np\n'), ((896, 912), 'numpy.arctan2', 'np.arctan2', (['x', 'y'], {}), '(x, y)\n', (906, 912), True, 'import numpy as np\n'), ((962, 994), 'numpy.ones', 'np.ones', (['(nx, ny)'], {'dtype': 'np.int8'}), '((nx, ny), dtype=np.int8)\n', (969, 994), True, 'import numpy as np\n'), ((1423, 1449), 'numpy.array', 'np.array', (['mask'], {'dtype': 'bool'}), '(mask, dtype=bool)\n', (1431, 1449), True, 'import numpy as np\n'), ((3370, 3393), 'numpy.roll', 'np.roll', (['fxy', '(1)'], {'axis': '(1)'}), '(fxy, 1, axis=1)\n', (3377, 3393), True, 'import numpy as 
np\n'), ((3445, 3469), 'numpy.roll', 'np.roll', (['fxy', '(-1)'], {'axis': '(1)'}), '(fxy, -1, axis=1)\n', (3452, 3469), True, 'import numpy as np\n'), ((3559, 3617), 'numpy.sqrt', 'np.sqrt', (['(1e-08 + (fxy - fnegxy) ** 2 + (fxy - fxnegy) ** 2)'], {}), '(1e-08 + (fxy - fnegxy) ** 2 + (fxy - fxnegy) ** 2)\n', (3566, 3617), True, 'import numpy as np\n'), ((3698, 3762), 'numpy.sqrt', 'np.sqrt', (['(1e-08 + (fposxy - fxy) ** 2 + (fposxy - fposxnegy) ** 2)'], {}), '(1e-08 + (fposxy - fxy) ** 2 + (fposxy - fposxnegy) ** 2)\n', (3705, 3762), True, 'import numpy as np\n'), ((3832, 3896), 'numpy.sqrt', 'np.sqrt', (['(1e-08 + (fxposy - fxy) ** 2 + (fxposy - fnegxposy) ** 2)'], {}), '(1e-08 + (fxposy - fxy) ** 2 + (fxposy - fnegxposy) ** 2)\n', (3839, 3896), True, 'import numpy as np\n'), ((3990, 4009), 'numpy.linalg.norm', 'np.linalg.norm', (['vst'], {}), '(vst)\n', (4004, 4009), True, 'import numpy as np\n'), ((853, 865), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (862, 865), True, 'import numpy as np\n'), ((868, 880), 'numpy.square', 'np.square', (['y'], {}), '(y)\n', (877, 880), True, 'import numpy as np\n'), ((1008, 1075), 'numpy.where', 'np.where', (['((phi >= theta - dtheta / 2) & (phi <= theta + dtheta / 2))'], {}), '((phi >= theta - dtheta / 2) & (phi <= theta + dtheta / 2))\n', (1016, 1075), True, 'import numpy as np\n'), ((1112, 1200), 'numpy.where', 'np.where', (['((phi >= np.pi + theta - dtheta / 2) & (phi <= np.pi + theta + dtheta / 2))'], {}), '((phi >= np.pi + theta - dtheta / 2) & (phi <= np.pi + theta + \n dtheta / 2))\n', (1120, 1200), True, 'import numpy as np\n'), ((1228, 1318), 'numpy.where', 'np.where', (['((phi >= -np.pi + theta - dtheta / 2) & (phi <= -np.pi + theta + dtheta / 2))'], {}), '((phi >= -np.pi + theta - dtheta / 2) & (phi <= -np.pi + theta + \n dtheta / 2))\n', (1236, 1318), True, 'import numpy as np\n'), ((1752, 1774), 'numpy.random.rand', 'np.random.rand', (['nx', 'ny'], {}), '(nx, ny)\n', (1766, 1774), True, 'import numpy as 
np\n'), ((3051, 3075), 'numpy.asfortranarray', 'np.asfortranarray', (['array'], {}), '(array)\n', (3068, 3075), True, 'import numpy as np\n'), ((1651, 1671), 'numpy.fft.fftn', 'fftn', (['array[:, :, i]'], {}), '(array[:, :, i])\n', (1655, 1671), False, 'from numpy.fft import fftn, fftshift, ifftn, ifftshift\n'), ((2434, 2474), 'numpy.linalg.norm', 'np.linalg.norm', (['(recon_minTV - recon_init)'], {}), '(recon_minTV - recon_init)\n', (2448, 2474), True, 'import numpy as np\n'), ((1360, 1375), 'numpy.square', 'np.square', (['kmin'], {}), '(kmin)\n', (1369, 1375), True, 'import numpy as np\n'), ((2011, 2027), 'numpy.fft.fftn', 'fftn', (['recon_init'], {}), '(recon_init)\n', (2015, 2027), False, 'from numpy.fft import fftn, fftshift, ifftn, ifftshift\n'), ((2204, 2224), 'numpy.fft.ifftshift', 'ifftshift', (['FFT_recon'], {}), '(FFT_recon)\n', (2213, 2224), False, 'from numpy.fft import fftn, fftshift, ifftn, ifftshift\n')] |
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
# Module authorship / licensing metadata.
__authors__ = ["<NAME> - ESRF ISDD Advanced Analysis and Modelling"]
__license__ = "MIT"
__date__ = "20/04/2017"
import numpy as np
from comsyl.utils.Logger import log
from comsyl.waveoptics.SRWAdapter import SRWAdapter
# Identifiers for where the virtual source plane sits relative to the undulator;
# consumed by WavefrontBuilder._setAdapterInitialZ and
# createReferenceWavefrontAtVirtualSource below.
VIRTUAL_SOURCE_CENTER = "center"
VIRTUAL_SOURCE_ENTRANCE = "entrance"
class WavefrontBuilder(object):
    """Builds SRW wavefronts for an undulator source.

    Wraps :class:`SRWAdapter` calls: the simulated angular aperture is clamped
    to the configured min/max physical dimensions, and the longitudinal start
    coordinate is chosen from the requested virtual source position
    (``VIRTUAL_SOURCE_CENTER`` / ``VIRTUAL_SOURCE_ENTRANCE``).
    """

    def __init__(self, undulator, sampling_factor, min_dimension_x, max_dimension_x, min_dimension_y, max_dimension_y, energy, source_position):
        # Stash the configuration; all values are read by the build methods.
        self._undulator = undulator
        self._sampling_factor = sampling_factor
        self._min_dimension_x = min_dimension_x
        self._max_dimension_x = max_dimension_x
        self._min_dimension_y = min_dimension_y
        self._max_dimension_y = max_dimension_y
        self._photon_energy = energy
        self._source_position = source_position

    def _applyLimits(self, value, minimum, maximum):
        # Clamp value into [minimum, maximum]; the lower bound is checked first.
        if value < minimum:
            return minimum
        if value > maximum:
            return maximum
        return value

    def _setAdapterInitialZ(self, adapter):
        # Set the adapter's initial longitudinal coordinate from the configured
        # virtual source position.
        position = self._source_position
        if position == VIRTUAL_SOURCE_CENTER:
            adapter._initial_z = 0.0
            raise NotImplementedError("CENTER position might need correction. Not yet implemented.")
        if position == VIRTUAL_SOURCE_ENTRANCE:
            undulator = self._undulator
            # Three periods upstream of the undulator entrance (entrance = half length).
            adapter._initial_z = undulator.period_length() * 3 + undulator.length() / 2.0
            return
        raise NotImplementedError("Source position %s" % position)

    def build(self, electron_beam, xp, yp, z_offset, x=0.0, y=0.0, initial_z=None):
        """Compute a wavefront on an automatically sized angular grid.

        ``initial_z`` is accepted for interface compatibility but not used here.
        """
        srw = SRWAdapter()
        srw.setSamplingFactor(self._sampling_factor)
        distance = self._undulator.length() + z_offset
        # Central-cone aperture enlarged by a safety factor of three.
        central_cone = self._undulator.gaussian_central_cone_aperture(electron_beam.gamma(), n=1) * 3.0
        # Angular bounds derived from the physical size limits at this distance.
        diagonal = np.sqrt(2.0)
        theta_x_min = 1.0 * self._min_dimension_x / distance * diagonal
        theta_x_max = 1.0 * self._max_dimension_x / distance * diagonal
        theta_y_min = 1.0 * self._min_dimension_y / distance * diagonal
        theta_y_max = 1.0 * self._max_dimension_y / distance * diagonal
        max_theta_x = self._applyLimits(central_cone, theta_x_min, theta_x_max)
        max_theta_y = self._applyLimits(max_theta_x / 1.5, theta_y_min, theta_y_max)
        self._setAdapterInitialZ(srw)
        log("Using initial z_0 for initial conditions: %e" % srw._initial_z)
        return srw.wavefrontRectangularForSingleEnergy(electron_beam,
                                                       self._undulator, distance,
                                                       max_theta_x,
                                                       max_theta_y,
                                                       self._photon_energy,
                                                       x=x,
                                                       xp=xp,
                                                       y=y,
                                                       yp=yp)

    def buildOnGrid(self, reference_wavefront, electron_beam, z_offset, xp, yp, x=0.0, y=0.0, ):
        """Compute a wavefront on the coordinate grid of *reference_wavefront*."""
        srw = SRWAdapter()
        srw.setSamplingFactor(self._sampling_factor)
        distance = self._undulator.length() + z_offset
        # Reuse the reference wavefront's spatial extent.
        extent_x = reference_wavefront.absolute_x_coordinates().max()
        extent_y = reference_wavefront.absolute_y_coordinates().max()
        photon_energy = self._photon_energy
        self._setAdapterInitialZ(srw)
        log("Using initial z_0 for initial conditions: %e" % srw._initial_z)
        return srw.wavefrontByCoordinates(electron_beam=electron_beam,
                                          undulator=self._undulator,
                                          z_start=distance,
                                          grid_length_x=extent_x,
                                          grid_length_y=extent_y,
                                          energy_number=1, energy_start=photon_energy, energy_end=photon_energy,
                                          x=x,
                                          xp=xp,
                                          y=y,
                                          yp=yp)

    def createReferenceWavefrontAtVirtualSource(self, Rx, dRx, Ry, dRy, configuration, source_position, wavefront):
        """Back-propagate *wavefront* to the virtual source plane and clip it.

        If the propagated wavefront exceeds the configured maximal source size,
        it is resampled onto the bounded domain.
        """
        srw = SRWAdapter()
        if source_position == VIRTUAL_SOURCE_CENTER:
            z = -1.0 * self._undulator.length()
        elif source_position == VIRTUAL_SOURCE_ENTRANCE:
            z = -1.5 * self._undulator.length()
        else:
            raise NotImplementedError("Source position %s" % source_position)
        log("Using source position: %s with z=%.02f" % (source_position, z))
        wavefront = srw.propagate(wavefront, Rx, dRx, Ry, dRy, z)
        # Symmetric clipping window from the configured maximal source size.
        x_max = configuration.sourceWavefrontMaximalSizeHorizontal()
        x_min = -x_max
        y_max = configuration.sourceWavefrontMaximalSizeVertical()
        y_min = -y_max
        outside_window = (x_min > wavefront.minimal_x_coodinate() or
                          x_max < wavefront.maximal_x_coodinate() or
                          y_min > wavefront.minimal_y_coodinate() or
                          y_max < wavefront.maximal_y_coodinate())
        if outside_window:
            dim_x = int((x_max - x_min) / wavefront.x_stepwidth())
            dim_y = int((y_max - y_min) / wavefront.y_stepwidth())
            # Empty-string divisor in the configuration means "no division".
            divisor_x = configuration.samplingFactorDivisorHorizontal()
            if divisor_x == "":
                divisor_x = 1.0
            divisor_y = configuration.samplingFactorDivisorVertical()
            if divisor_y == "":
                divisor_y = 1.0
            wavefront = wavefront.onDomain(x_min, x_max, int(dim_x / divisor_x),
                                           y_min, y_max, int(dim_y / divisor_y))
        return wavefront
| [
"comsyl.waveoptics.SRWAdapter.SRWAdapter",
"comsyl.utils.Logger.log",
"numpy.sqrt"
] | [((2941, 2953), 'comsyl.waveoptics.SRWAdapter.SRWAdapter', 'SRWAdapter', ([], {}), '()\n', (2951, 2953), False, 'from comsyl.waveoptics.SRWAdapter import SRWAdapter\n'), ((3737, 3809), 'comsyl.utils.Logger.log', 'log', (["('Using initial z_0 for initial conditions: %e' % adapter._initial_z)"], {}), "('Using initial z_0 for initial conditions: %e' % adapter._initial_z)\n", (3740, 3809), False, 'from comsyl.utils.Logger import log\n'), ((4684, 4696), 'comsyl.waveoptics.SRWAdapter.SRWAdapter', 'SRWAdapter', ([], {}), '()\n', (4694, 4696), False, 'from comsyl.waveoptics.SRWAdapter import SRWAdapter\n'), ((5043, 5115), 'comsyl.utils.Logger.log', 'log', (["('Using initial z_0 for initial conditions: %e' % adapter._initial_z)"], {}), "('Using initial z_0 for initial conditions: %e' % adapter._initial_z)\n", (5046, 5115), False, 'from comsyl.utils.Logger import log\n'), ((6049, 6061), 'comsyl.waveoptics.SRWAdapter.SRWAdapter', 'SRWAdapter', ([], {}), '()\n', (6059, 6061), False, 'from comsyl.waveoptics.SRWAdapter import SRWAdapter\n'), ((6408, 6476), 'comsyl.utils.Logger.log', 'log', (["('Using source position: %s with z=%.02f' % (source_position, z))"], {}), "('Using source position: %s with z=%.02f' % (source_position, z))\n", (6411, 6476), False, 'from comsyl.utils.Logger import log\n'), ((3229, 3241), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3236, 3241), True, 'import numpy as np\n'), ((3308, 3320), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3315, 3320), True, 'import numpy as np\n'), ((3388, 3400), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3395, 3400), True, 'import numpy as np\n'), ((3467, 3479), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (3474, 3479), True, 'import numpy as np\n')] |
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import os
import numpy as np
import torch
import sys
def data_loader(fn):
    """Load and return a (possibly pickled) NumPy archive from path *fn*."""
    return np.load(fn, allow_pickle=True)
def data_combiner():
    """Merge every 'packed_data' archive in the current directory.

    Returns a single ndarray containing the rows of all matching files,
    in directory-listing order.
    """
    merged = []
    for name in os.listdir("./"):
        if 'packed_data' not in name:
            continue
        # Each archive contributes all of its rows.
        for row in data_loader(name):
            merged.append(row)
    return np.array(merged)
#seperate computation mode? ...
class Net(nn.Module):
    """MLP over polynomial features of the input.

    forward(x) = fc3(act(fc2(fc11(x)) + fc12(x^2) + fc13(x^3))), where act is
    ReLU when ``use_relu`` is True and the identity otherwise.
    """

    def __init__(self, input_dim, output_dim, hidden_dim, layer_num, use_relu=True):
        super(Net, self).__init__()
        # One projection per polynomial order of the input.
        self.fc11 = nn.Linear(input_dim, hidden_dim)
        self.fc12 = nn.Linear(input_dim, hidden_dim)
        self.fc13 = nn.Linear(input_dim, hidden_dim)
        # NOTE(review): this stack is constructed (consuming `layer_num` RNG
        # draws) but never used by forward(); kept to preserve the original
        # parameter-initialization order — confirm before removing.
        unused_stack = []
        for _ in range(0, layer_num):
            unused_stack.append(nn.Linear(hidden_dim, hidden_dim))
            if use_relu:
                unused_stack.append(nn.ReLU())
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, output_dim, bias=True)
        self.relu = nn.ReLU()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_dim = hidden_dim
        self.layer_num = layer_num
        self.use_relu = use_relu

    def forward(self, x):
        squared = x * x
        cubed = squared * x
        hidden = self.fc2(self.fc11(x)) + self.fc12(squared) + self.fc13(cubed)
        if self.use_relu:
            hidden = self.relu(hidden)
        return self.fc3(hidden)
# def __init__(self,input_dim,output_dim,hidden_dim,layer_num,use_relu=True):
# super(Net, self).__init__()
# self.fc1 = nn.Linear(input_dim,hidden_dim)
# self.fc2 = nn.Linear(hidden_dim, hidden_dim)
# self.fc3 = nn.Linear(hidden_dim, output_dim)
# self.relu=nn.ReLU()
#
# self.input_dim=input_dim
# self.output_dim=output_dim
# self.hidden_dim=hidden_dim
# self.layer_num=layer_num
# self.use_relu=use_relu
#
# def forward(self, x):
# x =self.fc1(x)
# if self.use_relu:
# x=self.relu(x)
# for _ in range(0,self.layer_num):
# x=self.fc2(x)
# if self.use_relu:
# x=self.relu(x)
# x = self.fc3(x)
# return x
# --- Run configuration -------------------------------------------------------
eval_mode=False  # True: load ckpt_path and only evaluate eval_data_path; False: train
ckpt_path='tmp_weight.pth'  # checkpoint loaded in eval mode (training saves the same name)
eval_data_path='layer3/layer3_results_pack_6_50.npy'  # packed samples used in eval mode
#needs to be changed
layer_structure=[256,384,8,3,1]  # prefix prepended to each eval sample's feature vector
#layer_structure=[48,256,16,5,1]
# NOTE(review): SummaryWriter's argument is a log *directory*; "logging2.log"
# will be created as a directory rather than a single file — confirm intent.
writer=SummaryWriter("logging2.log")
# --- Dataset assembly: merge packed files, split 800 train / rest test -------
#raw_data=data_loader('combined_data.npy')
raw_data=data_combiner()
#print(len(raw_data))
#exit()
np.random.shuffle(raw_data)  # shuffle in place before splitting
train_data=raw_data[0:800,:]
# Column 0 holds per-sample feature vectors; stack them into a float32 matrix.
train_x=np.array([np.array(i) for i in train_data[:,0]],dtype='float32')
# Min/max of features 5.. are computed, but the normalization below is disabled.
max_train_x=np.amax(train_x[:,5:],axis=0)
min_train_x=np.amin(train_x[:,5:],axis=0)
#train_x[:,5:]=(train_x[:,5:]-min_train_x)/(max_train_x-min_train_x)
#train_x[:,0:5]=train_x[:,0:5]/512
train_x[:,5]=train_x[:,5]-1  # shift feature 5 down by one
# Column 1 holds the scalar regression target; reshape to (N, 1).
train_y=np.array(train_data[:,1],dtype='float32').reshape(train_data.shape[0],1)
max_train_y=np.amax(train_y,axis=0)
min_train_y=np.amin(train_y,axis=0)
#train_y=(train_y-min_train_y)/(max_train_y-min_train_y)
test_data=raw_data[800:,:]
#print(len(train_data[:,0][0]))
#exit()
train_set=TensorDataset(torch.tensor(train_x),torch.tensor(train_y))
train_loader=DataLoader(train_set,batch_size=400,num_workers=29)
# Test split gets the same feature treatment as the training split.
test_x=np.array([np.array(i) for i in test_data[:,0]],dtype='float32')
#test_x[:,5:]=(test_x[:,5:]-min_train_x)/(max_train_x-min_train_x)
#test_x[:,0:5]=test_x[:,0:5]/512
test_x[:,5]=test_x[:,5]-1
test_y=np.array(test_data[:,1],dtype='float32').reshape(test_data.shape[0],1)
#test_y=(test_y-min_train_y)/(max_train_y-min_train_y)
#print(test_y)
# NOTE(review): these tensor conversions stay commented out, yet the training
# loop below feeds test_x/test_y straight into the net/criterion — confirm.
#test_x=torch.tensor(test_x)
#test_y=torch.tensor(test_y)
# --- Polynomial-regression baseline ------------------------------------------
# Fits a degree-6 polynomial regression on the training split and prints the
# mean relative error on the test split.
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 6)
X_Poly = poly_reg.fit_transform(train_x)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_Poly, train_y)
pred_y=lin_reg_2.predict(poly_reg.fit_transform(test_x))
print(np.mean(np.abs(pred_y-test_y)/test_y))  # mean relative error of the baseline
# NOTE(review): this exit() makes the entire neural-network section below
# unreachable — remove it to actually train the Net defined above.
exit()
# --- Network / optimizer setup (unreachable while the exit() above remains) --
net=Net(17,1,256,2)  # 17 input features -> 1 scalar output, hidden width 256
#criterion=nn.KLDivLoss()
criterion=nn.MSELoss()
#optimizer = torch.optim.SGD(net.parameters(), lr=0.0001, momentum=0.9)
optimizer = torch.optim.Adam(net.parameters(), lr=3e-4)
#optimizer = torch.optim.RMSprop(net.parameters(), lr=3e-3, momentum=0.9)
# Decay the learning rate by 5% every 500 scheduler steps (stepped once per epoch).
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,500,0.95)
train_loss_logged=0  # last logged training loss (reported to TensorBoard per epoch)
if eval_mode:
    # Evaluation-only path: rebuild feature vectors, load the checkpoint,
    # print predictions/targets and the mean relative error, then quit.
    eval_data=[]
    raw_eval=np.load(eval_data_path,allow_pickle=True)
    for dp in raw_eval:
        # Feature vector = fixed layer_structure prefix + per-sample features.
        eval_data.append([layer_structure+dp[0][0]+[dp[0][1]],dp[1][0]])
    eval_data=np.array(eval_data)
    eval_inputs=torch.tensor(np.array([np.array(i) for i in eval_data[:,0]],dtype='float32'))
    eval_targets=torch.tensor(np.array(eval_data[:,1],dtype='float32').reshape(eval_data.shape[0],1))
    net.load_state_dict(torch.load(ckpt_path))
    net.eval()
    eval_outputs=net(eval_inputs)
    print(eval_outputs.data.numpy())
    print(eval_targets)
    # Mean relative error |pred - target| / target.
    print(torch.mean(torch.true_divide(torch.abs(eval_outputs-eval_targets),eval_targets)).item())
    exit()
# --- Training loop (unreachable while the baseline exit() above remains) -----
for epoch in range(20000): # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if i % 1 == 0:  # i % 1 is always 0, so this logs on every batch
            print('[%d, %5d] loss: %.3f ' %
                  (epoch + 1, i + 1, running_loss / 5))
            train_loss_logged=running_loss
            running_loss = 0.0
    print("="*15)
    #eval
    # NOTE(review): test_x / test_y are still NumPy arrays here (their
    # torch.tensor conversion is commented out above), so net(test_x) and
    # criterion(...) would fail if this loop were ever reached — confirm.
    tested_outputs=net(test_x)
    print(torch.mean(torch.true_divide(torch.abs(tested_outputs-test_y),test_y)).item())
    test_loss=criterion(tested_outputs, test_y)
    print('test loss: %.3f' % (test_loss.item()))
    scheduler.step()
    writer.add_scalar('Loss/train', train_loss_logged, epoch)
    writer.add_scalar('Loss/test', test_loss.item(), epoch)
    writer.add_scalar('Accuracy/test', torch.mean(torch.true_divide(torch.abs(tested_outputs-test_y),test_y)).item(), epoch)
    if epoch % 20==19:
        # Periodic checkpoint; hard-codes the same file name as ckpt_path above.
        torch.save(net.state_dict(), 'tmp_weight.pth')
print('Finished Training')
| [
"torch.nn.MSELoss",
"numpy.load",
"torch.optim.lr_scheduler.StepLR",
"numpy.amin",
"torch.utils.data.DataLoader",
"torch.nn.ReLU",
"numpy.abs",
"torch.load",
"numpy.amax",
"sklearn.preprocessing.PolynomialFeatures",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"torch.utils.tensorb... | [((2741, 2770), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['"""logging2.log"""'], {}), "('logging2.log')\n", (2754, 2770), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2871, 2898), 'numpy.random.shuffle', 'np.random.shuffle', (['raw_data'], {}), '(raw_data)\n', (2888, 2898), True, 'import numpy as np\n'), ((3013, 3044), 'numpy.amax', 'np.amax', (['train_x[:, 5:]'], {'axis': '(0)'}), '(train_x[:, 5:], axis=0)\n', (3020, 3044), True, 'import numpy as np\n'), ((3055, 3086), 'numpy.amin', 'np.amin', (['train_x[:, 5:]'], {'axis': '(0)'}), '(train_x[:, 5:], axis=0)\n', (3062, 3086), True, 'import numpy as np\n'), ((3310, 3334), 'numpy.amax', 'np.amax', (['train_y'], {'axis': '(0)'}), '(train_y, axis=0)\n', (3317, 3334), True, 'import numpy as np\n'), ((3346, 3370), 'numpy.amin', 'np.amin', (['train_y'], {'axis': '(0)'}), '(train_y, axis=0)\n', (3353, 3370), True, 'import numpy as np\n'), ((3576, 3629), 'torch.utils.data.DataLoader', 'DataLoader', (['train_set'], {'batch_size': '(400)', 'num_workers': '(29)'}), '(train_set, batch_size=400, num_workers=29)\n', (3586, 3629), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((4150, 4178), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {'degree': '(6)'}), '(degree=6)\n', (4168, 4178), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((4235, 4253), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4251, 4253), False, 'from sklearn.linear_model import LinearRegression\n'), ((4453, 4465), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (4463, 4465), True, 'import torch.nn as nn\n'), ((4680, 4733), 'torch.optim.lr_scheduler.StepLR', 'torch.optim.lr_scheduler.StepLR', (['optimizer', '(500)', '(0.95)'], {}), '(optimizer, 500, 0.95)\n', (4711, 4733), False, 'import torch\n'), ((242, 272), 'numpy.load', 'np.load', (['fn'], {'allow_pickle': '(True)'}), '(fn, allow_pickle=True)\n', 
(249, 272), True, 'import numpy as np\n'), ((343, 359), 'os.listdir', 'os.listdir', (['"""./"""'], {}), "('./')\n", (353, 359), False, 'import os\n'), ((501, 524), 'numpy.array', 'np.array', (['combined_data'], {}), '(combined_data)\n', (509, 524), True, 'import numpy as np\n'), ((3518, 3539), 'torch.tensor', 'torch.tensor', (['train_x'], {}), '(train_x)\n', (3530, 3539), False, 'import torch\n'), ((3540, 3561), 'torch.tensor', 'torch.tensor', (['train_y'], {}), '(train_y)\n', (3552, 3561), False, 'import torch\n'), ((4799, 4841), 'numpy.load', 'np.load', (['eval_data_path'], {'allow_pickle': '(True)'}), '(eval_data_path, allow_pickle=True)\n', (4806, 4841), True, 'import numpy as np\n'), ((4952, 4971), 'numpy.array', 'np.array', (['eval_data'], {}), '(eval_data)\n', (4960, 4971), True, 'import numpy as np\n'), ((720, 752), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_dim'], {}), '(input_dim, hidden_dim)\n', (729, 752), True, 'import torch.nn as nn\n'), ((772, 804), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_dim'], {}), '(input_dim, hidden_dim)\n', (781, 804), True, 'import torch.nn as nn\n'), ((824, 856), 'torch.nn.Linear', 'nn.Linear', (['input_dim', 'hidden_dim'], {}), '(input_dim, hidden_dim)\n', (833, 856), True, 'import torch.nn as nn\n'), ((1098, 1131), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (1107, 1131), True, 'import torch.nn as nn\n'), ((1151, 1195), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'output_dim'], {'bias': '(True)'}), '(hidden_dim, output_dim, bias=True)\n', (1160, 1195), True, 'import torch.nn as nn\n'), ((1213, 1222), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1220, 1222), True, 'import torch.nn as nn\n'), ((2946, 2957), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2954, 2957), True, 'import numpy as np\n'), ((3225, 3268), 'numpy.array', 'np.array', (['train_data[:, 1]'], {'dtype': '"""float32"""'}), "(train_data[:, 1], dtype='float32')\n", (3233, 
3268), True, 'import numpy as np\n'), ((3647, 3658), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (3655, 3658), True, 'import numpy as np\n'), ((3834, 3876), 'numpy.array', 'np.array', (['test_data[:, 1]'], {'dtype': '"""float32"""'}), "(test_data[:, 1], dtype='float32')\n", (3842, 3876), True, 'import numpy as np\n'), ((5192, 5213), 'torch.load', 'torch.load', (['ckpt_path'], {}), '(ckpt_path)\n', (5202, 5213), False, 'import torch\n'), ((4356, 4379), 'numpy.abs', 'np.abs', (['(pred_y - test_y)'], {}), '(pred_y - test_y)\n', (4362, 4379), True, 'import numpy as np\n'), ((937, 970), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', 'hidden_dim'], {}), '(hidden_dim, hidden_dim)\n', (946, 970), True, 'import torch.nn as nn\n'), ((5011, 5022), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (5019, 5022), True, 'import numpy as np\n'), ((5096, 5138), 'numpy.array', 'np.array', (['eval_data[:, 1]'], {'dtype': '"""float32"""'}), "(eval_data[:, 1], dtype='float32')\n", (5104, 5138), True, 'import numpy as np\n'), ((1027, 1036), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1034, 1036), True, 'import torch.nn as nn\n'), ((5364, 5402), 'torch.abs', 'torch.abs', (['(eval_outputs - eval_targets)'], {}), '(eval_outputs - eval_targets)\n', (5373, 5402), False, 'import torch\n'), ((6261, 6295), 'torch.abs', 'torch.abs', (['(tested_outputs - test_y)'], {}), '(tested_outputs - test_y)\n', (6270, 6295), False, 'import torch\n'), ((6621, 6655), 'torch.abs', 'torch.abs', (['(tested_outputs - test_y)'], {}), '(tested_outputs - test_y)\n', (6630, 6655), False, 'import torch\n')] |
import numpy as np
from PIL import Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scipy import stats
def grayscale():
    """Write a grayscale copy of temp_img.jpeg to temp_img_grayscale.jpeg.

    Each channel is weighted by its share of the image's total intensity,
    so brighter channels contribute more to the gray value.
    """
    source = Image.open("static/img/temp_img.jpeg").convert("RGBA")
    pixels = np.asarray(source)
    red = pixels[:, :, 0]
    green = pixels[:, :, 1]
    blue = pixels[:, :, 2]
    red_total = np.sum(red)
    green_total = np.sum(green)
    blue_total = np.sum(blue)
    grand_total = red_total + green_total + blue_total
    gray = (red_total / grand_total * red
            + green_total / grand_total * green
            + blue_total / grand_total * blue)
    result = Image.fromarray(gray).convert("RGB")
    result.save("static/img/temp_img_grayscale.jpeg")
def invers():
    """Invert the colors of temp_img.jpeg and save as temp_img_inverse.jpeg.

    Each RGB channel value v becomes 255 - v.

    Fix: the original forced setflags(write=1) on the array returned by
    np.asarray(img) and mutated it in place; that array is a read-only view
    of the PIL buffer, and modern NumPy raises ValueError when the writeable
    flag is enabled on it. Computing ``255 - arr`` allocates a fresh writable
    array with the identical result.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = 255 - np.asarray(img)  # uint8-safe: values stay within [0, 255]
    img_new = Image.fromarray(img_arr)
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_inverse.jpeg")
def zoomin():
    """Produce a half-size image by averaging 2x2 neighborhoods.

    Output is written to temp_img_zoomin.jpeg. Cells of the half-size output
    that are never written keep the white fill value (255).

    NOTE(review): source indices are not doubled, so the value written at
    output (row, col) is the 2x2 box average around *source* (row, col) — the
    result is a smoothed top-left quadrant rather than a true 2x downscale.
    Confirm intent before changing.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = np.asarray(img)
    # img_arr.setflags(write=1)
    # Half-size output, pre-filled with white.
    new_size = ((img_arr.shape[0] // 2),
                (img_arr.shape[1] // 2), img_arr.shape[2])
    new_arr = np.full(new_size, 255)
    print(img_arr.shape, new_size)
    new_arr.setflags(write=1)
    img_arr_shape = img_arr.shape
    for row in range(img_arr_shape[0]):
        for col in range(img_arr_shape[1]):
            try:
                # int(...) casts avoid uint8 overflow while summing four pixels;
                # writes beyond the half-size output raise IndexError and are
                # swallowed by the except below, which breaks the inner loop.
                new_arr[row, col, 0] = (int(img_arr[row, col, 0]) + int(img_arr[row + 1, col, 0]) + int(
                    img_arr[row, col + 1, 0]) + int(img_arr[row + 1, col + 1, 0])) // 4
                new_arr[row, col, 1] = (int(img_arr[row, col, 1]) + int(img_arr[row + 1, col, 1]) + int(
                    img_arr[row, col + 1, 1]) + int(img_arr[row + 1, col + 1, 1])) // 4
                new_arr[row, col, 2] = (int(img_arr[row, col, 2]) + int(img_arr[row + 1, col, 2]) + int(
                    img_arr[row, col + 1, 2]) + int(img_arr[row + 1, col + 1, 2])) // 4
            except:
                break
            # The two increments below are no-ops: the for loops rebind
            # row/col on every iteration.
            col += 1
        row += 1
    new_arr = np.uint8(new_arr)
    img_new = Image.fromarray(new_arr)
    img_new.save("static/img/temp_img_zoomin.jpeg")
def zoomout():
    """Run zoomin(), then write each of its pixels to a 2x2 block of a
    double-size canvas, saving the result as temp_img_zoomout.jpeg.

    NOTE(review): destination indices are not doubled, so consecutive 2x2
    writes overlap and mostly overwrite each other — the net effect is a copy
    with a one-pixel smear, not a true 2x upscale. Confirm intent.
    """
    zoomin()
    img = Image.open("static/img/temp_img_zoomin.jpeg")
    img = img.convert("RGB")
    img_arr = np.asarray(img)
    img_arr_shape = img_arr.shape
    # Double-size canvas, pre-filled with white.
    arr_x_size = img_arr.shape[0] * 2
    arr_y_size = img_arr.shape[1] * 2
    new_arr = np.full((arr_x_size, arr_y_size, 3), 255)
    new_arr.setflags(write=1)
    for row in range(img_arr_shape[0]):
        for col in range(img_arr_shape[1]):
            # Replicate the source pixel's three channels into a 2x2 block.
            pix_1, pix_2, pix_3 = img_arr[row, col,
                                          0], img_arr[row, col, 1], img_arr[row, col, 2]
            new_arr[row, col, 0], new_arr[row + 1, col, 0], new_arr[row, col +
                                                                    1, 0], new_arr[row + 1, col + 1, 0] = pix_1, pix_1, pix_1, pix_1
            new_arr[row, col, 1], new_arr[row + 1, col, 1], new_arr[row, col +
                                                                    1, 1], new_arr[row + 1, col + 1, 1] = pix_2, pix_2, pix_2, pix_2
            new_arr[row, col, 2], new_arr[row + 1, col, 2], new_arr[row, col +
                                                                    1, 2], new_arr[row + 1, col + 1, 2] = pix_3, pix_3, pix_3, pix_3
            # No-op increments: the for loops rebind row/col each iteration.
            col += 1
        row += 1
    # print(new_arr)
    new_arr = np.uint8(new_arr)
    img_new = Image.fromarray(new_arr)
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_zoomout.jpeg")
def crop():
    """Crop temp_img.jpeg to its central half (per dimension) and save it.

    Keeps rows [H/4, 3H/4) and columns [W/4, 3W/4), writing the result to
    temp_img_crop.jpeg.

    Fix: dropped the setflags(write=1) call — np.asarray on a PIL image
    returns a read-only view, so enabling the writeable flag raises
    ValueError on modern NumPy; the function only slices the array and never
    writes to it, so the call was unnecessary anyway.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = np.asarray(img)
    middle_x = img_arr.shape[0]
    middle_y = img_arr.shape[1]
    middle_x_start = middle_x * 1 // 4
    middle_x_end = middle_x * 3 // 4
    middle_y_start = middle_y * 1 // 4
    middle_y_end = middle_y * 3 // 4
    img_arr = img_arr[middle_x_start:middle_x_end,
                      middle_y_start:middle_y_end, :]
    img_new = Image.fromarray(img_arr)
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_crop.jpeg")
def flipvertical():
    """Mirror temp_img.jpeg top-to-bottom and save as temp_img_flipvertical.jpeg."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asarray(source)
    # Reversing the row axis is exactly the per-pixel vertical flip.
    mirrored = pixels[::-1, :, :].copy()
    result = Image.fromarray(mirrored).convert("RGB")
    result.save("static/img/temp_img_flipvertical.jpeg")
def fliphorizontal():
    """Mirror temp_img.jpeg left-to-right and save as temp_img_fliphorizontal.jpeg."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asarray(source)
    # Reversing the column axis is exactly the per-pixel horizontal flip.
    mirrored = pixels[:, ::-1, :].copy()
    result = Image.fromarray(mirrored).convert("RGB")
    result.save("static/img/temp_img_fliphorizontal.jpeg")
def brightnesswithincrease(val=0):
    """Brighten temp_img.jpeg by adding *val* to every channel, clipped to [0, 255]."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    shifted = np.clip(np.asfarray(source) + int(val), 0, 255)
    result = Image.fromarray(shifted.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_brightnesswithincrease.jpeg")
def brightnesswithmultiply(val=0):
    """Brighten temp_img.jpeg by multiplying every channel by *val*, clipped to [0, 255]."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    scaled = np.clip(np.asfarray(source) * int(val), 0, 255)
    result = Image.fromarray(scaled.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_brightnesswithmultiply.jpeg")
def darkeningwithdecrease(val=0):
    """Darken temp_img.jpeg by subtracting *val* from every channel, clipped to [0, 255]."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    shifted = np.clip(np.asfarray(source) - int(val), 0, 255)
    result = Image.fromarray(shifted.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_darkeningwithdecrease.jpeg")
def darkeningwithdivide(val=0):
    """Darken temp_img.jpeg by floor-dividing every channel by *val*, clipped to [0, 255]."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    scaled = np.clip(np.asfarray(source) // int(val), 0, 255)
    result = Image.fromarray(scaled.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_darkeningwithdivide.jpeg")
def rotation90(img_file="static/img/temp_img.jpeg"):
    """Rotate *img_file* a quarter turn clockwise; save as temp_img_rotated.jpeg."""
    source = Image.open(img_file).convert("RGB")
    pixels = np.asarray(source)
    # Transpose swaps rows/columns, then reversing the new column axis yields
    # the clockwise rotation: out[c, H-1-r] = in[r, c].
    turned = pixels.transpose(1, 0, 2)[:, ::-1, :]
    result = Image.fromarray(turned.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_rotated.jpeg")
def rotation180():
    """Rotate temp_img.jpeg by 180 degrees via two chained 90-degree rotations.

    The first call reads the original image; the second re-reads the
    intermediate result from temp_img_rotated.jpeg (each step re-encodes JPEG).
    """
    rotation90()
    rotation90("static/img/temp_img_rotated.jpeg")
def rotation270():
    """Rotate temp_img.jpeg by 270 degrees: a 180-degree rotation followed by
    another 90-degree turn on the intermediate temp_img_rotated.jpeg."""
    rotation180()
    rotation90("static/img/temp_img_rotated.jpeg")
def histogram():
    """Plot and save per-channel (R, G, B) intensity histograms of temp_img.jpeg."""
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asarray(source)
    # Count occurrences of each 0..255 value per channel in one pass.
    counts = [np.bincount(pixels[:, :, idx].ravel(), minlength=256)
              for idx in range(3)]
    bins = [i for i in range(256)]
    bar_width = 1 / 1.5
    channel_plots = (
        (counts[0], "r", "Red Histogram", "static/img/temp_red_hist.jpeg"),
        (counts[1], "g", "Green Histogram", "static/img/temp_green_hist.jpeg"),
        (counts[2], "b", "Blue Histogram", "static/img/temp_blue_hist.jpeg"),
    )
    for values, color, title, path in channel_plots:
        plt.bar(bins, values, bar_width, color=color)
        plt.title(title)
        plt.savefig(path)
        plt.clf()
def pad3D(c_x, padlen=1):
    """Zero-pad the last two axes of a 3-D array by *padlen* on each side.

    The first axis is left untouched and the dtype is preserved.
    """
    pad_spec = ((0, 0), (padlen, padlen), (padlen, padlen))
    return np.pad(c_x, pad_spec, mode='constant', constant_values=0)
def convolute(mat11, mat12, mat13, mat21, mat22, mat23, mat31, mat32, mat33, mode):
    """Apply a 3x3 convolution kernel (given element by element) to the image.

    mode == "edge" convolves the grayscaled image; any other mode convolves
    temp_img.jpeg directly. mode == "ordinary" additionally casts the kernel
    to int. The one-pixel border stays zero, and since the image is opened as
    RGBA but only channels 0-2 are written, the alpha channel of `temp` stays
    zero until convert("RGB") discards it. Result: temp_img_convolution.jpeg.
    """
    if mode == "edge":
        grayscale()
        img = Image.open("static/img/temp_img_grayscale.jpeg")
    else:
        img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGBA")
    img_arr = np.asfarray(img)
    h, w, _ = img_arr.shape
    temp = np.zeros_like(img_arr)
    ker = np.array([[mat11, mat12, mat13],
                    [mat21, mat22, mat23],
                    [mat31, mat32, mat33]])
    # np.place(ker, ker == "", 0)
    if mode == "ordinary":
        ker = ker.astype("int")
    # img_arr = np.pad(img_arr, ((0,0), (1,1), (1,1)), mode='constant')
    # img_arr = pad3D(img_arr)
    print(img_arr)
    # Inner pixels only; each channel is the weighted sum of its 3x3 window.
    for i in range(1, h-1):
        for j in range(1, w-1):
            temp[i, j, 0] = img_arr[i - 1, j - 1, 0] * ker[0, 0] + img_arr[i - 1, j, 0] * ker[0, 1] + img_arr[i - 1, j + 1, 0] * ker[0, 2] + img_arr[i, j - 1, 0] * ker[1, 0] + \
                img_arr[i, j, 0] * ker[1, 1] + img_arr[i, j + 1, 0] * ker[1, 2] + img_arr[i + 1, j - 1,
                                                                                          0] * ker[2, 0] + img_arr[i + 1, j, 0] * ker[2, 1] + img_arr[i + 1, j + 1, 0] * ker[2, 2]
            temp[i, j, 1] = img_arr[i - 1, j - 1, 1] * ker[0, 0] + img_arr[i - 1, j, 1] * ker[0, 1] + img_arr[i - 1, j + 1, 1] * ker[0, 2] + img_arr[i, j - 1, 1] * ker[1, 0] + \
                img_arr[i, j, 1] * ker[1, 1] + img_arr[i, j + 1, 1] * ker[1, 2] + img_arr[i + 1, j - 1,
                                                                                          1] * ker[2, 0] + img_arr[i + 1, j, 1] * ker[2, 1] + img_arr[i + 1, j + 1, 1] * ker[2, 2]
            temp[i, j, 2] = img_arr[i - 1, j - 1, 2] * ker[0, 0] + img_arr[i - 1, j, 2] * ker[0, 1] + img_arr[i - 1, j + 1, 2] * ker[0, 2] + img_arr[i, j - 1, 2] * ker[1, 0] + \
                img_arr[i, j, 2] * ker[1, 1] + img_arr[i, j + 1, 2] * ker[1, 2] + img_arr[i + 1, j - 1,
                                                                                          2] * ker[2, 0] + img_arr[i + 1, j, 2] * ker[2, 1] + img_arr[i + 1, j + 1, 2] * ker[2, 2]
    new_arr = np.clip(temp, 0, 255)
    img_new = Image.fromarray(new_arr.astype('uint8'))
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_convolution.jpeg")
def median_filter():
    """Apply a 3x3 per-channel median filter to temp_img.jpeg.

    The one-pixel border stays zero. Result: temp_img_medianfilter.jpeg.
    """
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asfarray(source)
    rows, cols, _ = pixels.shape
    filtered = np.zeros_like(pixels)
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            # (3, 3, channels) window flattened to 9 pixel vectors.
            window = pixels[r - 1:r + 2, c - 1:c + 2, :].reshape(9, -1)
            filtered[r, c, :] = np.median(window, axis=0)
    result = Image.fromarray(filtered.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_medianfilter.jpeg")
def mean_filter():
    """Apply a 3x3 per-channel mean (box) filter to temp_img.jpeg.

    The one-pixel border stays zero. Result: temp_img_meanfilter.jpeg.
    """
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asfarray(source)
    rows, cols, _ = pixels.shape
    filtered = np.zeros_like(pixels)
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            # (3, 3, channels) window flattened to 9 pixel vectors.
            window = pixels[r - 1:r + 2, c - 1:c + 2, :].reshape(9, -1)
            filtered[r, c, :] = np.mean(window, axis=0)
    result = Image.fromarray(filtered.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_meanfilter.jpeg")
def mode_filter():
    """Apply a 3x3 per-channel mode (most frequent value) filter to temp_img.jpeg.

    The one-pixel border stays zero. Result: temp_img_modefilter.jpeg.
    """
    source = Image.open("static/img/temp_img.jpeg").convert("RGB")
    pixels = np.asfarray(source)
    rows, cols, _ = pixels.shape
    filtered = np.zeros_like(pixels)
    for r in range(1, rows - 1):
        for c in range(1, cols - 1):
            # (3, 3, channels) window flattened to 9 pixel vectors.
            window = pixels[r - 1:r + 2, c - 1:c + 2, :].reshape(9, -1)
            filtered[r, c, :] = stats.mode(window, axis=0)[0]
    result = Image.fromarray(filtered.astype('uint8')).convert("RGB")
    result.save("static/img/temp_img_modefilter.jpeg")
def seed_region_growth(seed=10):
    """Grow colour regions around every interior pixel of temp_img.jpeg.

    For each interior pixel (the seed point) and each channel, every
    pixel in its 3x3 neighbourhood whose value lies within +/- ``seed``
    of the centre value is overwritten with the centre value in the
    output image.  The result is saved to
    static/img/temp_img_seedregiongrowth.jpeg.

    Args:
        seed: tolerance (in intensity units) for absorbing a neighbour.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = np.asfarray(img)
    h, w, c = img_arr.shape
    temp = np.zeros_like(img_arr)
    # 3x3 neighbourhood offsets, listed in the same order the original
    # code visited them so overwrite semantics are preserved.
    offsets = ((-1, -1), (-1, 0), (-1, 1),
               (0, -1), (0, 0), (0, 1),
               (1, -1), (1, 0), (1, 1))
    for i in range(1, h - 1):
        for j in range(1, w - 1):
            for k in range(c):
                centre = img_arr[i, j, k]
                for di, dj in offsets:
                    # BUG FIX: the original centre-pixel case compared
                    # against the left neighbour img_arr[i, j-1, k]
                    # instead of the centre itself, so the centre pixel
                    # was sometimes (incorrectly) left unset.  Comparing
                    # each neighbour -- including (0, 0) -- against the
                    # centre value fixes that.
                    neighbour = img_arr[i + di, j + dj, k]
                    if centre - seed <= neighbour <= centre + seed:
                        temp[i + di, j + dj, k] = centre
    img_new = Image.fromarray(temp.astype('uint8'))
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_seedregiongrowth.jpeg")
def threshold_segmentation():
    """Segment temp_img.jpeg into flat colour classes by RGB thresholds.

    Each pixel is classified by hard-coded per-channel thresholds into
    one of several colour buckets; unmatched pixels remain black (0).
    The result is saved to static/img/temp_img_threshold_segmentation.jpeg.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = np.asfarray(img)
    h, w, _ = img_arr.shape
    temp = np.zeros_like(img_arr)
    for i in range(h):
        for j in range(w):
            # bright in all channels -> white
            if img_arr[i, j, 0] >= 200 and img_arr[i, j, 1] >= 200 and img_arr[i, j, 2] >= 200:
                temp[i, j, :] = 255
            # dominant red -> pure red
            elif img_arr[i, j, 0] >= 200 and img_arr[i, j, 1] <= 100 and img_arr[i, j, 2] <= 100:
                temp[i, j, 0] = 255
            # dominant green -> pure green
            elif img_arr[i, j, 0] <= 100 and img_arr[i, j, 1] >= 200 and img_arr[i, j, 2] <= 100:
                temp[i, j, 1] = 255
            # dominant blue -> pure blue
            elif img_arr[i, j, 0] <= 100 and img_arr[i, j, 1] <= 100 and img_arr[i, j, 2] >= 200:
                temp[i, j, 2] = 255
            # high red + green, low blue -> yellow
            elif img_arr[i, j, 0] >= 170 and img_arr[i, j, 1] >= 170 and img_arr[i, j, 2] <= 100:
                temp[i, j, 0] = 255
                temp[i, j, 1] = 255
            # high green + blue, low red -> cyan
            elif img_arr[i, j, 0] <= 100 and img_arr[i, j, 1] >= 170 and img_arr[i, j, 2] >= 170:
                temp[i, j, 1] = 255
                temp[i, j, 2] = 255
            # NOTE(review): this matches high R, low G, high B (magenta-like)
            # but sets channels 1 and 2 (cyan); channels 0 and 2 may have
            # been intended -- confirm with the author.
            elif img_arr[i, j, 0] >= 150 and img_arr[i, j, 1] <= 100 and img_arr[i, j, 2] >= 150:
                temp[i, j, 1] = 255
                temp[i, j, 2] = 255
            # moderate red only -> dark red (128)
            elif img_arr[i, j, 0] >= 100 and img_arr[i, j, 1] <= 50 and img_arr[i, j, 2] <= 50:
                temp[i, j, 0] = 128
            # moderate green only -> dark green (128)
            elif img_arr[i, j, 0] <= 50 and img_arr[i, j, 1] >= 100 and img_arr[i, j, 2] <= 50:
                temp[i, j, 1] = 128
            # moderate blue only -> dark blue (128)
            elif img_arr[i, j, 0] <= 70 and img_arr[i, j, 1] <= 70 and img_arr[i, j, 2] >= 100:
                temp[i, j, 2] = 128
    img_new = Image.fromarray(temp.astype('uint8'))
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_threshold_segmentation.jpeg")
def dilasi():
    """Morphological dilation of the binarised temp image.

    Binarises static/img/temp_img.jpeg, dilates with a 3x3 kernel of
    ones (a pixel turns on if ANY neighbour in the window is on), and
    saves the 0/255 result to static/img/temp_img_dilasi.jpeg.
    """
    img_arr = binarize_image("static/img/temp_img.jpeg")
    img_arr[img_arr == 255] = 1
    h, w = img_arr.shape
    temp = np.zeros_like(img_arr)
    ker = np.ones((3, 3))
    for i in range(1, h - 1):
        for j in range(1, w - 1):
            window = img_arr[i - 1:i + 2, j - 1:j + 2] * ker
            # One lit neighbour is enough to switch the pixel on.
            if (window >= 1).any():
                temp[i, j] = 1
    temp[temp == 1] = 255
    img_new = Image.fromarray(temp.astype('uint8'))
    img_new.save("static/img/temp_img_dilasi.jpeg")
def erosi():
    """Morphological erosion of the binarised temp image.

    Binarises static/img/temp_img.jpeg, erodes with a 3x3 kernel of
    ones (a pixel survives only if EVERY neighbour in the window is on),
    and saves the 0/255 result to static/img/temp_img_erosi.jpeg.
    """
    img_arr = binarize_image("static/img/temp_img.jpeg")
    img_arr[img_arr == 255] = 1
    h, w = img_arr.shape
    temp = np.zeros_like(img_arr)
    ker = np.ones((3, 3))
    for i in range(1, h - 1):
        for j in range(1, w - 1):
            window = img_arr[i - 1:i + 2, j - 1:j + 2] * ker
            # Every pixel in the window must be lit for the centre to survive.
            if (window >= 1).all():
                temp[i, j] = 1
    temp[temp == 1] = 255
    img_new = Image.fromarray(temp.astype('uint8'))
    img_new.save("static/img/temp_img_erosi.jpeg")
def binarize_image(img_path):
    """Load an image, convert it to grayscale, and threshold it to 0/255.

    Args:
        img_path: path of the image file to binarize.

    Returns:
        A numpy array containing only 0 and 255 values.
    """
    grayscale = Image.open(img_path).convert('L')  # monochrome
    return binarize_array(np.array(grayscale))
def binarize_array(numpy_array, threshold=50):
    """Binarize a numpy array in place.

    Every element strictly greater than ``threshold`` becomes 255; all
    other elements (including those equal to the threshold) become 0.
    The input array is mutated and also returned for convenience.

    Args:
        numpy_array: numeric numpy array (any shape).
        threshold: cut-off value; defaults to 50.

    Returns:
        The same array object, now containing only 0 and 255.
    """
    # Vectorised replacement for the original per-element double loop:
    # compute the mask once, then write both partitions in place.
    above = numpy_array > threshold
    numpy_array[above] = 255
    numpy_array[~above] = 0
    return numpy_array
def image_compression():
    """Quantise each RGB channel of temp_img.jpeg to a multiple of 10.

    Reads static/img/temp_img.jpeg, floors every channel value down to
    the nearest multiple of 10 (reducing the palette roughly tenfold),
    and saves the result to static/img/temp_img_compressed.jpeg.
    """
    img = Image.open("static/img/temp_img.jpeg")
    img = img.convert("RGB")
    img_arr = np.asfarray(img)
    # Floor every (non-negative) channel value to a multiple of 10.
    # Replaces the original per-pixel string slicing
    # int(str(int(v))[:-1] + "0") -- same result, one vectorised pass --
    # and drops the leftover debug prints.
    temp = (img_arr.astype(np.int64) // 10) * 10
    img_new = Image.fromarray(temp.astype('uint8'))
    img_new = img_new.convert("RGB")
    img_new.save("static/img/temp_img_compressed.jpeg")
| [
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.bar",
"numpy.ones",
"numpy.clip",
"numpy.mean",
"numpy.full",
"numpy.zeros_like",
"numpy.asfarray",
"numpy.uint8",
"scipy.stats.mode",
"numpy.median",
"numpy.asarray",
"matplotlib.use",
"numpy.zeros",
... | [((59, 80), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (73, 80), False, 'import matplotlib\n'), ((166, 204), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (176, 204), False, 'from PIL import Image\n'), ((250, 265), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (260, 265), True, 'import numpy as np\n'), ((354, 363), 'numpy.sum', 'np.sum', (['r'], {}), '(r)\n', (360, 363), True, 'import numpy as np\n'), ((376, 385), 'numpy.sum', 'np.sum', (['g'], {}), '(g)\n', (382, 385), True, 'import numpy as np\n'), ((398, 407), 'numpy.sum', 'np.sum', (['b'], {}), '(b)\n', (404, 407), True, 'import numpy as np\n'), ((825, 850), 'PIL.Image.fromarray', 'Image.fromarray', (['arr_gray'], {}), '(arr_gray)\n', (840, 850), False, 'from PIL import Image\n'), ((969, 1007), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (979, 1007), False, 'from PIL import Image\n'), ((1052, 1067), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1062, 1067), True, 'import numpy as np\n'), ((1251, 1275), 'PIL.Image.fromarray', 'Image.fromarray', (['img_arr'], {}), '(img_arr)\n', (1266, 1275), False, 'from PIL import Image\n'), ((1392, 1430), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (1402, 1430), False, 'from PIL import Image\n'), ((1475, 1490), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (1485, 1490), True, 'import numpy as np\n'), ((1637, 1659), 'numpy.full', 'np.full', (['new_size', '(255)'], {}), '(new_size, 255)\n', (1644, 1659), True, 'import numpy as np\n'), ((2536, 2553), 'numpy.uint8', 'np.uint8', (['new_arr'], {}), '(new_arr)\n', (2544, 2553), True, 'import numpy as np\n'), ((2568, 2592), 'PIL.Image.fromarray', 'Image.fromarray', (['new_arr'], {}), '(new_arr)\n', (2583, 2592), False, 'from PIL import Image\n'), ((2685, 2730), 'PIL.Image.open', 
'Image.open', (['"""static/img/temp_img_zoomin.jpeg"""'], {}), "('static/img/temp_img_zoomin.jpeg')\n", (2695, 2730), False, 'from PIL import Image\n'), ((2775, 2790), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2785, 2790), True, 'import numpy as np\n'), ((2917, 2958), 'numpy.full', 'np.full', (['(arr_x_size, arr_y_size, 3)', '(255)'], {}), '((arr_x_size, arr_y_size, 3), 255)\n', (2924, 2958), True, 'import numpy as np\n'), ((3925, 3942), 'numpy.uint8', 'np.uint8', (['new_arr'], {}), '(new_arr)\n', (3933, 3942), True, 'import numpy as np\n'), ((3957, 3981), 'PIL.Image.fromarray', 'Image.fromarray', (['new_arr'], {}), '(new_arr)\n', (3972, 3981), False, 'from PIL import Image\n'), ((4096, 4134), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (4106, 4134), False, 'from PIL import Image\n'), ((4179, 4194), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (4189, 4194), True, 'import numpy as np\n'), ((4564, 4588), 'PIL.Image.fromarray', 'Image.fromarray', (['img_arr'], {}), '(img_arr)\n', (4579, 4588), False, 'from PIL import Image\n'), ((4708, 4746), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (4718, 4746), False, 'from PIL import Image\n'), ((4790, 4805), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (4800, 4805), True, 'import numpy as np\n'), ((5187, 5215), 'PIL.Image.fromarray', 'Image.fromarray', (['flipped_arr'], {}), '(flipped_arr)\n', (5202, 5215), False, 'from PIL import Image\n'), ((5345, 5383), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (5355, 5383), False, 'from PIL import Image\n'), ((5427, 5442), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (5437, 5442), True, 'import numpy as np\n'), ((5824, 5852), 'PIL.Image.fromarray', 'Image.fromarray', (['flipped_arr'], {}), '(flipped_arr)\n', (5839, 5852), False, 'from PIL import 
Image\n'), ((5997, 6035), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (6007, 6035), False, 'from PIL import Image\n'), ((6079, 6095), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (6090, 6095), True, 'import numpy as np\n'), ((6144, 6168), 'numpy.clip', 'np.clip', (['new_arr', '(0)', '(255)'], {}), '(new_arr, 0, 255)\n', (6151, 6168), True, 'import numpy as np\n'), ((6377, 6415), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (6387, 6415), False, 'from PIL import Image\n'), ((6459, 6475), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (6470, 6475), True, 'import numpy as np\n'), ((6524, 6548), 'numpy.clip', 'np.clip', (['new_arr', '(0)', '(255)'], {}), '(new_arr, 0, 255)\n', (6531, 6548), True, 'import numpy as np\n'), ((6756, 6794), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (6766, 6794), False, 'from PIL import Image\n'), ((6838, 6854), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (6849, 6854), True, 'import numpy as np\n'), ((6903, 6927), 'numpy.clip', 'np.clip', (['new_arr', '(0)', '(255)'], {}), '(new_arr, 0, 255)\n', (6910, 6927), True, 'import numpy as np\n'), ((7132, 7170), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (7142, 7170), False, 'from PIL import Image\n'), ((7214, 7230), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (7225, 7230), True, 'import numpy as np\n'), ((7280, 7304), 'numpy.clip', 'np.clip', (['new_arr', '(0)', '(255)'], {}), '(new_arr, 0, 255)\n', (7287, 7304), True, 'import numpy as np\n'), ((7528, 7548), 'PIL.Image.open', 'Image.open', (['img_file'], {}), '(img_file)\n', (7538, 7548), False, 'from PIL import Image\n'), ((7592, 7607), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (7602, 7607), True, 'import numpy as np\n'), 
((7625, 7689), 'numpy.zeros', 'np.zeros', (['(img_arr.shape[1], img_arr.shape[0], img_arr.shape[2])'], {}), '((img_arr.shape[1], img_arr.shape[0], img_arr.shape[2]))\n', (7633, 7689), True, 'import numpy as np\n'), ((8265, 8303), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (8275, 8303), False, 'from PIL import Image\n'), ((8347, 8362), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (8357, 8362), True, 'import numpy as np\n'), ((8377, 8390), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (8385, 8390), True, 'import numpy as np\n'), ((8404, 8417), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (8412, 8417), True, 'import numpy as np\n'), ((8431, 8444), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (8439, 8444), True, 'import numpy as np\n'), ((8673, 8709), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'temp_r', 'width'], {'color': '"""r"""'}), "(x, temp_r, width, color='r')\n", (8680, 8709), True, 'import matplotlib.pyplot as plt\n'), ((8714, 8740), 'matplotlib.pyplot.title', 'plt.title', (['"""Red Histogram"""'], {}), "('Red Histogram')\n", (8723, 8740), True, 'import matplotlib.pyplot as plt\n'), ((8745, 8789), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""static/img/temp_red_hist.jpeg"""'], {}), "('static/img/temp_red_hist.jpeg')\n", (8756, 8789), True, 'import matplotlib.pyplot as plt\n'), ((8794, 8803), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8801, 8803), True, 'import matplotlib.pyplot as plt\n'), ((8835, 8871), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'temp_g', 'width'], {'color': '"""g"""'}), "(x, temp_g, width, color='g')\n", (8842, 8871), True, 'import matplotlib.pyplot as plt\n'), ((8876, 8904), 'matplotlib.pyplot.title', 'plt.title', (['"""Green Histogram"""'], {}), "('Green Histogram')\n", (8885, 8904), True, 'import matplotlib.pyplot as plt\n'), ((8909, 8955), 'matplotlib.pyplot.savefig', 'plt.savefig', 
(['"""static/img/temp_green_hist.jpeg"""'], {}), "('static/img/temp_green_hist.jpeg')\n", (8920, 8955), True, 'import matplotlib.pyplot as plt\n'), ((8960, 8969), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8967, 8969), True, 'import matplotlib.pyplot as plt\n'), ((9001, 9037), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'temp_b', 'width'], {'color': '"""b"""'}), "(x, temp_b, width, color='b')\n", (9008, 9037), True, 'import matplotlib.pyplot as plt\n'), ((9042, 9069), 'matplotlib.pyplot.title', 'plt.title', (['"""Blue Histogram"""'], {}), "('Blue Histogram')\n", (9051, 9069), True, 'import matplotlib.pyplot as plt\n'), ((9074, 9119), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""static/img/temp_blue_hist.jpeg"""'], {}), "('static/img/temp_blue_hist.jpeg')\n", (9085, 9119), True, 'import matplotlib.pyplot as plt\n'), ((9124, 9133), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (9131, 9133), True, 'import matplotlib.pyplot as plt\n'), ((9196, 9258), 'numpy.zeros', 'np.zeros', (['(m, n + 2 * padlen, r + 2 * padlen)'], {'dtype': 'c_x.dtype'}), '((m, n + 2 * padlen, r + 2 * padlen), dtype=c_x.dtype)\n', (9204, 9258), True, 'import numpy as np\n'), ((9614, 9630), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (9625, 9630), True, 'import numpy as np\n'), ((9672, 9694), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (9685, 9694), True, 'import numpy as np\n'), ((9705, 9784), 'numpy.array', 'np.array', (['[[mat11, mat12, mat13], [mat21, mat22, mat23], [mat31, mat32, mat33]]'], {}), '([[mat11, mat12, mat13], [mat21, mat22, mat23], [mat31, mat32, mat33]])\n', (9713, 9784), True, 'import numpy as np\n'), ((11500, 11521), 'numpy.clip', 'np.clip', (['temp', '(0)', '(255)'], {}), '(temp, 0, 255)\n', (11507, 11521), True, 'import numpy as np\n'), ((11704, 11742), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (11714, 11742), False, 'from PIL import Image\n'), 
((11786, 11802), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (11797, 11802), True, 'import numpy as np\n'), ((11843, 11865), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (11856, 11865), True, 'import numpy as np\n'), ((12401, 12439), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (12411, 12439), False, 'from PIL import Image\n'), ((12483, 12499), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (12494, 12499), True, 'import numpy as np\n'), ((12540, 12562), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (12553, 12562), True, 'import numpy as np\n'), ((13094, 13132), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (13104, 13132), False, 'from PIL import Image\n'), ((13176, 13192), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (13187, 13192), True, 'import numpy as np\n'), ((13233, 13255), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (13246, 13255), True, 'import numpy as np\n'), ((13807, 13845), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (13817, 13845), False, 'from PIL import Image\n'), ((13889, 13905), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (13900, 13905), True, 'import numpy as np\n'), ((13946, 13968), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (13959, 13968), True, 'import numpy as np\n'), ((15868, 15906), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (15878, 15906), False, 'from PIL import Image\n'), ((15950, 15966), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (15961, 15966), True, 'import numpy as np\n'), ((16007, 16029), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (16020, 16029), True, 'import numpy as np\n'), 
((17820, 17842), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (17833, 17842), True, 'import numpy as np\n'), ((17853, 17868), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (17860, 17868), True, 'import numpy as np\n'), ((18614, 18636), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (18627, 18636), True, 'import numpy as np\n'), ((18647, 18662), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (18654, 18662), True, 'import numpy as np\n'), ((19313, 19333), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (19323, 19333), False, 'from PIL import Image\n'), ((19413, 19428), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (19421, 19428), True, 'import numpy as np\n'), ((19865, 19903), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (19875, 19903), False, 'from PIL import Image\n'), ((19947, 19963), 'numpy.asfarray', 'np.asfarray', (['img'], {}), '(img)\n', (19958, 19963), True, 'import numpy as np\n'), ((20022, 20044), 'numpy.zeros_like', 'np.zeros_like', (['img_arr'], {}), '(img_arr)\n', (20035, 20044), True, 'import numpy as np\n'), ((9458, 9506), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img_grayscale.jpeg"""'], {}), "('static/img/temp_img_grayscale.jpeg')\n", (9468, 9506), False, 'from PIL import Image\n'), ((9531, 9569), 'PIL.Image.open', 'Image.open', (['"""static/img/temp_img.jpeg"""'], {}), "('static/img/temp_img.jpeg')\n", (9541, 9569), False, 'from PIL import Image\n'), ((11951, 12179), 'numpy.array', 'np.array', (['[img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j + 1, :],\n img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :], img_arr[i +\n 1, j - 1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]]'], {}), '([img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j +\n 1, :], img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :],\n img_arr[i + 1, j - 
1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]])\n', (11959, 12179), True, 'import numpy as np\n'), ((12193, 12221), 'numpy.median', 'np.median', (['arr_value'], {'axis': '(0)'}), '(arr_value, axis=0)\n', (12202, 12221), True, 'import numpy as np\n'), ((12648, 12876), 'numpy.array', 'np.array', (['[img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j + 1, :],\n img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :], img_arr[i +\n 1, j - 1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]]'], {}), '([img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j +\n 1, :], img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :],\n img_arr[i + 1, j - 1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]])\n', (12656, 12876), True, 'import numpy as np\n'), ((12890, 12916), 'numpy.mean', 'np.mean', (['arr_value'], {'axis': '(0)'}), '(arr_value, axis=0)\n', (12897, 12916), True, 'import numpy as np\n'), ((13341, 13569), 'numpy.array', 'np.array', (['[img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j + 1, :],\n img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :], img_arr[i +\n 1, j - 1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]]'], {}), '([img_arr[i - 1, j - 1, :], img_arr[i - 1, j, :], img_arr[i - 1, j +\n 1, :], img_arr[i, j - 1, :], img_arr[i, j, :], img_arr[i, j + 1, :],\n img_arr[i + 1, j - 1, :], img_arr[i + 1, j, :], img_arr[i + 1, j + 1, :]])\n', (13349, 13569), True, 'import numpy as np\n'), ((13583, 13612), 'scipy.stats.mode', 'stats.mode', (['arr_value'], {'axis': '(0)'}), '(arr_value, axis=0)\n', (13593, 13612), False, 'from scipy import stats\n')] |
import numpy as np
from scipy import sparse, stats
import sklearn.utils.sparsefuncs as sf
def diffexp_ttest(meanA, vA, nA, meanB, vB, nB, top_n=8, diffexp_lfc_cutoff=0.01):
    """Differential expression via Welch's t-test between two groups.

    Thin wrapper around :func:`diffexp_ttest_from_mean_var`.

    Args:
        meanA, vA, nA: per-variable mean, variance and sample count of group A.
        meanB, vB, nB: per-variable mean, variance and sample count of group B.
        top_n: number of top positive/negative results to return.
        diffexp_lfc_cutoff: minimum |log2 fold change| for a result.

    BUG FIX: the original ignored the ``top_n`` argument and hard-coded
    1000, so callers could not control how many results were returned.
    """
    return diffexp_ttest_from_mean_var(meanA, vA, nA, meanB, vB, nB, top_n, diffexp_lfc_cutoff)
def diffexp_ttest_from_mean_var(meanA, varA, nA, meanB, varB, nB, top_n, diffexp_lfc_cutoff):
    """Welch's t-test differential expression from per-group summary stats.

    Computes t-scores, (Bonferroni-adjusted) p-values, and log2 fold
    changes for each variable, then returns the ``top_n`` most positive
    and most negative results whose |log2 fold change| exceeds
    ``diffexp_lfc_cutoff``.

    Returns:
        dict with keys "positive" and "negative", each a list of
        [variable_index, logfoldchange, pval, pval_adj] rows.
    """
    n_var = meanA.shape[0]
    top_n = min(n_var,top_n)
    # variance / N
    vnA = varA / min(nA, nB)  # overestimate variance, would normally be nA
    vnB = varB / min(nA, nB)  # overestimate variance, would normally be nB
    sum_vn = vnA + vnB
    # degrees of freedom for Welch's t-test (Welch-Satterthwaite);
    # divide-by-zero is silenced and NaN dof is patched to 1 below
    with np.errstate(divide="ignore", invalid="ignore"):
        dof = sum_vn ** 2 / (vnA ** 2 / (nA - 1) + vnB ** 2 / (nB - 1))
    dof[np.isnan(dof)] = 1
    # Welch's t-test score calculation
    with np.errstate(divide="ignore", invalid="ignore"):
        tscores = (meanA - meanB) / np.sqrt(sum_vn)
    tscores[np.isnan(tscores)] = 0
    # two-sided p-value from the t distribution's survival function
    pvals = stats.t.sf(np.abs(tscores), dof) * 2
    # Bonferroni correction: multiply by the number of tests
    pvals_adj = pvals * n_var
    pvals_adj[pvals_adj > 1] = 1  # cap adjusted p-value at 1
    # logfoldchanges: log2(meanA / meanB); 1e-9 guards against zero means
    logfoldchanges = np.log2(np.abs((meanA + 1e-9) / (meanB + 1e-9)))
    stats_to_sort = tscores
    # find all with lfc > cutoff
    lfc_above_cutoff_idx = np.nonzero(np.abs(logfoldchanges) > diffexp_lfc_cutoff)[0]
    # derive sort order: top_n largest t-scores (descending) followed by
    # the top_n smallest (ascending by magnitude of negativity)
    sort_order = np.argsort(stats_to_sort)
    sort_order = np.concatenate((sort_order[-top_n:][::-1],sort_order[:top_n][::-1]))
    # NOTE(review): filtering below can leave fewer than top_n entries in
    # sort_order, which would make the range(top_n) indexing in ``result``
    # raise IndexError -- confirm callers guarantee enough passing vars.
    if lfc_above_cutoff_idx.shape[0] > 0:
        sort_order = sort_order[np.in1d(sort_order,lfc_above_cutoff_idx)]
    # top n slice based upon sort order
    logfoldchanges_top_n = logfoldchanges[sort_order]
    pvals_top_n = pvals[sort_order]
    pvals_adj_top_n = pvals_adj[sort_order]
    # varIndex, logfoldchange, pval, pval_adj
    result = {"positive": [[sort_order[i], logfoldchanges_top_n[i], pvals_top_n[i], pvals_adj_top_n[i]] for i in
                           range(top_n)],
              "negative": [[sort_order[i], logfoldchanges_top_n[i], pvals_top_n[i], pvals_adj_top_n[i]] for i in
                           range(-1, -1 - top_n, -1)], }
    return result
return result
def mean_var_n(X):
    """
    Per-column mean and variance of X, plus the row count.

    Two-pass variance calculation. Numerically (more) stable
    than naive methods (and same method used by numpy.var())
    https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Two-pass

    Returns:
        (mean, mean_sq, var, n) where mean_sq = var - mean**2 and n is
        the number of rows in X.  Handles both dense and sparse X.
    """
    rows = X.shape[0]
    if sparse.issparse(X):
        # sparse-aware mean/variance via scikit-learn's sparsefuncs
        col_mean, col_var = sf.mean_variance_axis(X, axis=0)
    else:
        col_mean = X.mean(0)
        col_var = X.var(0)
    return col_mean, col_var - col_mean ** 2, col_var, rows
| [
"numpy.in1d",
"numpy.abs",
"scipy.sparse.issparse",
"numpy.errstate",
"numpy.isnan",
"numpy.argsort",
"sklearn.utils.sparsefuncs.mean_variance_axis",
"numpy.concatenate",
"numpy.sqrt"
] | [((1456, 1481), 'numpy.argsort', 'np.argsort', (['stats_to_sort'], {}), '(stats_to_sort)\n', (1466, 1481), True, 'import numpy as np\n'), ((1499, 1568), 'numpy.concatenate', 'np.concatenate', (['(sort_order[-top_n:][::-1], sort_order[:top_n][::-1])'], {}), '((sort_order[-top_n:][::-1], sort_order[:top_n][::-1]))\n', (1513, 1568), True, 'import numpy as np\n'), ((2513, 2531), 'scipy.sparse.issparse', 'sparse.issparse', (['X'], {}), '(X)\n', (2528, 2531), False, 'from scipy import sparse, stats\n'), ((666, 712), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (677, 712), True, 'import numpy as np\n'), ((794, 807), 'numpy.isnan', 'np.isnan', (['dof'], {}), '(dof)\n', (802, 807), True, 'import numpy as np\n'), ((862, 908), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (873, 908), True, 'import numpy as np\n'), ((974, 991), 'numpy.isnan', 'np.isnan', (['tscores'], {}), '(tscores)\n', (982, 991), True, 'import numpy as np\n'), ((1225, 1266), 'numpy.abs', 'np.abs', (['((meanA + 1e-09) / (meanB + 1e-09))'], {}), '((meanA + 1e-09) / (meanB + 1e-09))\n', (1231, 1266), True, 'import numpy as np\n'), ((2550, 2582), 'sklearn.utils.sparsefuncs.mean_variance_axis', 'sf.mean_variance_axis', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (2571, 2582), True, 'import sklearn.utils.sparsefuncs as sf\n'), ((946, 961), 'numpy.sqrt', 'np.sqrt', (['sum_vn'], {}), '(sum_vn)\n', (953, 961), True, 'import numpy as np\n'), ((1035, 1050), 'numpy.abs', 'np.abs', (['tscores'], {}), '(tscores)\n', (1041, 1050), True, 'import numpy as np\n'), ((1642, 1683), 'numpy.in1d', 'np.in1d', (['sort_order', 'lfc_above_cutoff_idx'], {}), '(sort_order, lfc_above_cutoff_idx)\n', (1649, 1683), True, 'import numpy as np\n'), ((1366, 1388), 'numpy.abs', 'np.abs', (['logfoldchanges'], {}), '(logfoldchanges)\n', (1372, 1388), True, 'import 
numpy as np\n')] |
import numpy as np
import pandas as pd
# Input table of per-detection features and the output file for the
# generated fixed-length training sequences.
PATH_DATA = "./train_data/data.csv"
PATH_SEQUENTIAL = "./train_data/data_seq_test_10.npz"
# Number of consecutive frames per training sequence.
SEQ_LEN = 10
# Mapping from class label to integer id (appears unused below; kept for reference).
object_class_converter = {"BULLSHIT":2,"OTHER":1,"DRONE":0}
# Per-frame feature columns extracted for each tracked object; the second
# half are the rolling standard deviations (*_std) of the first half.
feature_columns = ["speed_stability",
                   "estimated_coverage",
                   "size_mean_orthogonal_gradient",
                   "estimated_speed",
                   "estimated_Mahalanobis_distance",
                   "uavity",
                   "speed_stability_ratio",
                   "speed_direction_stability",
                   "speed_atan2",
                   "acceleration_atan2",
                   "mass_centre_x",
                   "mass_centre_y",
                   "bbox_width",
                   "bbox_height",
                   "speed_stability_std",
                   "estimated_coverage_std",
                   "size_mean_orthogonal_gradient_std",
                   "estimated_speed_std",
                   "estimated_Mahalanobis_distance_std",
                   "uavity_std",
                   "speed_stability_ratio_std",
                   "speed_direction_stability_std",
                   "speed_atan2_std",
                   "acceleration_atan2_std",
                   "mass_centre_x_std",
                   "mass_centre_y_std",
                   "bbox_width_std",
                   "bbox_height_std"
                   ]
data = pd.read_csv(PATH_DATA,index_col=0)
#data = data[data.is_zoom_request>0.0]
# Accumulators: B sequences of shape T x N, and per-sequence labels.
sequences = [] #BxTxN
labels = [] #Bx1
ZR = [] #Bx1
Video_id = [] #Bx1
for name,group in data.groupby(["video_id", "object_id"]):
    lab = group.object_class.unique()
    vid = group.video_id.unique()
    # Skip tracks with inconsistent labels/video ids, tracks shorter than
    # one sequence, or tracks whose frame ids are not strictly contiguous
    # (the diff-sum check equals len-1 only for consecutive frames).
    if len(lab) > 1 or len(vid) > 1 or\
        group.shape[0]<SEQ_LEN or \
        group.frame_id.diff().sum() != group.shape[0]-1:
        continue
    print("Process #", name, " ", lab)
    # Slide a window of SEQ_LEN frames over the track, one step at a time.
    for i in range(0,group.shape[0]-SEQ_LEN+1):
        sequences.append([])
        labels.append(lab[0])
        Video_id.append(vid[0])
        zr_ = []
        for j in range(SEQ_LEN):
            sequences[-1].append(list(group.iloc[i+j][feature_columns]))
            zr_.append(group.iloc[i+j].is_zoom_request)
        # A sequence counts as a zoom request if ANY of its frames was one.
        ZR.append(np.any(zr_).astype(np.int32))
sequences = np.asanyarray(sequences)
labels = np.asanyarray(labels)
np.savez_compressed(PATH_SEQUENTIAL, X=sequences,Y=labels,ZR=ZR,VID=Video_id)
| [
"pandas.read_csv",
"numpy.any",
"numpy.savez_compressed",
"numpy.asanyarray"
] | [((1436, 1471), 'pandas.read_csv', 'pd.read_csv', (['PATH_DATA'], {'index_col': '(0)'}), '(PATH_DATA, index_col=0)\n', (1447, 1471), True, 'import pandas as pd\n'), ((2194, 2218), 'numpy.asanyarray', 'np.asanyarray', (['sequences'], {}), '(sequences)\n', (2207, 2218), True, 'import numpy as np\n'), ((2228, 2249), 'numpy.asanyarray', 'np.asanyarray', (['labels'], {}), '(labels)\n', (2241, 2249), True, 'import numpy as np\n'), ((2250, 2335), 'numpy.savez_compressed', 'np.savez_compressed', (['PATH_SEQUENTIAL'], {'X': 'sequences', 'Y': 'labels', 'ZR': 'ZR', 'VID': 'Video_id'}), '(PATH_SEQUENTIAL, X=sequences, Y=labels, ZR=ZR, VID=Video_id\n )\n', (2269, 2335), True, 'import numpy as np\n'), ((2151, 2162), 'numpy.any', 'np.any', (['zr_'], {}), '(zr_)\n', (2157, 2162), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import numpy as np
# Plot projectile range (gittata) against launch angle (alfa) from the
# two-column data file; column 1 is the x-axis, column 0 the y-axis.
x, y = np.loadtxt('motogravi.dat', usecols=(1, 0), unpack=True)
plt.title('Moto Parabolico')
plt.xlabel('Gittata (m)')
plt.ylabel('Alfa (rad)')
plt.plot(x, y, 'x-', label='Gittata')
plt.legend()
plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((52, 80), 'matplotlib.pyplot.title', 'plt.title', (['"""Moto Parabolico"""'], {}), "('Moto Parabolico')\n", (61, 80), True, 'import matplotlib.pyplot as plt\n'), ((81, 106), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Gittata (m)"""'], {}), "('Gittata (m)')\n", (91, 106), True, 'import matplotlib.pyplot as plt\n'), ((107, 131), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Alfa (rad)"""'], {}), "('Alfa (rad)')\n", (117, 131), True, 'import matplotlib.pyplot as plt\n'), ((139, 195), 'numpy.loadtxt', 'np.loadtxt', (['"""motogravi.dat"""'], {'usecols': '(1, 0)', 'unpack': '(True)'}), "('motogravi.dat', usecols=(1, 0), unpack=True)\n", (149, 195), True, 'import numpy as np\n'), ((195, 232), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""x-"""'], {'label': '"""Gittata"""'}), "(x, y, 'x-', label='Gittata')\n", (203, 232), True, 'import matplotlib.pyplot as plt\n'), ((233, 245), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (243, 245), True, 'import matplotlib.pyplot as plt\n'), ((246, 256), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (254, 256), True, 'import matplotlib.pyplot as plt\n')] |
from numpy import asarray
from numpy import savez_compressed
def load_images(path):
    """Load and preprocess every image listed in the global ``df``.

    NOTE(review): this relies on module-level names not defined here --
    ``df`` (a DataFrame with an 'image' filename column), ``cv2`` and
    ``img_pre`` -- confirm they are defined before calling.

    Args:
        path: directory prefix joined with each filename from df['image'].

    Returns:
        numpy array stacking all preprocessed images.
    """
    img_list = list()
    for i in range(df.shape[0]):
        print(i+1,df['image'][i])
        # load and resize the image
        filename=df['image'][i]
        img = cv2.imread(path+filename)
        # cv2 loads BGR; reorder channels to RGB before preprocessing
        img = img[ : , : , (2, 1, 0)]
        img = img_pre(img)
        # merge
        img_list.append(img.tolist())
        # Safety checkpoint: save partial results to tree.npz every 200
        # images so a memory-related crash does not lose all progress.
        if((i+1)%200==0):
            images=asarray(img_list)
            filename = 'tree.npz'
            savez_compressed(filename, images)
            print(images.shape)
    return asarray(img_list)
"numpy.asarray",
"numpy.savez_compressed"
] | [((679, 696), 'numpy.asarray', 'asarray', (['img_list'], {}), '(img_list)\n', (686, 696), False, 'from numpy import asarray\n'), ((533, 550), 'numpy.asarray', 'asarray', (['img_list'], {}), '(img_list)\n', (540, 550), False, 'from numpy import asarray\n'), ((599, 633), 'numpy.savez_compressed', 'savez_compressed', (['filename', 'images'], {}), '(filename, images)\n', (615, 633), False, 'from numpy import savez_compressed\n')] |
"""
ckwg +31
Copyright 2020 by Kitware, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither name of Kitware, Inc. nor the names of any contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==============================================================================
Tests for Python interface to vital::sfm_constraints
"""
import nose.tools as nt
import unittest
import numpy as np
from kwiver.vital.modules import modules
from kwiver.vital.types.metadata import *
from kwiver.vital.types.metadata_traits import *
from kwiver.vital.types import (
Metadata,
LocalGeoCS,
rotation,
RotationD,
RotationF,
SFMConstraints,
geodesy,
GeoPoint,
metadata_tags as mt,
SimpleMetadataMap,
)
# Load the vital plugin modules once at import time so the C++ implementations
# backing the binding types used below are registered before any test runs.
modules.load_known_modules()
class TestSFMConstraints(unittest.TestCase):
    """Smoke tests for the Python bindings of vital::sfm_constraints."""
    @classmethod
    def setUp(self):
        # NOTE(review): decorated with @classmethod but named like the per-test
        # setUp hook and written against `self`; unittest still invokes it
        # before every test, so each test gets fresh fixture objects.
        self.meta_ = SimpleMetadataMap()
        self.geo_ = LocalGeoCS()
        # A sample of metadata tags (currently unused by the tests below).
        self.small_tag = [
            mt.tags.VITAL_META_UNKNOWN,
            mt.tags.VITAL_META_UNIX_TIMESTAMP,
            mt.tags.VITAL_META_SLANT_RANGE,
            mt.tags.VITAL_META_MISSION_ID,
            mt.tags.VITAL_META_VIDEO_KEY_FRAME,
        ]
        # Longitude/latitude pair used as the local geodetic origin.
        self.loc1 = np.array([-73.759291, 42.849631])
        self.crs_ll = geodesy.SRID.lat_lon_WGS84
        self.geo_pt1_ = GeoPoint(self.loc1, self.crs_ll)
        self.geo_.geo_origin = self.geo_pt1_
    def test_init(self):
        # Default, copy, and (metadata, local-geo) constructors must all succeed.
        s = SFMConstraints()
        SFMConstraints(s)
        SFMConstraints(self.meta_, self.geo_)
    def test_properties(self):
        # modules.load_known_modules()
        # metadata property: starts empty and round-trips through the setter.
        s = SFMConstraints(self.meta_, self.geo_)
        get_meta = s.metadata
        nt.assert_equal(get_meta.size(), 0)
        m = SimpleMetadataMap()
        s.metadata = m
        nt.assert_equal(s.metadata.size(), 0)
        # local_geo_property: the geo origin set in setUp must survive a
        # round-trip through the constructor and through the setter.
        ret_geo = s.local_geo_cs
        np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll),
                                             self.geo_pt1_.location())
        s = SFMConstraints()
        s.local_geo_cs = self.geo_
        ret_geo = s.local_geo_cs
        np.testing.assert_array_almost_equal(ret_geo.geo_origin.location(self.crs_ll),
                                             self.geo_pt1_.location())
    def test_get_camera_position_prior_local(self):
        # No metadata is present, so no position prior can be produced for
        # either overload (translation vector or rotation).
        s = SFMConstraints(self.meta_, self.geo_)
        nt.assert_false(s.get_camera_position_prior_local(0, np.array([0, 1, 3])))
        nt.assert_false(s.get_camera_position_prior_local(0, RotationD([1, 2, 3, 4])))
    def test_camera_position_priors(self):
        # With empty metadata the prior map must be empty.
        s = SFMConstraints(self.meta_, self.geo_)
        nt.assert_dict_equal(s.get_camera_position_priors(), {})
    def test_image_properties(self):
        # Stored image dimensions must be retrievable; the focal-length prior
        # is absent and must leave the caller's value untouched.
        s = SFMConstraints(self.meta_, self.geo_)
        s.store_image_size(0, 1080, 720)
        a,b = 0,0
        founda, foundb = False, False
        founda, a = s.get_image_width(0, a)
        foundb, b = s.get_image_height(0, b)
        nt.ok_(founda)
        nt.ok_(foundb)
        nt.assert_equal(a, 1080)
        nt.assert_equal(b, 720)
        found_focal = True
        focal_len = 0.1
        found_focal, focal_len = s.get_focal_length_prior(0, focal_len)
        nt.assert_false(found_focal)
        nt.assert_almost_equal(focal_len, 0.1)
| [
"kwiver.vital.modules.modules.load_known_modules",
"kwiver.vital.types.SFMConstraints",
"kwiver.vital.types.LocalGeoCS",
"nose.tools.ok_",
"nose.tools.assert_almost_equal",
"kwiver.vital.types.GeoPoint",
"nose.tools.assert_equal",
"kwiver.vital.types.SimpleMetadataMap",
"numpy.array",
"kwiver.vita... | [((2028, 2056), 'kwiver.vital.modules.modules.load_known_modules', 'modules.load_known_modules', ([], {}), '()\n', (2054, 2056), False, 'from kwiver.vital.modules import modules\n'), ((2159, 2178), 'kwiver.vital.types.SimpleMetadataMap', 'SimpleMetadataMap', ([], {}), '()\n', (2176, 2178), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2197, 2209), 'kwiver.vital.types.LocalGeoCS', 'LocalGeoCS', ([], {}), '()\n', (2207, 2209), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2485, 2518), 'numpy.array', 'np.array', (['[-73.759291, 42.849631]'], {}), '([-73.759291, 42.849631])\n', (2493, 2518), True, 'import numpy as np\n'), ((2588, 2620), 'kwiver.vital.types.GeoPoint', 'GeoPoint', (['self.loc1', 'self.crs_ll'], {}), '(self.loc1, self.crs_ll)\n', (2596, 2620), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2700, 2716), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', ([], {}), '()\n', (2714, 2716), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2723, 2740), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['s'], {}), '(s)\n', (2737, 2740), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2747, 2784), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['self.meta_', 'self.geo_'], {}), '(self.meta_, self.geo_)\n', (2761, 2784), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, 
RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((2890, 2927), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['self.meta_', 'self.geo_'], {}), '(self.meta_, self.geo_)\n', (2904, 2927), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((3008, 3027), 'kwiver.vital.types.SimpleMetadataMap', 'SimpleMetadataMap', ([], {}), '()\n', (3025, 3027), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((3345, 3361), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', ([], {}), '()\n', (3359, 3361), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((3672, 3709), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['self.meta_', 'self.geo_'], {}), '(self.meta_, self.geo_)\n', (3686, 3709), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((3930, 3967), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['self.meta_', 'self.geo_'], {}), '(self.meta_, self.geo_)\n', (3944, 3967), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((4079, 4116), 'kwiver.vital.types.SFMConstraints', 'SFMConstraints', (['self.meta_', 'self.geo_'], {}), '(self.meta_, self.geo_)\n', (4093, 4116), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n'), ((4299, 4313), 'nose.tools.ok_', 'nt.ok_', (['founda'], {}), '(founda)\n', 
(4305, 4313), True, 'import nose.tools as nt\n'), ((4320, 4334), 'nose.tools.ok_', 'nt.ok_', (['foundb'], {}), '(foundb)\n', (4326, 4334), True, 'import nose.tools as nt\n'), ((4341, 4365), 'nose.tools.assert_equal', 'nt.assert_equal', (['a', '(1080)'], {}), '(a, 1080)\n', (4356, 4365), True, 'import nose.tools as nt\n'), ((4372, 4395), 'nose.tools.assert_equal', 'nt.assert_equal', (['b', '(720)'], {}), '(b, 720)\n', (4387, 4395), True, 'import nose.tools as nt\n'), ((4519, 4547), 'nose.tools.assert_false', 'nt.assert_false', (['found_focal'], {}), '(found_focal)\n', (4534, 4547), True, 'import nose.tools as nt\n'), ((4554, 4592), 'nose.tools.assert_almost_equal', 'nt.assert_almost_equal', (['focal_len', '(0.1)'], {}), '(focal_len, 0.1)\n', (4576, 4592), True, 'import nose.tools as nt\n'), ((3769, 3788), 'numpy.array', 'np.array', (['[0, 1, 3]'], {}), '([0, 1, 3])\n', (3777, 3788), True, 'import numpy as np\n'), ((3850, 3873), 'kwiver.vital.types.RotationD', 'RotationD', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (3859, 3873), False, 'from kwiver.vital.types import Metadata, LocalGeoCS, rotation, RotationD, RotationF, SFMConstraints, geodesy, GeoPoint, metadata_tags as mt, SimpleMetadataMap\n')] |
"""
Methods to gauge how well force balance is satisfied for an ensemble,
and to convert between polar and cartesian systems.
"""
import numpy as np
import numba
def polarToCartesian(force, alpha, beta, collapse=True):
    """
    Convert a set of forces defined in polar coordinates (f, a, b),
    to cartesian coordinates (f_y, f_x).

    Parameters
    ----------
    force : float or np.ndarray[F] or list[F]
        The force magnitude, or an array/list of F force magnitudes.
    alpha : float or np.ndarray[F] or list[F]
        The alpha angle, or an array/list of F alpha angles.
    beta : float or np.ndarray[F] or list[F]
        The beta angle, or an array/list of F beta angles.
    collapse : bool
        Whether to collapse the force index dimension in the case that
        only a single force is provided.

    Returns
    -------
    forceArr : np.ndarray[F,2]
        An array of the cartesian components (y,x) of the forces.
        If only a single force is provided (ie. `force`, `alpha` and `beta` are all
        floats) the first dimension will be omitted, leaving just `[f_y, f_x]`. See
        `collapse` for more information.
    """
    # Check to see if we were given multiple forces, or just a single one,
    # then promote everything to 1D arrays so the math below is uniform.
    singleForce = not hasattr(force, '__iter__')
    forceArr = np.atleast_1d(np.asarray(force, dtype=float))
    alphaArr = np.atleast_1d(np.asarray(alpha, dtype=float))
    betaArr = np.atleast_1d(np.asarray(beta, dtype=float))

    # Note that this expression is not exactly the same as in the reference
    # implementation (Rev. Sci. Inst. 88 (2017)). There is an extra negative
    # on the alphas, since mine appear to be defined backwards.
    # Vectorized over all forces instead of a per-force Python loop.
    relativeAngle = betaArr - alphaArr
    cartesianForceArr = np.empty((forceArr.shape[0], 2))
    # F_y
    cartesianForceArr[:, 0] = forceArr * np.cos(relativeAngle)
    # F_x
    cartesianForceArr[:, 1] = -forceArr * np.sin(relativeAngle)

    # If we only have a single force, we should collapse that first dimension
    if singleForce and collapse:
        return cartesianForceArr[0]

    return cartesianForceArr
def testForceBalance(forceArr, alphaArr, betaArr, collapse=True):
"""
Sum each of the cartesian force components to see how
well an ensemble of forces satisfies force balance.
Parameters
----------
forceArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F force magnitudes, possibly for T timesteps.
alphaArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F alpha angles, possibly for T timesteps.
betaArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F beta angles, possibly for T timesteps.
collapse : bool
Whether to collapse the timestep dimension in the case that
only a single timestep is provided.
Returns
-------
forceSumArr : np.ndarray[T,2]
An array of the sum of each cartesian component (y,x) of the forces at each timestep.
If only a single timestep is provided (ie. `forceArr`, `alphaArr` and `betaArr` are all
1D arrays) the first dimension will be omitted, leaving just `[sum_f_y, sum_f_x]`. See
`collapse` for more information.
"""
# Check if we were given a single timestep, or multiple
if len(np.shape(forceArr)) == 2:
singleTimestep = False
multiForceArr = np.array(forceArr)
multiAlphaArr = np.array(alphaArr)
multiBetaArr = np.array(betaArr)
else:
singleTimestep = True
# TODO: Might need a transpose here
multiForceArr = np.array([forceArr])
multiAlphaArr = np.array([alphaArr])
multiBetaArr = np.array([betaArr])
forceSumArr = np.zeros((multiForceArr.shape[1], 2))
# Sum up forces for each timestep
for i in range(multiForceArr.shape[1]):
cartForces = polarToCartesian(multiForceArr[:,i], multiAlphaArr[:,i], multiBetaArr[:,i], collapse=False)
# sum_y
forceSumArr[i,0] = np.sum(cartForces[:,0])
# sum_x
forceSumArr[i,1] = np.sum(cartForces[:,1])
if singleTimestep and collapse:
return forceSumArr[0]
return forceSumArr
@numba.jit(nopython=True)
def singleParticleForceBalance(forceArr, alphaArr, betaArr):
    """
    **Does not currently work! Any calls to this function will just return the original
    arrays**

    Takes a set of forces acting on a single particle and ensures they obey
    force balance.

    The majority of this method is transpiled directly from <NAME>'s
    implementation:
    https://github.com/jekollmer/PEGS

    Parameters
    ----------
    forceArr : np.ndarray[N]
        Array of force magnitudes at each contact point.

    alphaArr : np.ndarray[N]
        Array of angles that define the direction of force at each contact point

    betaArr : np.ndarray[N]
        Array of angles that define the contact point of the forces, and therefore are
        not adjusted in the force balancing process

    Returns
    -------
    np.ndarray[N] : Magnitude of balanced forces

    np.ndarray[N] : Balanced contact angles alpha
    """
    # TODO: Get this function working
    print("Warning: force balance is not yet implemented, do not call the singleParticleForceBalance function!")
    return forceArr, alphaArr
    # ------------------------------------------------------------------
    # NOTE(review): everything below the return above is UNREACHABLE
    # work-in-progress code, kept as the starting point for finishing the
    # port. It has never been exercised (including under numba's nopython
    # compilation) — verify before relying on any of it.
    # ------------------------------------------------------------------
    # Number of contacts (coordination number, often denoted by z)
    numContacts = len(forceArr)
    if numContacts < 2:
        # Can't do anything with only a single force
        return forceArr, alphaArr
    elif numContacts == 2:
        # For 2 forces, there is a unique process
        # The two force magnitudes must be equal
        balancedForceArr = np.array([forceArr[0], forceArr[0]])
        balancedAlphaArr = np.zeros(2)
        dBeta = (betaArr[0] - betaArr[1]) / 2
        balancedAlphaArr[0] = np.arccos(np.sin(dBeta))
        if balancedAlphaArr[0] > np.pi/2:
            balancedAlphaArr[0] = np.arccos(np.sin(-dBeta))
        # And the other angle must be the opposite
        balancedAlphaArr[1] = - balancedAlphaArr[0]
        return balancedForceArr, balancedAlphaArr
    elif numContacts > 2:
        # We solve any z>2 contacts the same way
        balancedForceArr = np.zeros_like(forceArr)
        balancedAlphaArr = np.zeros_like(alphaArr)
        # To calculate the new force magnitudes, we add up vertical and
        # horizontal components of the other forces
        for i in range(numContacts):
            # These initializations are to not count the case where j = i
            sum1 = -forceArr[i] * np.sin(alphaArr[i])
            sum2 = -forceArr[i] * np.cos(alphaArr[i])
            for j in range(numContacts):
                sum1 += forceArr[j] * np.sin(alphaArr[j] + betaArr[j] - betaArr[i])
                sum2 += forceArr[j] * np.cos(alphaArr[j] + betaArr[j] - betaArr[i])
            balancedForceArr[i] = np.sqrt(sum1**2 + sum2**2)
        # To calculate new alpha values, we
        for i in range(numContacts):
            sum3 = -balancedForceArr[i] * np.sin(alphaArr[i])
            for j in range(numContacts):
                sum3 += balancedForceArr[j] * np.sin(alphaArr[j])
            balancedAlphaArr[i] = np.arcsin(-sum3/balancedForceArr[i])
        return balancedForceArr, balancedAlphaArr
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.zeros",
"numpy.arcsin",
"numpy.shape",
"numpy.sin",
"numba.jit",
"numpy.array",
"numpy.cos",
"numpy.sqrt"
] | [((4503, 4527), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4512, 4527), False, 'import numba\n'), ((1589, 1621), 'numpy.zeros', 'np.zeros', (['(forceArr.shape[0], 2)'], {}), '((forceArr.shape[0], 2))\n', (1597, 1621), True, 'import numpy as np\n'), ((4040, 4077), 'numpy.zeros', 'np.zeros', (['(multiForceArr.shape[1], 2)'], {}), '((multiForceArr.shape[1], 2))\n', (4048, 4077), True, 'import numpy as np\n'), ((1302, 1317), 'numpy.array', 'np.array', (['force'], {}), '(force)\n', (1310, 1317), True, 'import numpy as np\n'), ((1337, 1352), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (1345, 1352), True, 'import numpy as np\n'), ((1371, 1385), 'numpy.array', 'np.array', (['beta'], {}), '(beta)\n', (1379, 1385), True, 'import numpy as np\n'), ((1443, 1460), 'numpy.array', 'np.array', (['[force]'], {}), '([force])\n', (1451, 1460), True, 'import numpy as np\n'), ((1480, 1497), 'numpy.array', 'np.array', (['[alpha]'], {}), '([alpha])\n', (1488, 1497), True, 'import numpy as np\n'), ((1516, 1532), 'numpy.array', 'np.array', (['[beta]'], {}), '([beta])\n', (1524, 1532), True, 'import numpy as np\n'), ((3701, 3719), 'numpy.array', 'np.array', (['forceArr'], {}), '(forceArr)\n', (3709, 3719), True, 'import numpy as np\n'), ((3744, 3762), 'numpy.array', 'np.array', (['alphaArr'], {}), '(alphaArr)\n', (3752, 3762), True, 'import numpy as np\n'), ((3786, 3803), 'numpy.array', 'np.array', (['betaArr'], {}), '(betaArr)\n', (3794, 3803), True, 'import numpy as np\n'), ((3912, 3932), 'numpy.array', 'np.array', (['[forceArr]'], {}), '([forceArr])\n', (3920, 3932), True, 'import numpy as np\n'), ((3957, 3977), 'numpy.array', 'np.array', (['[alphaArr]'], {}), '([alphaArr])\n', (3965, 3977), True, 'import numpy as np\n'), ((4001, 4020), 'numpy.array', 'np.array', (['[betaArr]'], {}), '([betaArr])\n', (4009, 4020), True, 'import numpy as np\n'), ((4318, 4342), 'numpy.sum', 'np.sum', (['cartForces[:, 0]'], {}), '(cartForces[:, 0])\n', 
(4324, 4342), True, 'import numpy as np\n'), ((4385, 4409), 'numpy.sum', 'np.sum', (['cartForces[:, 1]'], {}), '(cartForces[:, 1])\n', (4391, 4409), True, 'import numpy as np\n'), ((1943, 1976), 'numpy.cos', 'np.cos', (['(-alphaArr[i] + betaArr[i])'], {}), '(-alphaArr[i] + betaArr[i])\n', (1949, 1976), True, 'import numpy as np\n'), ((2134, 2167), 'numpy.sin', 'np.sin', (['(-alphaArr[i] + betaArr[i])'], {}), '(-alphaArr[i] + betaArr[i])\n', (2140, 2167), True, 'import numpy as np\n'), ((3620, 3638), 'numpy.shape', 'np.shape', (['forceArr'], {}), '(forceArr)\n', (3628, 3638), True, 'import numpy as np\n'), ((6002, 6038), 'numpy.array', 'np.array', (['[forceArr[0], forceArr[0]]'], {}), '([forceArr[0], forceArr[0]])\n', (6010, 6038), True, 'import numpy as np\n'), ((6067, 6078), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6075, 6078), True, 'import numpy as np\n'), ((6166, 6179), 'numpy.sin', 'np.sin', (['dBeta'], {}), '(dBeta)\n', (6172, 6179), True, 'import numpy as np\n'), ((6549, 6572), 'numpy.zeros_like', 'np.zeros_like', (['forceArr'], {}), '(forceArr)\n', (6562, 6572), True, 'import numpy as np\n'), ((6600, 6623), 'numpy.zeros_like', 'np.zeros_like', (['alphaArr'], {}), '(alphaArr)\n', (6613, 6623), True, 'import numpy as np\n'), ((6267, 6281), 'numpy.sin', 'np.sin', (['(-dBeta)'], {}), '(-dBeta)\n', (6273, 6281), True, 'import numpy as np\n'), ((7212, 7242), 'numpy.sqrt', 'np.sqrt', (['(sum1 ** 2 + sum2 ** 2)'], {}), '(sum1 ** 2 + sum2 ** 2)\n', (7219, 7242), True, 'import numpy as np\n'), ((7526, 7564), 'numpy.arcsin', 'np.arcsin', (['(-sum3 / balancedForceArr[i])'], {}), '(-sum3 / balancedForceArr[i])\n', (7535, 7564), True, 'import numpy as np\n'), ((6894, 6913), 'numpy.sin', 'np.sin', (['alphaArr[i]'], {}), '(alphaArr[i])\n', (6900, 6913), True, 'import numpy as np\n'), ((6948, 6967), 'numpy.cos', 'np.cos', (['alphaArr[i]'], {}), '(alphaArr[i])\n', (6954, 6967), True, 'import numpy as np\n'), ((7364, 7383), 'numpy.sin', 'np.sin', (['alphaArr[i]'], 
{}), '(alphaArr[i])\n', (7370, 7383), True, 'import numpy as np\n'), ((7047, 7092), 'numpy.sin', 'np.sin', (['(alphaArr[j] + betaArr[j] - betaArr[i])'], {}), '(alphaArr[j] + betaArr[j] - betaArr[i])\n', (7053, 7092), True, 'import numpy as np\n'), ((7131, 7176), 'numpy.cos', 'np.cos', (['(alphaArr[j] + betaArr[j] - betaArr[i])'], {}), '(alphaArr[j] + betaArr[j] - betaArr[i])\n', (7137, 7176), True, 'import numpy as np\n'), ((7471, 7490), 'numpy.sin', 'np.sin', (['alphaArr[j]'], {}), '(alphaArr[j])\n', (7477, 7490), True, 'import numpy as np\n')] |
"""
FishNet for ImageNet-1K, implemented in Gluon.
Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.
"""
__all__ = ['FishNet', 'fishnet99', 'fishnet150', 'ChannelSqueeze']
import os
from mxnet import cpu
from mxnet.gluon import nn, HybridBlock
from mxnet.gluon.contrib.nn import Identity
from .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, InterpolationBlock
from .preresnet import PreResActivation
from .senet import SEInitBlock
def channel_squeeze(x,
                    channels_per_group):
    """
    Channel squeeze operation.

    Parameters:
    ----------
    x : NDArray
        Input tensor.
    channels_per_group : int
        Number of channels per group.

    Returns:
    -------
    NDArray
        Resulted tensor.
    """
    # Split the channel axis into (groups, channels_per_group) via MXNet's
    # special reshape codes (0 keeps the batch axis, -4 splits the channel
    # axis, -1 infers the group count, -2 copies the remaining axes), then
    # collapse each group by summing over its channels.
    grouped = x.reshape((0, -4, channels_per_group, -1, -2))
    return grouped.sum(axis=2)
class ChannelSqueeze(HybridBlock):
    """
    Channel squeeze layer. This is a wrapper over the same operation. It is
    designed to save the number of groups.

    Parameters:
    ----------
    channels : int
        Number of channels.
    groups : int
        Number of groups.
    """
    def __init__(self,
                 channels,
                 groups,
                 **kwargs):
        super(ChannelSqueeze, self).__init__(**kwargs)
        # The channel count must divide evenly into the requested groups.
        per_group, remainder = divmod(channels, groups)
        assert remainder == 0
        self.channels_per_group = per_group

    def hybrid_forward(self, F, x):
        # Delegate to the functional form with the precomputed group size.
        return channel_squeeze(x, self.channels_per_group)
class PreSEAttBlock(HybridBlock):
    """
    FishNet specific Squeeze-and-Excitation attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    reduction : int, default 16
        Squeeze reduction value.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 bn_use_global_stats,
                 reduction=16,
                 **kwargs):
        super(PreSEAttBlock, self).__init__(**kwargs)
        # NOTE(review): 'mid_cannels' is a typo for 'mid_channels'; kept as-is
        # since renaming is purely cosmetic here.
        mid_cannels = out_channels // reduction

        # Child blocks are created inside name_scope() in a fixed order;
        # this order determines parameter names, so do not reorder them.
        with self.name_scope():
            self.bn = nn.BatchNorm(
                in_channels=in_channels,
                use_global_stats=bn_use_global_stats)
            self.relu = nn.Activation("relu")
            self.conv1 = conv1x1(
                in_channels=in_channels,
                out_channels=mid_cannels,
                use_bias=True)
            self.conv2 = conv1x1(
                in_channels=mid_cannels,
                out_channels=out_channels,
                use_bias=True)
            self.sigmoid = nn.Activation("sigmoid")

    def hybrid_forward(self, F, x):
        # Pre-activation (BN+ReLU), global average pool to 1x1, then the
        # squeeze/excite 1x1 convolutions producing per-channel gates in (0,1).
        x = self.bn(x)
        x = self.relu(x)
        x = F.contrib.AdaptiveAvgPooling2D(x, output_size=1)
        x = self.conv1(x)
        x = self.relu(x)
        x = self.conv2(x)
        x = self.sigmoid(x)
        return x
class FishBottleneck(HybridBlock):
    """
    FishNet bottleneck block for residual unit.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int
        Strides of the convolution.
    dilation : int or tuple/list of 2 int
        Dilation value for convolution layer.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides,
                 dilation,
                 bn_use_global_stats,
                 **kwargs):
        super(FishBottleneck, self).__init__(**kwargs)
        # Classic 4x bottleneck: squeeze to out_channels//4, 3x3, expand back.
        mid_channels = out_channels // 4

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            # The 3x3 conv carries both the stride and the dilation;
            # padding == dilation keeps the spatial size stride-consistent.
            self.conv2 = pre_conv3x3_block(
                in_channels=mid_channels,
                out_channels=mid_channels,
                strides=strides,
                padding=dilation,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats)
            self.conv3 = pre_conv1x1_block(
                in_channels=mid_channels,
                out_channels=out_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        return x
class FishBlock(HybridBlock):
    """
    FishNet block: a pre-activation residual unit whose identity branch is
    either passed through, 1x1-projected, or channel-squeezed.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels : int
        Number of output channels.
    strides : int or tuple/list of 2 int, default 1
        Strides of the convolution.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    squeeze : bool, default False
        Whether to use a channel squeeze operation.
    """
    def __init__(self,
                 in_channels,
                 out_channels,
                 strides=1,
                 dilation=1,
                 bn_use_global_stats=False,
                 squeeze=False,
                 **kwargs):
        super(FishBlock, self).__init__(**kwargs)
        self.squeeze = squeeze
        # The identity branch needs a projection whenever channels or
        # spatial resolution change.
        self.resize_identity = (in_channels != out_channels) or (strides != 1)

        with self.name_scope():
            self.body = FishBottleneck(
                in_channels=in_channels,
                out_channels=out_channels,
                strides=strides,
                dilation=dilation,
                bn_use_global_stats=bn_use_global_stats)
            if self.squeeze:
                # Squeeze mode requires exactly halving the channels
                # (two groups summed into one).
                assert (in_channels // 2 == out_channels)
                self.c_squeeze = ChannelSqueeze(
                    channels=in_channels,
                    groups=2)
            elif self.resize_identity:
                self.identity_conv = pre_conv1x1_block(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    strides=strides,
                    bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        # Choose the identity branch: channel squeeze, 1x1 projection, or
        # plain pass-through, then add the bottleneck output residually.
        if self.squeeze:
            identity = self.c_squeeze(x)
        elif self.resize_identity:
            identity = self.identity_conv(x)
        else:
            identity = x
        x = self.body(x)
        x = x + identity
        return x
class DownUnit(HybridBlock):
    """
    FishNet down unit: a chain of FishBlocks followed by 2x max-pooling
    downsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(DownUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                # Each block's output feeds the next block's input.
                in_channels = out_channels
            self.pool = nn.MaxPool2D(
                pool_size=2,
                strides=2)

    def hybrid_forward(self, F, x):
        x = self.blocks(x)
        x = self.pool(x)
        return x
class UpUnit(HybridBlock):
    """
    FishNet up unit: a chain of FishBlocks followed by 2x nearest-neighbor
    upsampling.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    dilation : int or tuple/list of 2 int, default 1
        Dilation value for convolution layer.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 dilation=1,
                 bn_use_global_stats=False,
                 **kwargs):
        super(UpUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                # Only the first block of a dilated unit uses the channel
                # squeeze shortcut (it halves the channel count).
                squeeze = (dilation > 1) and (i == 0)
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dilation=dilation,
                    bn_use_global_stats=bn_use_global_stats,
                    squeeze=squeeze))
                in_channels = out_channels
            # bilinear=False -> nearest-neighbor interpolation.
            self.upsample = InterpolationBlock(scale_factor=2, bilinear=False)

    def hybrid_forward(self, F, x):
        x = self.blocks(x)
        x = self.upsample(x)
        return x
class SkipUnit(HybridBlock):
    """
    FishNet skip connection unit: a plain chain of FishBlocks with no
    resolution change.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(SkipUnit, self).__init__(**kwargs)
        with self.name_scope():
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels

    def hybrid_forward(self, F, x):
        x = self.blocks(x)
        return x
class SkipAttUnit(HybridBlock):
    """
    FishNet skip connection unit with attention block.

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    out_channels_list : list of int
        Number of output channels for each block.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 out_channels_list,
                 bn_use_global_stats,
                 **kwargs):
        super(SkipAttUnit, self).__init__(**kwargs)
        # Halve then quadruple the channels (net 2x) before the block chain.
        mid_channels1 = in_channels // 2
        mid_channels2 = 2 * in_channels

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels1,
                bn_use_global_stats=bn_use_global_stats)
            self.conv2 = pre_conv1x1_block(
                in_channels=mid_channels1,
                out_channels=mid_channels2,
                use_bias=True,
                bn_use_global_stats=bn_use_global_stats)
            in_channels = mid_channels2
            # SE-style attention producing per-channel gates matching the
            # final block's channel count.
            self.se = PreSEAttBlock(
                in_channels=mid_channels2,
                out_channels=out_channels_list[-1],
                bn_use_global_stats=bn_use_global_stats)
            self.blocks = nn.HybridSequential(prefix="")
            for i, out_channels in enumerate(out_channels_list):
                self.blocks.add(FishBlock(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = out_channels

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # Gates are computed from the pre-block features, then applied as
        # x * w + w (gated residual of the attention weights themselves).
        w = self.se(x)
        x = self.blocks(x)
        x = F.broadcast_add(F.broadcast_mul(x, w), w)
        return x
class FishFinalBlock(HybridBlock):
    """
    FishNet final block: halves the channel count with a 1x1 conv and
    applies the final pre-activation (BN + ReLU).

    Parameters:
    ----------
    in_channels : int
        Number of input channels.
    bn_use_global_stats : bool
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
    """
    def __init__(self,
                 in_channels,
                 bn_use_global_stats,
                 **kwargs):
        super(FishFinalBlock, self).__init__(**kwargs)
        # Callers rely on this halving (see FishNet.__init__, which updates
        # its running channel count with `in_channels // 2` afterwards).
        mid_channels = in_channels // 2

        with self.name_scope():
            self.conv1 = pre_conv1x1_block(
                in_channels=in_channels,
                out_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)
            self.preactiv = PreResActivation(
                in_channels=mid_channels,
                bn_use_global_stats=bn_use_global_stats)

    def hybrid_forward(self, F, x):
        x = self.conv1(x)
        x = self.preactiv(x)
        return x
class FishNet(HybridBlock):
    """
    FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    direct_channels : list of list of list of int
        Number of output channels for each unit along the straight path.
    skip_channels : list of list of list of int
        Number of output channels for each skip connection unit.
    init_block_channels : int
        Number of output channels for the initial unit.
    bn_use_global_stats : bool, default False
        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
        Useful for fine-tuning.
    in_channels : int, default 3
        Number of input channels.
    in_size : tuple of two ints, default (224, 224)
        Spatial size of the expected input image.
    classes : int, default 1000
        Number of classification classes.
    """
    def __init__(self,
                 direct_channels,
                 skip_channels,
                 init_block_channels,
                 bn_use_global_stats=False,
                 in_channels=3,
                 in_size=(224, 224),
                 classes=1000,
                 **kwargs):
        super(FishNet, self).__init__(**kwargs)
        self.in_size = in_size
        self.classes = classes

        # Unpack the three direct stages (down1 / up / down2) and the two
        # skip stages; `depth` is the number of resolution levels.
        depth = len(direct_channels[0])
        down1_channels = direct_channels[0]
        up_channels = direct_channels[1]
        down2_channels = direct_channels[2]
        skip1_channels = skip_channels[0]
        skip2_channels = skip_channels[1]

        # NOTE: child blocks are created in a fixed order inside name_scope();
        # that order determines parameter names, which pretrained checkpoints
        # are keyed on. Do not reorder the construction below.
        with self.name_scope():
            self.features = nn.HybridSequential(prefix="")
            self.features.add(SEInitBlock(
                in_channels=in_channels,
                out_channels=init_block_channels,
                bn_use_global_stats=bn_use_global_stats))
            in_channels = init_block_channels

            # First downsampling path plus its skip units; the deepest level
            # (i == depth) gets the attention skip unit instead of a DownUnit.
            down1_seq = nn.HybridSequential(prefix="")
            skip1_seq = nn.HybridSequential(prefix="")
            for i in range(depth + 1):
                skip1_channels_list = skip1_channels[i]
                if i < depth:
                    skip1_seq.add(SkipUnit(
                        in_channels=in_channels,
                        out_channels_list=skip1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    down1_channels_list = down1_channels[i]
                    down1_seq.add(DownUnit(
                        in_channels=in_channels,
                        out_channels_list=down1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = down1_channels_list[-1]
                else:
                    skip1_seq.add(SkipAttUnit(
                        in_channels=in_channels,
                        out_channels_list=skip1_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = skip1_channels_list[-1]

            # Upsampling path; at each level the matching skip1 output is
            # concatenated, so its channel count is added to the input.
            up_seq = nn.HybridSequential(prefix="")
            skip2_seq = nn.HybridSequential(prefix="")
            for i in range(depth + 1):
                skip2_channels_list = skip2_channels[i]
                if i > 0:
                    in_channels += skip1_channels[depth - i][-1]
                if i < depth:
                    skip2_seq.add(SkipUnit(
                        in_channels=in_channels,
                        out_channels_list=skip2_channels_list,
                        bn_use_global_stats=bn_use_global_stats))
                    up_channels_list = up_channels[i]
                    # Dilation doubles per level: 1, 2, 4, ...
                    dilation = 2 ** i
                    up_seq.add(UpUnit(
                        in_channels=in_channels,
                        out_channels_list=up_channels_list,
                        dilation=dilation,
                        bn_use_global_stats=bn_use_global_stats))
                    in_channels = up_channels_list[-1]
                else:
                    skip2_seq.add(Identity())

            # Second downsampling path; each stage also receives the matching
            # skip2 channels via concatenation.
            down2_seq = nn.HybridSequential(prefix="")
            for i in range(depth):
                down2_channels_list = down2_channels[i]
                down2_seq.add(DownUnit(
                    in_channels=in_channels,
                    out_channels_list=down2_channels_list,
                    bn_use_global_stats=bn_use_global_stats))
                in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]

            self.features.add(SesquialteralHourglass(
                down1_seq=down1_seq,
                skip1_seq=skip1_seq,
                up_seq=up_seq,
                skip2_seq=skip2_seq,
                down2_seq=down2_seq))
            self.features.add(FishFinalBlock(
                in_channels=in_channels,
                bn_use_global_stats=bn_use_global_stats))
            # FishFinalBlock halves the channel count.
            in_channels = in_channels // 2
            # 7x7 average pool matches the 224x224 default input size.
            self.features.add(nn.AvgPool2D(
                pool_size=7,
                strides=1))

            # Classifier: 1x1 conv acting as a fully-connected layer.
            self.output = nn.HybridSequential(prefix="")
            self.output.add(conv1x1(
                in_channels=in_channels,
                out_channels=classes,
                use_bias=True))
            self.output.add(nn.Flatten())

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x
def get_fishnet(blocks,
                model_name=None,
                pretrained=False,
                ctx=cpu(),
                root=os.path.join("~", ".mxnet", "models"),
                **kwargs):
    """
    Create FishNet model with specific parameters.

    Parameters:
    ----------
    blocks : int
        Number of blocks (supported: 99 and 150).
    model_name : str or None, default None
        Model name for loading pretrained model.
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns:
    -------
    FishNet
        The constructed network (optionally with pretrained weights loaded).

    Raises:
    ------
    ValueError
        For an unsupported `blocks` count, or when `pretrained` is requested
        without a valid `model_name`.
    """
    if blocks == 99:
        direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]
        skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]
    elif blocks == 150:
        direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]
        skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]
    else:
        raise ValueError("Unsupported FishNet with number of blocks: {}".format(blocks))

    direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]
    skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]

    def expand(channels_per_layers, layers):
        # Repeat each channel value `cnt` times, stage by stage:
        # ([c0, c1], [n0, n1]) -> [[c0] * n0, [c1] * n1].
        return [[[ch] * cnt for ch, cnt in zip(channels, counts)]
                for channels, counts in zip(channels_per_layers, layers)]

    # Simplified from a doubly-wrapped comprehension that rebuilt the zip
    # pairs into an intermediate list with no effect.
    direct_channels = expand(direct_channels_per_layers, direct_layers)
    skip_channels = expand(skip_channels_per_layers, skip_layers)
    init_block_channels = 64

    net = FishNet(
        direct_channels=direct_channels,
        skip_channels=skip_channels,
        init_block_channels=init_block_channels,
        **kwargs)

    if pretrained:
        if (model_name is None) or (not model_name):
            raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
        from .model_store import get_model_file
        net.load_parameters(
            filename=get_model_file(
                model_name=model_name,
                local_model_store_dir_path=root),
            ctx=ctx)

    return net
def fishnet99(**kwargs):
    """
    Build the 99-layer FishNet.

    From 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_fishnet(model_name="fishnet99", blocks=99, **kwargs)
def fishnet150(**kwargs):
    """
    Build the 150-layer FishNet.

    From 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'
    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    ctx : Context, default CPU
        The context in which to load the pretrained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.
    """
    return get_fishnet(model_name="fishnet150", blocks=150, **kwargs)
def _test():
    """Smoke test: instantiate each FishNet variant, then verify the trainable
    parameter count and the classifier output shape."""
    import numpy as np
    import mxnet as mx

    pretrained = False

    for model in (fishnet99, fishnet150):
        net = model(pretrained=pretrained)
        ctx = mx.cpu()
        if not pretrained:
            net.initialize(ctx=ctx)
        # net.hybridize()
        # Sum the sizes of all trainable parameters with a resolved shape.
        weight_count = sum(
            np.prod(p.shape) for p in net.collect_params().values()
            if p.shape is not None and p._differentiable)
        print("m={}, {}".format(model.__name__, weight_count))
        assert (model != fishnet99 or weight_count == 16628904)
        assert (model != fishnet150 or weight_count == 24959400)
        out = net(mx.nd.zeros((1, 3, 224, 224), ctx=ctx))
        assert (out.shape == (1, 1000))
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    _test()
| [
"mxnet.gluon.nn.HybridSequential",
"mxnet.gluon.nn.MaxPool2D",
"mxnet.gluon.nn.Activation",
"mxnet.nd.zeros",
"mxnet.gluon.nn.BatchNorm",
"mxnet.cpu",
"mxnet.gluon.nn.AvgPool2D",
"mxnet.gluon.contrib.nn.Identity",
"os.path.join",
"mxnet.gluon.nn.Flatten",
"numpy.prod"
] | [((19168, 19173), 'mxnet.cpu', 'cpu', ([], {}), '()\n', (19171, 19173), False, 'from mxnet import cpu\n'), ((19196, 19233), 'os.path.join', 'os.path.join', (['"""~"""', '""".mxnet"""', '"""models"""'], {}), "('~', '.mxnet', 'models')\n", (19208, 19233), False, 'import os\n'), ((22765, 22773), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (22771, 22773), True, 'import mxnet as mx\n'), ((23321, 23359), 'mxnet.nd.zeros', 'mx.nd.zeros', (['(1, 3, 224, 224)'], {'ctx': 'ctx'}), '((1, 3, 224, 224), ctx=ctx)\n', (23332, 23359), True, 'import mxnet as mx\n'), ((2452, 2527), 'mxnet.gluon.nn.BatchNorm', 'nn.BatchNorm', ([], {'in_channels': 'in_channels', 'use_global_stats': 'bn_use_global_stats'}), '(in_channels=in_channels, use_global_stats=bn_use_global_stats)\n', (2464, 2527), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((2585, 2606), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""relu"""'], {}), "('relu')\n", (2598, 2606), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((2931, 2955), 'mxnet.gluon.nn.Activation', 'nn.Activation', (['"""sigmoid"""'], {}), "('sigmoid')\n", (2944, 2955), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((7625, 7655), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (7644, 7655), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((7985, 8021), 'mxnet.gluon.nn.MaxPool2D', 'nn.MaxPool2D', ([], {'pool_size': '(2)', 'strides': '(2)'}), '(pool_size=2, strides=2)\n', (7997, 8021), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((8940, 8970), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (8959, 8970), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((10242, 10272), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (10261, 10272), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((12051, 12081), 'mxnet.gluon.nn.HybridSequential', 
'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (12070, 12081), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((15340, 15370), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (15359, 15370), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((15634, 15664), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (15653, 15664), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((15689, 15719), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (15708, 15719), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((16734, 16764), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (16753, 16764), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((16789, 16819), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (16808, 16819), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((17755, 17785), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (17774, 17785), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((18724, 18754), 'mxnet.gluon.nn.HybridSequential', 'nn.HybridSequential', ([], {'prefix': '""""""'}), "(prefix='')\n", (18743, 18754), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((23095, 23115), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (23102, 23115), True, 'import numpy as np\n'), ((18626, 18662), 'mxnet.gluon.nn.AvgPool2D', 'nn.AvgPool2D', ([], {'pool_size': '(7)', 'strides': '(1)'}), '(pool_size=7, strides=1)\n', (18638, 18662), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((18931, 18943), 'mxnet.gluon.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (18941, 18943), False, 'from mxnet.gluon import nn, HybridBlock\n'), ((17718, 17728), 'mxnet.gluon.contrib.nn.Identity', 
'Identity', ([], {}), '()\n', (17726, 17728), False, 'from mxnet.gluon.contrib.nn import Identity\n')] |
import numpy as np
from scipy import optimize
import matplotlib.pylab as plt
import collections, copy, itertools
class TrajectorySource(object):
    """
    Class to generate initial trajectories for linkage inference as well as for continued production
    of trajectories feeding into a "live" linking process.

    Positions are stored as an ndarray of shape (Nt, dim, n) where NaN marks a
    missing (undetected) particle; intensities have shape (Nt, n).
    """

    def __init__(self, p_nd, n, ran_dis_spec, dim=2, intensity_choices=[1.], ran_int_spec={'paras': {'delta': 0.1}, 'type': 'uniform'}):
        """
        Input:
            p_nd - float, within [0,1] = probability of non-disappearance for an atom of a trajectory from one step to the next
            n - int or float (converted to int), maximum number of positions per step
            ran_dis_spec - dict, defines the generation of spatial displacements.
                expected structure: {'paras':{'mu':0,'sig':1.},'type':'gaussian'}
            dim - int (optional), defines the spatial dimensionality of the problem
            intensity_choices - list of floats (optional), base intensity values drawn for new particles
            ran_int_spec - dict (optional), defines the generation of intensity noise.
                expected structure: {'paras':{'delta':0.1},'type':'uniform'}
        """
        # trajectory generation related
        self.implemented_ran_dis = ['gaussian', 'cubic grid']
        assert isinstance(p_nd, (int, float)) and (0. <= p_nd <= 1.), "Assertion failed - p_nd is not within [0,1]. p_nd = {}".format(p_nd)
        assert isinstance(n, (int, float)) and n > 0, "Assertion failed - expected int or float value (to convert to int) for 'n' greater than 0, got {}".format(n)
        assert ('paras' in ran_dis_spec) and ('type' in ran_dis_spec) and isinstance(ran_dis_spec['paras'], dict) and isinstance(ran_dis_spec['type'], str), "Assertion failed - expected structure {'paras':[1.,0.],'type':'gaussian'} for ran_dis_spec with type any of {}, got {} instead.".format(self.implemented_ran_dis, ran_dis_spec)
        self.p_nd = p_nd
        # fixed: actually convert to int as promised by the assertion message
        self.n = int(n)
        self.ran_dis_spec = ran_dis_spec
        self.dim = dim
        self.intensity_choices = intensity_choices
        self.ran_int_spec = ran_int_spec
        # trajectory related state, filled by generate_initial()/generate_more_steps()
        self.positions = None     # ndarray (Nt, dim, n)
        self.LM_traj = None       # ndarray (n, Nt) ground-truth linkage matrix
        self.intensities = None   # ndarray (Nt, n)

    def generate_intensity_deviation(self):
        """Draw per-particle intensity noise according to `ran_int_spec`.

        Returns:
            ndarray of shape (n,) with random intensity deviations
        """
        # fixed: removed a dead local re-assignment of ran_int_spec that
        # shadowed (and could silently diverge from) the instance setting
        if self.ran_int_spec['type'] == 'uniform':
            dx = self.ran_int_spec['paras']['delta'] * .5
            deviation = np.random.uniform(low=-dx, high=dx, size=self.n)
        else:
            raise ValueError("Error - got unexpected random generator type for intensity noise {}".format(self.ran_int_spec['type']))
        return deviation

    def _get_initial_frame(self, bounds):
        """Draw one frame of up to `n` particles uniformly within `bounds`.

        Each particle is dropped (set to NaN) with probability 1 - p_nd.
        Returns (positions ndarray (dim, n), intensities ndarray (n,)).
        """
        pos = np.array([np.random.uniform(low=bounds[v][0], high=bounds[v][1], size=self.n) for v in range(self.dim)])
        to_skip = np.random.random(self.n)
        # boolean mask instead of np.where-tuple indexing (same effect, clearer)
        pos[:, to_skip > self.p_nd] = np.nan
        intensities = np.random.choice(self.intensity_choices, size=self.n) + self.generate_intensity_deviation()
        return pos, intensities

    def generate_displacements(self):
        """Draw one random displacement vector of shape (dim,) per `ran_dis_spec`."""
        if self.ran_dis_spec['type'] == 'gaussian':
            deviation = np.random.normal(scale=self.ran_dis_spec['paras']['sig'], loc=self.ran_dis_spec['paras']['mu'], size=(self.dim,))
            signs = np.random.choice(np.array([-1, 1]))
            deviation *= signs
        elif self.ran_dis_spec['type'] == 'cubic grid':
            deviation = np.random.choice(np.array([0, 1]), size=(self.dim,))
            signs = np.random.choice(np.array([-1, 1]))
            deviation *= signs
        else:
            raise ValueError("Error - got unexpected random generator type for displacements {}".format(self.ran_dis_spec['type']))
        return deviation

    def update_positions(self, bounds):
        """
        Expects ndarray of shape (Nt,dim,nmax) and extends along the first dimension of the ndarray.
        Input:
            bounds - list of tuples, spatial bounds used when seeding new trajectories
        Returns:
            new_pos - ndarray of shape (Nt+1,dim,nmax) containing particle positions
            new_int - ndarray of shape (Nt+1,nmax) containing particle intensities
        """
        pos = self.positions
        intensities = self.intensities
        # index of the last time step with a non-NaN value for each trajectory
        # (None if the trajectory has not been seeded yet)
        last_steps = [None] * self.n
        for i in range(self.n):  # fixed: xrange -> range (Python 3)
            not_nan = np.where(~np.isnan(pos[:, 0, i]))[0]
            if len(not_nan) > 0:
                last_steps[i] = not_nan[-1]
        # start the new frame from the last known position of each trajectory
        new_pos = np.zeros((pos.shape[0] + 1, pos.shape[1], pos.shape[2]))
        new_pos[:-1] = pos
        nan_vec = [np.nan] * self.dim  # fixed: was hard-coded to 2 dimensions
        new_pos[-1, :, :] = np.array([pos[val, :, i] if val is not None else nan_vec for i, val in enumerate(last_steps)]).T
        new_int = np.zeros((intensities.shape[0] + 1, intensities.shape[1]))
        new_int[:-1] = intensities
        Nt = pos.shape[0]  # index of the new time step
        if None in last_steps:
            new_initial, _ = self._get_initial_frame(bounds)
        # fixed: removed a stray unused generate_displacements() call here;
        # it only consumed random numbers without affecting any result
        intensity_deviations = self.generate_intensity_deviation()
        new_int[-1, :] = intensities[-1, :] + intensity_deviations
        for i, val in enumerate(last_steps):
            if val is None:  # no initial position exists yet -> seed from the fresh frame
                new_pos[-1, :, i] = new_initial[:, i]
            else:
                # throwing the dice whether the position in the next move will be known
                if np.random.uniform() > self.p_nd:
                    new_pos[-1, :, i] = np.nan
                else:
                    # 1 if the previous time step is present, larger if steps were skipped
                    n_dis = Nt - val
                    dis = [self.generate_displacements() for v in range(n_dis)]
                    # fixed: reduce() is not a builtin on Python 3; sum the vectors
                    dis = np.sum(dis, axis=0)
                    new_pos[-1, :, i] += dis
        return new_pos, new_int

    def generate_initial(self, Nt, bounds=[(0, 1), (0, 1)]):
        """
        Input:
            Nt - int, number of steps to generate including the initial
            bounds - list of tuples of floats or ints (optional), the bounds for the initial frame where each tuple corresponds to one dimension in space
        """
        print("Simulating {} initial steps...".format(Nt))
        # initial positions
        x0, I0 = self._get_initial_frame(bounds)
        self.positions = np.array([x0])
        self.intensities = np.array([I0])
        for i in range(Nt - 1):  # fixed: xrange -> range
            self.positions, self.intensities = self.update_positions(bounds)

    def generate_more_steps(self, Nt, bounds=[(0, 1), (0, 1)]):
        """Append `Nt` further time steps to the existing trajectories."""
        print("Simulating {} more steps...".format(Nt))
        for i in range(Nt):  # fixed: xrange -> range
            self.positions, self.intensities = self.update_positions(bounds)

    def generate_LM_traj(self):
        """Build the ground-truth linkage matrix: entry (i, t) holds the particle
        index i when trajectory i is visible at time t, NaN otherwise."""
        LM_traj = np.zeros((self.n, self.positions.shape[0]))
        for i in range(self.n):  # fixed: xrange -> range
            LM_traj[i, :] = i
            for t in range(self.positions.shape[0]):
                if np.isnan(self.positions[t, 0, i]):
                    LM_traj[i, t] = np.nan
        self.LM_traj = LM_traj

    def get_positions(self, shuffle=False):
        """
        Return positions as well as original Linkage Matrix (LM).

        Input:
            shuffle - bool (optional), if True the particle order of every frame is
                randomly permuted and the linkage matrix is rebuilt accordingly
        Returns:
            positions (Nt,dim,n), LM_traj (n,Nt), intensities (Nt,n)
        """
        self.generate_LM_traj()
        if shuffle:
            for i in range(self.positions.shape[0]):  # fixed: xrange -> range
                idx_shuffle = np.arange(self.positions.shape[2])
                np.random.shuffle(idx_shuffle)
                self.positions[i, :, :] = self.positions[i, :, (idx_shuffle)].T
                self.intensities[i] = self.intensities[i, (idx_shuffle)]
                self.LM_traj[:, i] = self.LM_traj[(idx_shuffle), i]
            new_LM_traj = np.zeros(self.LM_traj.shape)
            new_LM_traj[:] = np.nan
            for j, config in enumerate(self.LM_traj.T):
                for i, ix in enumerate(config):
                    if not np.isnan(ix):
                        # fixed: cast to int - float indices are rejected by modern numpy
                        new_LM_traj[int(ix), j] = i
            self.LM_traj = new_LM_traj
        return self.positions, self.LM_traj, self.intensities
def coordinates_interpreter(path):
    """
    Read a coordinates file produced by ImagePeakClassifier.

    The file consists of blocks, each introduced by a header line containing
    the word 'frame', followed by one line per detected particle holding
    whitespace-separated integer coordinates.

    :param path: path to the coordinates text file
    :return: 2-tuple of
        * positions: ndarray of shape (num_frames, 2, max_particles); frames
          with fewer particles than the maximum are padded with NaN
        * intensities: ndarray of ones, shape (num_frames, max_particles)
    """
    with open(path, 'r') as f:
        lines = [line.rstrip('\n') for line in f]
    positions = []
    pos = []  # fixed: initialize so a file not starting with 'frame' cannot raise NameError
    for i, line in enumerate(lines):
        if 'frame' in line:
            if i > 0:
                positions += [pos]
            pos = []
        else:
            # fixed: materialize the ints (a lazy Python-3 map object cannot
            # be assigned into the numpy array below)
            pos += [[int(v) for v in line.split()]]
    # fixed: the final frame used to be dropped because frames were only
    # flushed when the *next* 'frame' header appeared
    if lines:
        positions += [pos]
    print("num frames {} num positions each frame {}".format(len(positions), [len(v) for v in positions]))
    max_num_pos = max(len(v) for v in positions)
    num_t = len(positions)
    arr_positions = np.zeros((num_t, 2, max_num_pos))
    arr_positions[:] = np.nan
    for i, pos in enumerate(positions):
        for j, particle in enumerate(pos):
            arr_positions[i, :, j] = np.array(particle)
    intensities = np.ones((num_t, max_num_pos))
    return arr_positions, intensities
return arr_positions, intensities | [
"numpy.random.uniform",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.random.random",
"numpy.array",
"numpy.where",
"numpy.random.normal",
"numpy.random.choice",
"numpy.arange",
"numpy.random.shuffle"
] | [((9430, 9463), 'numpy.zeros', 'np.zeros', (['(num_t, 2, max_num_pos)'], {}), '((num_t, 2, max_num_pos))\n', (9438, 9463), True, 'import numpy as np\n'), ((9650, 9679), 'numpy.ones', 'np.ones', (['(num_t, max_num_pos)'], {}), '((num_t, max_num_pos))\n', (9657, 9679), True, 'import numpy as np\n'), ((2711, 2735), 'numpy.random.random', 'np.random.random', (['self.n'], {}), '(self.n)\n', (2727, 2735), True, 'import numpy as np\n'), ((4844, 4900), 'numpy.zeros', 'np.zeros', (['(pos.shape[0] + 1, pos.shape[1], pos.shape[2])'], {}), '((pos.shape[0] + 1, pos.shape[1], pos.shape[2]))\n', (4852, 4900), True, 'import numpy as np\n'), ((5067, 5125), 'numpy.zeros', 'np.zeros', (['(intensities.shape[0] + 1, intensities.shape[1])'], {}), '((intensities.shape[0] + 1, intensities.shape[1]))\n', (5075, 5125), True, 'import numpy as np\n'), ((6973, 6987), 'numpy.array', 'np.array', (['[x0]'], {}), '([x0])\n', (6981, 6987), True, 'import numpy as np\n'), ((7016, 7030), 'numpy.array', 'np.array', (['[I0]'], {}), '([I0])\n', (7024, 7030), True, 'import numpy as np\n'), ((7458, 7501), 'numpy.zeros', 'np.zeros', (['(self.n, self.positions.shape[0])'], {}), '((self.n, self.positions.shape[0]))\n', (7466, 7501), True, 'import numpy as np\n'), ((2291, 2339), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-dx)', 'high': 'dx', 'size': 'self.n'}), '(low=-dx, high=dx, size=self.n)\n', (2308, 2339), True, 'import numpy as np\n'), ((2816, 2869), 'numpy.random.choice', 'np.random.choice', (['self.intensity_choices'], {'size': 'self.n'}), '(self.intensity_choices, size=self.n)\n', (2832, 2869), True, 'import numpy as np\n'), ((3073, 3191), 'numpy.random.normal', 'np.random.normal', ([], {'scale': "self.ran_dis_spec['paras']['sig']", 'loc': "self.ran_dis_spec['paras']['mu']", 'size': '(self.dim,)'}), "(scale=self.ran_dis_spec['paras']['sig'], loc=self.\n ran_dis_spec['paras']['mu'], size=(self.dim,))\n", (3089, 3191), True, 'import numpy as np\n'), ((8358, 8386), 'numpy.zeros', 
'np.zeros', (['self.LM_traj.shape'], {}), '(self.LM_traj.shape)\n', (8366, 8386), True, 'import numpy as np\n'), ((9612, 9630), 'numpy.array', 'np.array', (['particle'], {}), '(particle)\n', (9620, 9630), True, 'import numpy as np\n'), ((2598, 2665), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'bounds[v][0]', 'high': 'bounds[v][1]', 'size': 'self.n'}), '(low=bounds[v][0], high=bounds[v][1], size=self.n)\n', (2615, 2665), True, 'import numpy as np\n'), ((2755, 2784), 'numpy.where', 'np.where', (['(to_skip > self.p_nd)'], {}), '(to_skip > self.p_nd)\n', (2763, 2784), True, 'import numpy as np\n'), ((3223, 3240), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (3231, 3240), True, 'import numpy as np\n'), ((7640, 7673), 'numpy.isnan', 'np.isnan', (['self.positions[t, 0, i]'], {}), '(self.positions[t, 0, i])\n', (7648, 7673), True, 'import numpy as np\n'), ((8031, 8065), 'numpy.arange', 'np.arange', (['self.positions.shape[2]'], {}), '(self.positions.shape[2])\n', (8040, 8065), True, 'import numpy as np\n'), ((8083, 8113), 'numpy.random.shuffle', 'np.random.shuffle', (['idx_shuffle'], {}), '(idx_shuffle)\n', (8100, 8113), True, 'import numpy as np\n'), ((3372, 3388), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3380, 3388), True, 'import numpy as np\n'), ((3444, 3461), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (3452, 3461), True, 'import numpy as np\n'), ((5795, 5814), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5812, 5814), True, 'import numpy as np\n'), ((5939, 5960), 'numpy.zeros', 'np.zeros', (['(self.dim,)'], {}), '((self.dim,))\n', (5947, 5960), True, 'import numpy as np\n'), ((4436, 4458), 'numpy.isnan', 'np.isnan', (['pos[:, 0, i]'], {}), '(pos[:, 0, i])\n', (4444, 4458), True, 'import numpy as np\n')] |
import numpy as np
import hashlib
from collections import OrderedDict
from mujoco_worldgen.objs.obj import Obj
from mujoco_worldgen.util.types import store_args
class Material(Obj):
    """Non-placeable worldgen object that contributes MJCF material/texture assets."""

    # Materials only emit <asset> XML; they never occupy space in the scene.
    placeable = False

    @store_args
    def __init__(self,
                 random=True,
                 rgba=None,
                 texture=None,
                 texture_type=None,
                 grid_layout=None,
                 grid_size=None):
        # All constructor arguments are captured onto `self` by @store_args.
        super(Material, self).__init__()

    def generate(self, random_state, world_params, placement_size=None):
        """Populate self.xml_dict with an OrderedDict(asset=...) for this material.

        When world_params.randomize_material is False, the incoming RNG is
        replaced by one seeded from a hash of the object's name, so the same
        name always yields the same material.
        """
        if not world_params.randomize_material:
            name_hash = int(hashlib.sha1(
                self.name.encode()).hexdigest(), 16)
            random_state = np.random.RandomState(name_hash % 100000)
        # Selector among the three random generators; drawn unconditionally
        # (even when texture/rgba short-circuits the choice below).
        choice = random_state.randint(0, 3)
        if self.texture is not None:
            asset = self._material_texture(
                random_state, self.texture, self.texture_type,
                self.grid_layout, self.grid_size, self.rgba)
        elif self.rgba is not None:
            asset = self._material_rgba(random_state, self.rgba)
        else:
            generators = [self._material_rgba,
                          self._material_checker,
                          self._material_random]
            asset = generators[choice](random_state)
        self.xml_dict = OrderedDict(asset=asset)

    def generate_xml_dict(self):
        """Return the asset dictionary assembled by generate()."""
        return self.xml_dict

    def _material_rgba(self, random_state, rgba=None):
        """Plain colored material.

        rgba may be None (sample a random opaque color), a 2-tuple (lo, hi)
        sampled uniformly, or an explicit value used verbatim.
        """
        attrs = OrderedDict()
        attrs['@name'] = self.name
        attrs['@specular'] = 0.1 + 0.2 * random_state.uniform()
        attrs['@shininess'] = 0.1 + 0.2 * random_state.uniform()
        attrs['@reflectance'] = 0.1 + 0.2 * random_state.uniform()
        if rgba is None:
            color = 0.1 + 0.8 * random_state.uniform(size=4)
            color[3] = 1.0
            attrs['@rgba'] = color
        elif isinstance(rgba, tuple) and len(rgba) == 2:
            attrs['@rgba'] = random_state.uniform(rgba[0], rgba[1])
        else:
            attrs['@rgba'] = rgba
        return OrderedDict(material=[attrs])

    def _material_checker(self, random_state):
        """Builtin checker texture with randomized size, tiling and second color."""
        tex = OrderedDict()
        tex['@name'] = "texture_" + self.name
        tex['@builtin'] = 'checker'
        tex['@height'] = random_state.randint(5, 100)
        tex['@width'] = random_state.randint(5, 100)
        tex['@type'] = '2d'
        tex['@rgb1'] = [0, 0, 0]
        tex['@rgb2'] = 0.1 + 0.8 * random_state.uniform(size=3)
        texrepeat = [random_state.randint(5, 100), random_state.randint(5, 100)]
        mat = OrderedDict()
        mat['@name'] = self.name
        mat['@texrepeat'] = texrepeat
        mat['@texture'] = "texture_" + self.name
        return OrderedDict([('texture', [tex]), ('material', [mat])])

    def _material_random(self, random_state):
        """Builtin flat texture with random marks of random strength."""
        randomness = 0.1 + 0.8 * random_state.uniform()
        tex = OrderedDict()
        tex['@name'] = "texture_" + self.name
        tex['@builtin'] = 'flat'
        tex['@mark'] = 'random'
        tex['@type'] = '2d'
        tex['@height'] = 2048
        tex['@width'] = 2048
        tex['@rgb1'] = [1, 1, 1]
        tex['@rgb2'] = [1, 1, 1]
        tex['@random'] = randomness
        mat = OrderedDict([('@name', self.name),
                           ('@texture', "texture_" + self.name)])
        return OrderedDict([('texture', [tex]), ('material', [mat])])

    def _material_texture(self, random_state, texture, texture_type=None,
                          grid_layout=None, grid_size=None, rgba=None):
        """Material backed by a texture file; cube textures get a grid layout/size."""
        if texture_type is None:
            texture_type = "cube"
        tex = OrderedDict()
        tex['@name'] = "texture_" + self.name
        tex['@type'] = texture_type
        tex['@builtin'] = 'none'
        tex['@file'] = texture
        if texture_type == "cube":
            tex['@gridlayout'] = grid_layout if grid_layout is not None else '.U..LFRB.D..'
            tex['@gridsize'] = grid_size if grid_size is not None else '3 4'
        mat = OrderedDict()
        mat['@name'] = self.name
        mat['@texture'] = "texture_" + self.name
        if rgba is not None:
            mat['@rgba'] = rgba
        return OrderedDict([('texture', [tex]), ('material', [mat])])
| [
"collections.OrderedDict",
"numpy.random.RandomState"
] | [((1417, 1449), 'collections.OrderedDict', 'OrderedDict', ([], {'asset': 'self.xml_dict'}), '(asset=self.xml_dict)\n', (1428, 1449), False, 'from collections import OrderedDict\n'), ((2334, 2372), 'collections.OrderedDict', 'OrderedDict', ([], {'material': '[material_attrs]'}), '(material=[material_attrs])\n', (2345, 2372), False, 'from collections import OrderedDict\n'), ((2914, 2949), 'collections.OrderedDict', 'OrderedDict', ([], {'texture': '[texture_attr]'}), '(texture=[texture_attr])\n', (2925, 2949), False, 'from collections import OrderedDict\n'), ((3414, 3634), 'collections.OrderedDict', 'OrderedDict', (["[('@name', 'texture_' + self.name), ('@builtin', 'flat'), ('@mark',\n 'random'), ('@type', '2d'), ('@height', 2048), ('@width', 2048), (\n '@rgb1', [1, 1, 1]), ('@rgb2', [1, 1, 1]), ('@random', random)]"], {}), "([('@name', 'texture_' + self.name), ('@builtin', 'flat'), (\n '@mark', 'random'), ('@type', '2d'), ('@height', 2048), ('@width', 2048\n ), ('@rgb1', [1, 1, 1]), ('@rgb2', [1, 1, 1]), ('@random', random)])\n", (3425, 3634), False, 'from collections import OrderedDict\n'), ((3932, 4005), 'collections.OrderedDict', 'OrderedDict', (["[('@name', self.name), ('@texture', 'texture_' + self.name)]"], {}), "([('@name', self.name), ('@texture', 'texture_' + self.name)])\n", (3943, 4005), False, 'from collections import OrderedDict\n'), ((4057, 4125), 'collections.OrderedDict', 'OrderedDict', (["[('texture', [texture_attr]), ('material', [material])]"], {}), "([('texture', [texture_attr]), ('material', [material])])\n", (4068, 4125), False, 'from collections import OrderedDict\n'), ((4352, 4464), 'collections.OrderedDict', 'OrderedDict', (["[('@name', 'texture_' + self.name), ('@type', '2d'), ('@builtin', 'none'),\n ('@file', texture)]"], {}), "([('@name', 'texture_' + self.name), ('@type', '2d'), (\n '@builtin', 'none'), ('@file', texture)])\n", (4363, 4464), False, 'from collections import OrderedDict\n'), ((4866, 4939), 'collections.OrderedDict', 
'OrderedDict', (["[('@name', self.name), ('@texture', 'texture_' + self.name)]"], {}), "([('@name', self.name), ('@texture', 'texture_' + self.name)])\n", (4877, 4939), False, 'from collections import OrderedDict\n'), ((5058, 5126), 'collections.OrderedDict', 'OrderedDict', (["[('texture', [texture_attr]), ('material', [material])]"], {}), "([('texture', [texture_attr]), ('material', [material])])\n", (5069, 5126), False, 'from collections import OrderedDict\n'), ((734, 784), 'numpy.random.RandomState', 'np.random.RandomState', (['(deterministic_seed % 100000)'], {}), '(deterministic_seed % 100000)\n', (755, 784), True, 'import numpy as np\n'), ((3076, 3181), 'collections.OrderedDict', 'OrderedDict', (["[('@name', self.name), ('@texrepeat', texrepeat), ('@texture', 'texture_' +\n self.name)]"], {}), "([('@name', self.name), ('@texrepeat', texrepeat), ('@texture', \n 'texture_' + self.name)])\n", (3087, 3181), False, 'from collections import OrderedDict\n')] |
import os
import logging
logger = logging.getLogger(__name__)
from collections import OrderedDict
import numpy
import pyopencl
from pyopencl import array as cla
class OclMultiAnalyzer:
NUM_CRYSTAL = numpy.int32(13)
def __init__(self, L, L2, pixel, center, tha, thd, psi, rollx, rolly, device=None):
"""Constructor if the "Multi-analyzer" working on OpenCL
Nota:
Internally, all calculation are performed in radians.
All distances must use the same unit (m, mm or inches)
:param L: distance from the sample to the analyzer
:param L2: distance from the analyzer to the detector
:param pixel: pixel size
:param center: position of the center on the detector (in pixel)
:param tha: acceptance angle of the analyzer crystal(°)
:param thd: diffraction angle of the analyzer crystal(°) 2x tha
:param psi: Offset of angles (in 2th) of the analyzer crystals
:param rollx: mis-orientation of the analyzer along x (°)
:param rolly: mis-orientation of the analyzer along y (°)
:param device: 2-tuple with the device
"""
self.L = numpy.float64(L)
self.L2 = numpy.float64(L2)
self.pixel = numpy.float64(pixel)
self._center = numpy.ascontiguousarray(center, dtype=numpy.float64)
self._tha = numpy.deg2rad(tha)
if thd:
self._thd = numpy.deg2rad(thd)
else:
self._thd = 2.0 * self._tha
assert len(psi) == self.NUM_CRYSTAL, "psi has the right size"
assert len(rollx) == self.NUM_CRYSTAL, "rollx has the right size"
assert len(rolly) == self.NUM_CRYSTAL, "rolly has the right size"
self._psi = numpy.deg2rad(psi, dtype=numpy.float64)
self._rollx = numpy.deg2rad(rollx, dtype=numpy.float64)
self._rolly = numpy.deg2rad(rolly, dtype=numpy.float64)
if device:
self.ctx = pyopencl.create_some_context(answers=[str(i) for i in device])
else:
self.ctx = pyopencl.create_some_context(interactive=True)
self.queue = pyopencl.CommandQueue(self.ctx)
self.kernel_arguments = OrderedDict()
self.buffers = {}
self.kernel_arguments = {}
self.prg = None
self.allocate_buffers()
self.set_kernel_arguments()
self.compile_kernel()
def allocate_buffers(self):
self.buffers["roicoll"] = None
self.buffers["monitor"] = None
self.buffers["arm"] = None
self.buffers["center"] = cla.to_device(self.queue, self._center)
self.buffers["psi"] = cla.to_device(self.queue, self._psi)
self.buffers["rollx"] = cla.to_device(self.queue, self._rollx)
self.buffers["rolly"] = cla.to_device(self.queue, self._rolly)
self.buffers["out_signal"] = None
self.buffers["out_norm"] = None
def set_kernel_arguments(self):
self.kernel_arguments["integrate"] = OrderedDict([("roicoll", self.buffers["roicoll"]),
("monitor", self.buffers["monitor"]),
("arm", self.buffers["arm"]),
("num_crystal", self.NUM_CRYSTAL),
("num_frame", None),
("num_roi", numpy.uint32(512)),
("num_bin", None),
("L", self.L),
("L2", self.L2),
("pixel", self.pixel),
("center", self.buffers["center"].data),
("tha", self._tha),
("thd", self._thd),
("psi", self.buffers["psi"].data),
("rollx", self.buffers["rollx"].data),
("rolly", self.buffers["rolly"].data),
("resolution", None),
("niter", 250),
("phi_max", None),
("roi_min", None),
("roi_max", None),
("tth_min", None),
('tth_max', None),
("dtth", None),
("width", numpy.int32(0)),
("dtthw", None),
("out_signal", self.buffers["out_signal"]),
("out_norm", self.buffers["out_norm"]),
("do_debug", numpy.uint8(0)),
("cycles", None),
("local", None)])
def compile_kernel(self):
with open(os.path.join(os.path.dirname(__file__), "multianalyzer.cl"), "r") as r:
src = r.read()
self.prg = pyopencl.Program(self.ctx, src).build()
def integrate(self,
roicollection,
arm,
mon,
tth_min,
tth_max,
dtth,
phi_max=90.,
roi_min=0,
roi_max=512,
roi_step=1,
iter_max=250,
resolution=1e-3,
width=1,
dtthw=None
):
"""Performess the integration of the ROIstack recorded at given angles on t
:param roi_stack: stack of (nframes,NUM_CRYSTAL*numROI) with the recorded signal
:param arm: 2theta position of the arm (in degrees)
:param tth_min: start position of the histograms (in degrees)
:param tth_max: End positon of the histogram (in degrees)
:param dtth: bin size for the histogram (in degrees)
:param phi_max: discard data with |phi| larger than this value (in degree)
:param iter_max: maximum number of iteration in the 2theta convergence
:param resolution: precision of the 2theta convergence in fraction of dtth
:param width: width of the sample, same unit as pixels
:param dtthw: Minimum precision expected for ROI being `width` appart, by default dtth
:return: center of bins, histogram of signal and histogram of normalization, cycles per data-point
"""
if roi_step and roi_step!=1:
logger.warning("only roi_step=1 is supported in OpenCL")
dtthw = dtthw or dtth
do_debug = logger.getEffectiveLevel()<=logging.DEBUG
nframes = arm.shape[0]
roicoll = numpy.ascontiguousarray(roicollection, dtype=numpy.int32).reshape((nframes, self.NUM_CRYSTAL, -1))
mon = numpy.ascontiguousarray(mon, dtype=numpy.int32)
tth_max += 0.5 * dtth
tth_b = numpy.arange(tth_min, tth_max + 0.4999999 * dtth, dtth)
tth_min -= 0.5 * dtth
nbin = tth_b.size
assert mon.shape[0] == arm.shape[0], "monitor array shape matches the one from arm array "
nroi = roicoll.shape[-1]
arm = numpy.deg2rad(arm)
try:
max_frames = min(int(int(self.ctx.devices[0].max_mem_alloc_size)/(numpy.dtype(numpy.int32).itemsize*self.NUM_CRYSTAL*nroi)),
nframes)
except:
max_frames = None
logger.info(f"Allocate `out_norm` on device for {4*self.NUM_CRYSTAL*nbin/1e6}MB")
self.buffers["out_norm"] = cla.empty(self.queue, (self.NUM_CRYSTAL, nbin), dtype=numpy.int32)
logger.info(f"Allocate `out_signal` on device for {4*self.NUM_CRYSTAL*nbin/1e6}MB")
self.buffers["out_signal"] = cla.empty(self.queue, (self.NUM_CRYSTAL, nbin), dtype=numpy.int32)
evt = self.prg.memset(self.queue, (nbin, self.NUM_CRYSTAL), None,
numpy.uint32(self.NUM_CRYSTAL),
numpy.uint32(nbin),
self.buffers["out_norm"].data,
self.buffers["out_signal"].data)
if max_frames:
logger.info(f"Allocate partial `roicoll` on device for {numpy.dtype(numpy.int32).itemsize*self.NUM_CRYSTAL*nroi*max_frames/1e6}MB")
self.buffers["roicoll"] = cla.empty(self.queue, (max_frames, self.NUM_CRYSTAL, nroi), dtype=numpy.int32)
logger.info(f"Allocate partial `mon` on device for {numpy.dtype(numpy.int32).itemsize*max_frames/1e6}MB")
self.buffers["monitor"] = cla.empty(self.queue, (max_frames), dtype=numpy.int32)
logger.info(f"Allocate partial `arm` on device for {numpy.dtype(numpy.float64).itemsize*max_frames/1e6}MB")
self.buffers["arm"] = cla.empty(self.queue, (max_frames), dtype=numpy.float64)
else:
logger.info(f"Allocate complete `roicoll` on device for {roicoll.nbytes/1e6}MB")
self.buffers["roicoll"] = cla.to_device(self.queue, roicoll)
logger.info(f"Allocate complete `mon` on device for {mon.nbytes/1e6}MB")
self.buffers["monitor"] = cla.to_device(self.queue, mon)
logger.info(f"Allocate complete `arm` on device for {arm.nbytes/1e6}MB")
self.buffers["arm"] = cla.to_device(self.queue, arm)
kwags = self.kernel_arguments["integrate"]
kwags["roicoll"] = self.buffers["roicoll"].data
kwags["monitor"] = self.buffers["monitor"].data
kwags["arm"] = self.buffers["arm"].data
kwags["out_norm"] = self.buffers["out_norm"].data
kwags["out_signal"] = self.buffers["out_signal"].data
kwags["num_frame"] = numpy.uint32(max_frames if max_frames else nframes)
kwags["num_roi"] = numpy.uint32(nroi)
kwags["num_bin"] = numpy.uint32(nbin)
kwags["resolution"] = numpy.deg2rad(resolution*dtth)
kwags["niter"] = numpy.int32(iter_max)
kwags["phi_max"] = numpy.deg2rad(phi_max)
kwags["tth_min"] = numpy.deg2rad(tth_min)
kwags['tth_max'] = numpy.deg2rad(tth_max)
kwags["dtth"] = numpy.deg2rad(dtth)
kwags["roi_min"] = numpy.uint32(max(roi_min, 0))
kwags["roi_max"] = numpy.uint32(min(roi_max, nroi))
kwags["local"] = pyopencl.LocalMemory(8*nroi)
kwags["width"] = numpy.int32(0.5*width/self.pixel)
kwags["dtthw"] = numpy.deg2rad(dtthw)
if do_debug:
logger.info(f"Allocate `cycles` on device for {self.NUM_CRYSTAL*nroi*nframes/1e6}MB")
if max_frames:
self.buffers["cycles"] = cla.empty(self.queue, (self.NUM_CRYSTAL, nroi, max_frames), dtype=numpy.uint8)
else:
self.buffers["cycles"] = cla.empty(self.queue, (self.NUM_CRYSTAL, nroi, nframes), dtype=numpy.uint8)
cycles = numpy.zeros((self.NUM_CRYSTAL, nroi, nframes), dtype=numpy.uint8)
else:
self.buffers["cycles"] = cla.empty(self.queue, (1, 1, 1), dtype=numpy.uint8)
kwags["do_debug"] = numpy.int32(do_debug)
kwags["cycles"] = self.buffers["cycles"].data
if do_debug:
log = ["Parameters of the `integrate` kernel:"]
i=0
for k,v in kwags.items():
i+=1
log.append(f"#{i}\t{k}: {v}")
logger.debug("\n".join(log))
if max_frames:
for start in range(0, nframes, max_frames):
stop = start+max_frames
if stop<nframes:
sub_roicol = roicoll[start:stop, :, :]
sub_arm = arm[start:stop]
sub_mon = mon[start:stop]
else:
stop = nframes
sub_roicol = numpy.empty((max_frames, self.NUM_CRYSTAL, nroi), dtype=numpy.int32)
sub_roicol[:stop-start, ...] = roicoll[start:stop, ...]
sub_arm = numpy.empty((max_frames), dtype=numpy.float64)
sub_arm[:stop-start] = arm[start:stop]
sub_mon = numpy.empty((max_frames), dtype=numpy.int32)
sub_mon[:stop-start] = mon[start:stop]
self.buffers["roicoll"].set(sub_roicol)
self.buffers["monitor"].set(sub_mon)
self.buffers["arm"].set(sub_arm)
evt = self.prg.integrate(self.queue, (nroi, stop-start, self.NUM_CRYSTAL), (nroi, 1, 1), *kwags.values())
if do_debug:
cycles[:, :, start:stop] = self.buffers["cycles"].get()[:, :, :stop-start]
else:
evt = self.prg.integrate(self.queue, (nroi, nframes, self.NUM_CRYSTAL), (nroi, 1, 1), *kwags.values())
if do_debug:
cycles = self.buffers["cycles"].get()
evt.wait()
if do_debug:
return tth_b, self.buffers["out_signal"].get(), self.buffers["out_norm"].get(), cycles
else:
return tth_b, self.buffers["out_signal"].get(), self.buffers["out_norm"].get()
| [
"numpy.uint32",
"pyopencl.array.empty",
"numpy.empty",
"pyopencl.Program",
"numpy.arange",
"numpy.float64",
"os.path.dirname",
"pyopencl.CommandQueue",
"numpy.int32",
"numpy.uint8",
"pyopencl.create_some_context",
"numpy.deg2rad",
"numpy.dtype",
"numpy.zeros",
"pyopencl.array.to_device",... | [((34, 61), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (51, 61), False, 'import logging\n'), ((205, 220), 'numpy.int32', 'numpy.int32', (['(13)'], {}), '(13)\n', (216, 220), False, 'import numpy\n'), ((1177, 1193), 'numpy.float64', 'numpy.float64', (['L'], {}), '(L)\n', (1190, 1193), False, 'import numpy\n'), ((1212, 1229), 'numpy.float64', 'numpy.float64', (['L2'], {}), '(L2)\n', (1225, 1229), False, 'import numpy\n'), ((1251, 1271), 'numpy.float64', 'numpy.float64', (['pixel'], {}), '(pixel)\n', (1264, 1271), False, 'import numpy\n'), ((1295, 1347), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['center'], {'dtype': 'numpy.float64'}), '(center, dtype=numpy.float64)\n', (1318, 1347), False, 'import numpy\n'), ((1368, 1386), 'numpy.deg2rad', 'numpy.deg2rad', (['tha'], {}), '(tha)\n', (1381, 1386), False, 'import numpy\n'), ((1740, 1779), 'numpy.deg2rad', 'numpy.deg2rad', (['psi'], {'dtype': 'numpy.float64'}), '(psi, dtype=numpy.float64)\n', (1753, 1779), False, 'import numpy\n'), ((1802, 1843), 'numpy.deg2rad', 'numpy.deg2rad', (['rollx'], {'dtype': 'numpy.float64'}), '(rollx, dtype=numpy.float64)\n', (1815, 1843), False, 'import numpy\n'), ((1866, 1907), 'numpy.deg2rad', 'numpy.deg2rad', (['rolly'], {'dtype': 'numpy.float64'}), '(rolly, dtype=numpy.float64)\n', (1879, 1907), False, 'import numpy\n'), ((2128, 2159), 'pyopencl.CommandQueue', 'pyopencl.CommandQueue', (['self.ctx'], {}), '(self.ctx)\n', (2149, 2159), False, 'import pyopencl\n'), ((2192, 2205), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2203, 2205), False, 'from collections import OrderedDict\n'), ((2568, 2607), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'self._center'], {}), '(self.queue, self._center)\n', (2581, 2607), True, 'from pyopencl import array as cla\n'), ((2638, 2674), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'self._psi'], {}), '(self.queue, self._psi)\n', 
(2651, 2674), True, 'from pyopencl import array as cla\n'), ((2707, 2745), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'self._rollx'], {}), '(self.queue, self._rollx)\n', (2720, 2745), True, 'from pyopencl import array as cla\n'), ((2778, 2816), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'self._rolly'], {}), '(self.queue, self._rolly)\n', (2791, 2816), True, 'from pyopencl import array as cla\n'), ((7535, 7582), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['mon'], {'dtype': 'numpy.int32'}), '(mon, dtype=numpy.int32)\n', (7558, 7582), False, 'import numpy\n'), ((7629, 7684), 'numpy.arange', 'numpy.arange', (['tth_min', '(tth_max + 0.4999999 * dtth)', 'dtth'], {}), '(tth_min, tth_max + 0.4999999 * dtth, dtth)\n', (7641, 7684), False, 'import numpy\n'), ((7896, 7914), 'numpy.deg2rad', 'numpy.deg2rad', (['arm'], {}), '(arm)\n', (7909, 7914), False, 'import numpy\n'), ((8275, 8341), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(self.NUM_CRYSTAL, nbin)'], {'dtype': 'numpy.int32'}), '(self.queue, (self.NUM_CRYSTAL, nbin), dtype=numpy.int32)\n', (8284, 8341), True, 'from pyopencl import array as cla\n'), ((8471, 8537), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(self.NUM_CRYSTAL, nbin)'], {'dtype': 'numpy.int32'}), '(self.queue, (self.NUM_CRYSTAL, nbin), dtype=numpy.int32)\n', (8480, 8537), True, 'from pyopencl import array as cla\n'), ((10401, 10452), 'numpy.uint32', 'numpy.uint32', (['(max_frames if max_frames else nframes)'], {}), '(max_frames if max_frames else nframes)\n', (10413, 10452), False, 'import numpy\n'), ((10480, 10498), 'numpy.uint32', 'numpy.uint32', (['nroi'], {}), '(nroi)\n', (10492, 10498), False, 'import numpy\n'), ((10526, 10544), 'numpy.uint32', 'numpy.uint32', (['nbin'], {}), '(nbin)\n', (10538, 10544), False, 'import numpy\n'), ((10575, 10607), 'numpy.deg2rad', 'numpy.deg2rad', (['(resolution * dtth)'], {}), '(resolution * dtth)\n', (10588, 10607), False, 'import numpy\n'), ((10631, 
10652), 'numpy.int32', 'numpy.int32', (['iter_max'], {}), '(iter_max)\n', (10642, 10652), False, 'import numpy\n'), ((10680, 10702), 'numpy.deg2rad', 'numpy.deg2rad', (['phi_max'], {}), '(phi_max)\n', (10693, 10702), False, 'import numpy\n'), ((10730, 10752), 'numpy.deg2rad', 'numpy.deg2rad', (['tth_min'], {}), '(tth_min)\n', (10743, 10752), False, 'import numpy\n'), ((10780, 10802), 'numpy.deg2rad', 'numpy.deg2rad', (['tth_max'], {}), '(tth_max)\n', (10793, 10802), False, 'import numpy\n'), ((10827, 10846), 'numpy.deg2rad', 'numpy.deg2rad', (['dtth'], {}), '(dtth)\n', (10840, 10846), False, 'import numpy\n'), ((10989, 11019), 'pyopencl.LocalMemory', 'pyopencl.LocalMemory', (['(8 * nroi)'], {}), '(8 * nroi)\n', (11009, 11019), False, 'import pyopencl\n'), ((11043, 11080), 'numpy.int32', 'numpy.int32', (['(0.5 * width / self.pixel)'], {}), '(0.5 * width / self.pixel)\n', (11054, 11080), False, 'import numpy\n'), ((11102, 11122), 'numpy.deg2rad', 'numpy.deg2rad', (['dtthw'], {}), '(dtthw)\n', (11115, 11122), False, 'import numpy\n'), ((11755, 11776), 'numpy.int32', 'numpy.int32', (['do_debug'], {}), '(do_debug)\n', (11766, 11776), False, 'import numpy\n'), ((1427, 1445), 'numpy.deg2rad', 'numpy.deg2rad', (['thd'], {}), '(thd)\n', (1440, 1445), False, 'import numpy\n'), ((2051, 2097), 'pyopencl.create_some_context', 'pyopencl.create_some_context', ([], {'interactive': '(True)'}), '(interactive=True)\n', (2079, 2097), False, 'import pyopencl\n'), ((8643, 8673), 'numpy.uint32', 'numpy.uint32', (['self.NUM_CRYSTAL'], {}), '(self.NUM_CRYSTAL)\n', (8655, 8673), False, 'import numpy\n'), ((8705, 8723), 'numpy.uint32', 'numpy.uint32', (['nbin'], {}), '(nbin)\n', (8717, 8723), False, 'import numpy\n'), ((9054, 9132), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(max_frames, self.NUM_CRYSTAL, nroi)'], {'dtype': 'numpy.int32'}), '(self.queue, (max_frames, self.NUM_CRYSTAL, nroi), dtype=numpy.int32)\n', (9063, 9132), True, 'from pyopencl import array as cla\n'), ((9290, 
9342), 'pyopencl.array.empty', 'cla.empty', (['self.queue', 'max_frames'], {'dtype': 'numpy.int32'}), '(self.queue, max_frames, dtype=numpy.int32)\n', (9299, 9342), True, 'from pyopencl import array as cla\n'), ((9500, 9554), 'pyopencl.array.empty', 'cla.empty', (['self.queue', 'max_frames'], {'dtype': 'numpy.float64'}), '(self.queue, max_frames, dtype=numpy.float64)\n', (9509, 9554), True, 'from pyopencl import array as cla\n'), ((9702, 9736), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'roicoll'], {}), '(self.queue, roicoll)\n', (9715, 9736), True, 'from pyopencl import array as cla\n'), ((9860, 9890), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'mon'], {}), '(self.queue, mon)\n', (9873, 9890), True, 'from pyopencl import array as cla\n'), ((10010, 10040), 'pyopencl.array.to_device', 'cla.to_device', (['self.queue', 'arm'], {}), '(self.queue, arm)\n', (10023, 10040), True, 'from pyopencl import array as cla\n'), ((11558, 11623), 'numpy.zeros', 'numpy.zeros', (['(self.NUM_CRYSTAL, nroi, nframes)'], {'dtype': 'numpy.uint8'}), '((self.NUM_CRYSTAL, nroi, nframes), dtype=numpy.uint8)\n', (11569, 11623), False, 'import numpy\n'), ((11675, 11726), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(1, 1, 1)'], {'dtype': 'numpy.uint8'}), '(self.queue, (1, 1, 1), dtype=numpy.uint8)\n', (11684, 11726), True, 'from pyopencl import array as cla\n'), ((5706, 5737), 'pyopencl.Program', 'pyopencl.Program', (['self.ctx', 'src'], {}), '(self.ctx, src)\n', (5722, 5737), False, 'import pyopencl\n'), ((7422, 7479), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['roicollection'], {'dtype': 'numpy.int32'}), '(roicollection, dtype=numpy.int32)\n', (7445, 7479), False, 'import numpy\n'), ((11323, 11401), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(self.NUM_CRYSTAL, nroi, max_frames)'], {'dtype': 'numpy.uint8'}), '(self.queue, (self.NUM_CRYSTAL, nroi, max_frames), dtype=numpy.uint8)\n', (11332, 11401), True, 'from pyopencl import 
array as cla\n'), ((11461, 11536), 'pyopencl.array.empty', 'cla.empty', (['self.queue', '(self.NUM_CRYSTAL, nroi, nframes)'], {'dtype': 'numpy.uint8'}), '(self.queue, (self.NUM_CRYSTAL, nroi, nframes), dtype=numpy.uint8)\n', (11470, 11536), True, 'from pyopencl import array as cla\n'), ((3458, 3475), 'numpy.uint32', 'numpy.uint32', (['(512)'], {}), '(512)\n', (3470, 3475), False, 'import numpy\n'), ((5007, 5021), 'numpy.int32', 'numpy.int32', (['(0)'], {}), '(0)\n', (5018, 5021), False, 'import numpy\n'), ((5370, 5384), 'numpy.uint8', 'numpy.uint8', (['(0)'], {}), '(0)\n', (5381, 5384), False, 'import numpy\n'), ((5601, 5626), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (5616, 5626), False, 'import os\n'), ((12480, 12548), 'numpy.empty', 'numpy.empty', (['(max_frames, self.NUM_CRYSTAL, nroi)'], {'dtype': 'numpy.int32'}), '((max_frames, self.NUM_CRYSTAL, nroi), dtype=numpy.int32)\n', (12491, 12548), False, 'import numpy\n'), ((12655, 12699), 'numpy.empty', 'numpy.empty', (['max_frames'], {'dtype': 'numpy.float64'}), '(max_frames, dtype=numpy.float64)\n', (12666, 12699), False, 'import numpy\n'), ((12791, 12833), 'numpy.empty', 'numpy.empty', (['max_frames'], {'dtype': 'numpy.int32'}), '(max_frames, dtype=numpy.int32)\n', (12802, 12833), False, 'import numpy\n'), ((8006, 8030), 'numpy.dtype', 'numpy.dtype', (['numpy.int32'], {}), '(numpy.int32)\n', (8017, 8030), False, 'import numpy\n'), ((9198, 9222), 'numpy.dtype', 'numpy.dtype', (['numpy.int32'], {}), '(numpy.int32)\n', (9209, 9222), False, 'import numpy\n'), ((9410, 9436), 'numpy.dtype', 'numpy.dtype', (['numpy.float64'], {}), '(numpy.float64)\n', (9421, 9436), False, 'import numpy\n'), ((8940, 8964), 'numpy.dtype', 'numpy.dtype', (['numpy.int32'], {}), '(numpy.int32)\n', (8951, 8964), False, 'import numpy\n')] |
"""Tests for bdpy.util"""
import unittest
import numpy as np
import bdpy
class TestUtil(unittest.TestCase):
"""Tests for 'util' module"""
def test_create_groupvector_pass0001(self):
"""Test for create_groupvector (list and scalar inputs)."""
x = [1, 2, 3]
y = 2
exp_output = [1, 1, 2, 2, 3, 3]
test_output = bdpy.create_groupvector(x, y)
self.assertTrue((test_output == exp_output).all())
def test_create_groupvector_pass0002(self):
"""Test for create_groupvector (list and list inputs)."""
x = [1, 2, 3]
y = [2, 4, 2]
exp_output = [1, 1, 2, 2, 2, 2, 3, 3]
test_output = bdpy.create_groupvector(x, y)
self.assertTrue((test_output == exp_output).all())
def test_create_groupvector_pass0003(self):
"""Test for create_groupvector (Numpy array and scalar inputs)."""
x = np.array([1, 2, 3])
y = 2
exp_output = np.array([1, 1, 2, 2, 3, 3])
test_output = bdpy.create_groupvector(x, y)
np.testing.assert_array_equal(test_output, exp_output)
def test_create_groupvector_pass0005(self):
"""Test for create_groupvector (Numpy arrays inputs)."""
x = np.array([1, 2, 3])
y = np.array([2, 4, 2])
exp_output = np.array([1, 1, 2, 2, 2, 2, 3, 3])
test_output = bdpy.create_groupvector(x, y)
np.testing.assert_array_equal(test_output, exp_output)
def test_create_groupvector_error(self):
"""Test for create_groupvector (ValueError)."""
x = [1, 2, 3]
y = [0]
self.assertRaises(ValueError, bdpy.create_groupvector, x, y)
def test_divide_chunks(self):
'''Test for divide_chunks.'''
a = [1, 2, 3, 4, 5, 6, 7]
# Test 1
expected = [[1, 2, 3, 4],
[5, 6, 7]]
actual = bdpy.divide_chunks(a, chunk_size=4)
self.assertEqual(actual, expected)
# Test 2
expected = [[1, 2, 3],
[4, 5, 6],
[7]]
actual = bdpy.divide_chunks(a, chunk_size=3)
self.assertEqual(actual, expected)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestUtil)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"unittest.TextTestRunner",
"numpy.testing.assert_array_equal",
"bdpy.divide_chunks",
"numpy.array",
"unittest.TestLoader",
"bdpy.create_groupvector"
] | [((366, 395), 'bdpy.create_groupvector', 'bdpy.create_groupvector', (['x', 'y'], {}), '(x, y)\n', (389, 395), False, 'import bdpy\n'), ((686, 715), 'bdpy.create_groupvector', 'bdpy.create_groupvector', (['x', 'y'], {}), '(x, y)\n', (709, 715), False, 'import bdpy\n'), ((913, 932), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (921, 932), True, 'import numpy as np\n'), ((969, 997), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 3, 3]'], {}), '([1, 1, 2, 2, 3, 3])\n', (977, 997), True, 'import numpy as np\n'), ((1021, 1050), 'bdpy.create_groupvector', 'bdpy.create_groupvector', (['x', 'y'], {}), '(x, y)\n', (1044, 1050), False, 'import bdpy\n'), ((1060, 1114), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_output', 'exp_output'], {}), '(test_output, exp_output)\n', (1089, 1114), True, 'import numpy as np\n'), ((1242, 1261), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1250, 1261), True, 'import numpy as np\n'), ((1274, 1293), 'numpy.array', 'np.array', (['[2, 4, 2]'], {}), '([2, 4, 2])\n', (1282, 1293), True, 'import numpy as np\n'), ((1316, 1350), 'numpy.array', 'np.array', (['[1, 1, 2, 2, 2, 2, 3, 3]'], {}), '([1, 1, 2, 2, 2, 2, 3, 3])\n', (1324, 1350), True, 'import numpy as np\n'), ((1374, 1403), 'bdpy.create_groupvector', 'bdpy.create_groupvector', (['x', 'y'], {}), '(x, y)\n', (1397, 1403), False, 'import bdpy\n'), ((1413, 1467), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['test_output', 'exp_output'], {}), '(test_output, exp_output)\n', (1442, 1467), True, 'import numpy as np\n'), ((1887, 1922), 'bdpy.divide_chunks', 'bdpy.divide_chunks', (['a'], {'chunk_size': '(4)'}), '(a, chunk_size=4)\n', (1905, 1922), False, 'import bdpy\n'), ((2088, 2123), 'bdpy.divide_chunks', 'bdpy.divide_chunks', (['a'], {'chunk_size': '(3)'}), '(a, chunk_size=3)\n', (2106, 2123), False, 'import bdpy\n'), ((2208, 2229), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (2227, 
2229), False, 'import unittest\n'), ((2266, 2302), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2289, 2302), False, 'import unittest\n')] |
#!/usr/bin/env python
#-*-coding:utf-8-*-
from __future__ import print_function
import numpy as np
import tensorflow as tf
import pickle
#np.random.seed(1337)
#tf.set_random_seed(1337)
class Base_Line():
def __init__(self,model_params):
self.hidden_dim = model_params.hidden_dim
self.ques_len = model_params.ques_len
self.ans_len = model_params.ans_len
self.embedding_file = model_params.embedding_file
self.keep_prob = model_params.keep_prob
#self._build_base_line_pointwise()
def _build_base_line_pointwise(self):
with tf.variable_scope('input') as input_l:
self._ques = tf.placeholder(tf.int32,[None,self.ques_len],name='ques_point')
self._ques_len = tf.placeholder(tf.float32,[None,self.ques_len],name='ques_len_point')
self._ans = tf.placeholder(tf.int32,[None,self.ans_len],name='ans_point')
self._ans_len = tf.placeholder(tf.float32,[None,self.ans_len],name='ans_len_point')
self._ques_filter_len = tf.tile(tf.reshape(self._ans_len,[-1,1,self.ans_len]),[1,self.ques_len,1]) # (-1, ques_len, ans_len)
self._ans_filter_len = tf.tile(tf.reshape(self._ques_len,[-1,1,self.ques_len]),[1,self.ans_len,1]) # (-1, ans_len, ques_len, )
self._ques_align_len = tf.tile(tf.reshape(self._ques_len,[-1,self.ques_len,1]),[1,1,self.hidden_dim]) # (-1, ques_len, hd)
self._ans_align_len = tf.tile(tf.reshape(self._ans_len,[-1,self.ans_len,1]),[1,1,self.hidden_dim]) # (-1, ans_len, hd)
self.p_label = tf.placeholder(tf.float32,[None,])
#self.l_label = tf.placeholder(tf.float32,[None,self.list_size])
with tf.name_scope('list_wise'):
with tf.variable_scope('embedding_layer') as embedding_l:
weights = np.load(self.embedding_file)
weights[0] = np.zeros((weights.shape[1]))
embeddings = tf.Variable(weights,dtype=tf.float32)
ques_emb = tf.nn.embedding_lookup(embeddings,self._ques)
ans_emb = tf.nn.embedding_lookup(embeddings,self._ans)
print('ques_emb:',ques_emb.shape)
print('ans_emb',ans_emb.shape)
with tf.variable_scope('preprocess_layer') as prep_l:
sig_den = tf.layers.Dense(self.hidden_dim,activation=tf.sigmoid,name='sigmoid_dense')
tan_den = tf.layers.Dense(self.hidden_dim,activation=tf.tanh,name='tanh_dense')
ques_sig = sig_den(ques_emb)
ques_tan = tan_den(ques_emb)
ques_h = tf.multiply(ques_sig,ques_tan)
ans_sig = sig_den(ans_emb)
ans_tan = tan_den(ans_emb)
ans_h = tf.multiply(ans_sig,ans_tan)
with tf.variable_scope('attention_softalign') as att_align_l:
ques_att_matrix = self.getAttMat(ques_h,ans_h) # (bz, q_len, a_len)
ans_att_matrix = self.getAttMat(ans_h,ques_h) # (bz, a_len, q_len)
print('ques_att_matrix:',ques_att_matrix.shape)
ques_align = self.getAlign(ans_h,ques_att_matrix,self._ques_filter_len)
ans_align = self.getAlign(ques_h,ans_att_matrix,self._ans_filter_len)
print('ques_align:',ques_align.shape)
ques_aligned = tf.multiply(tf.multiply(ques_align,ques_h),self._ques_align_len)
ans_aligned = tf.multiply(tf.multiply(ans_align,ans_h),self._ans_align_len)
with tf.variable_scope('cnn_feature') as cnn_l:
cnn_ques = [tf.layers.Conv1D(self.hidden_dim,i,padding='same',activation=tf.nn.relu,name='q_conv_'+str(i)) for i in range(1,6)]
cnn_ans = [tf.layers.Conv1D(self.hidden_dim,i,padding='same',activation=tf.nn.relu,name='a_conv_'+str(i)) for i in range(1,6)]
ques_cnn = self.conv1d_listwise(cnn_ques,ques_aligned)
ans_cnn = self.conv1d_listwise(cnn_ans,ans_aligned)
print('ques_cnn:',ques_cnn.shape)
with tf.variable_scope('output_layer') as out_l:
ques_o1 = tf.layers.dense(ques_cnn,self.hidden_dim,activation=tf.tanh,name='q_out1')
ans_o1 = tf.layers.dense(ans_cnn,self.hidden_dim,activation=tf.tanh,name='a_out1')
finalo1 = tf.concat([ques_o1,ans_o1],axis=-1)
finalo2 = tf.layers.dense(finalo1,self.hidden_dim,activation=tf.tanh,name='finalout')
self.score = tf.layers.dense(finalo2,1,name='score')
print('score:',self.score.shape)
#with tf.variable_scope('loss') as loss_l:
# self.loss_pointwise = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.p_label,logits=tf.squeeze(self.score,-1)))
# self.loss_listwise = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.l_label,logits=tf.squeeze(self.score,-1)))
# self.total_loss = self.loss_pointwise + self.loss_listwise
# -----------------------------------------------------------------------------------------------------------------
def _build_base_line_listwise(self):
with tf.variable_scope('inputs') as inputs:
self.r_ques = tf.placeholder(tf.int32,[None,None,self.ques_len],name='ques_point')
self.r_ques_len = tf.placeholder(tf.float32,[None,None,self.ques_len],name='ques_len_point')
self.r_ans = tf.placeholder(tf.int32,[None,None,self.ans_len],name='ans_point')
self.r_ans_len = tf.placeholder(tf.float32,[None,None,self.ans_len],name='ans_len_point')
self._ques_filter_len = tf.tile(tf.expand_dims(self.r_ans_len,2),[1,1,self.ques_len,1]) # (bz, ls, q_len, a_len)
self._ans_filter_len = tf.tile(tf.expand_dims(self.r_ques_len,2),[1,1,self.ans_len,1])
self._ques_align_len = tf.tile(tf.expand_dims(self.r_ques_len,3),[1,1,1,self.hidden_dim])
self._ans_align_len = tf.tile(tf.expand_dims(self.r_ans_len,3),[1,1,1,self.hidden_dim])
#self.p_label = tf.placeholder(tf.float32,[None,None])
self.l_label = tf.placeholder(tf.float32,[None,None])
self.is_train = tf.placeholder(tf.bool)
self._ques = self.r_ques
self._ques_len = self.r_ques_len
self._ans = self.r_ans
self._ans_len = self.r_ans_len
batch_size, list_size = tf.shape(self._ans)[0], tf.shape(self._ans)[1]
self.dc = tf.placeholder(tf.bool)
with tf.name_scope('list_wise'):
with tf.variable_scope('embedding_layer') as embedding_l:
with open(self.embedding_file, 'rb') as fin:
weights = pickle.load(fin)
embeddings = tf.Variable(weights,dtype=tf.float32,trainable=False)
ques_emb = tf.nn.embedding_lookup(embeddings,self._ques)
ans_emb = tf.nn.embedding_lookup(embeddings,self._ans)
print('ques_emb:',ques_emb.shape)
print('ans_emb',ans_emb.shape)
with tf.variable_scope('preprocess_layer') as prep_l:
sig_den = tf.layers.Dense(self.hidden_dim,name='q_sigmoid_dense')
tan_den = tf.layers.Dense(self.hidden_dim,name='q_tanh_dense')
#a_sig_den = tf.layers.Dense(self.hidden_dim,name='a_sigmoid_dense')
#a_tan_den = tf.layers.Dense(self.hidden_dim,name='a_tanh_dense')
ques_sig = sig_den(ques_emb)
ques_tan = tan_den(ques_emb)
#ques_sig = tf.layers.batch_normalization(ques_sig,training=self.is_train)
#ques_tan = tf.layers.batch_normalization(ques_tan,training=self.is_train)
ques_sig = tf.sigmoid(ques_sig)
ques_tan = tf.tanh(ques_tan)
ques_h = tf.multiply(ques_sig,ques_tan)
#ques_h = ques_emb
ans_sig = sig_den(ans_emb)
ans_tan = tan_den(ans_emb)
#ans_sig = tf.layers.batch_normalization(ans_sig,training=self.is_train)
#ans_tan = tf.layers.batch_normalization(ans_tan,training=self.is_train)
ans_sig = tf.sigmoid(ans_sig)
ans_tan = tf.tanh(ans_tan)
ans_h = tf.multiply(ans_sig,ans_tan)
#ans_h = ans_emb
with tf.variable_scope('attention_softalign') as att_align_l:
ques_att_matrix = self.getAttMat(ques_h,ans_h)
ans_att_matrix = self.getAttMat(ans_h,ques_h)
print('ques_att_matrix:',ques_att_matrix.shape)
ques_align = self.getAlign(ans_h,ques_att_matrix,self._ques_filter_len) # (bz, ls, _q_len, hd)
ans_align = self.getAlign(ques_h,ans_att_matrix,self._ans_filter_len)
print('ques_align:',ques_align.shape)
ques_aligned = tf.multiply(tf.multiply(ques_align,ques_h),self._ques_align_len)
ans_aligned = tf.multiply(tf.multiply(ans_align,ans_h),self._ans_align_len)
with tf.variable_scope('cnn_feature') as cnn_l:
self.cnn_ques = [tf.layers.Conv1D(self.hidden_dim,i,padding='same',activation=tf.nn.relu,name='q_conv_'+str(i)) for i in range(1,6)]
self.cnn_ans = [tf.layers.Conv1D(self.hidden_dim,i,padding='same',activation=tf.nn.relu,name='a_conv_'+str(i)) for i in range(1,6)]
ques_aligned = tf.reshape(ques_aligned,shape=(-1,self.ques_len,self.hidden_dim))
ans_aligned = tf.reshape(ans_aligned,shape=(-1,self.ans_len,self.hidden_dim))
ques_cnn_filter = tf.reshape(self._ques_align_len,shape=(-1,self.ques_len,self.hidden_dim))
ans_cnn_filter = tf.reshape(self._ans_align_len,shape=(-1,self.ans_len,self.hidden_dim))
ques_cnn = self.conv1d_listwise(self.cnn_ques,ques_aligned,ques_cnn_filter)
ans_cnn = self.conv1d_listwise(self.cnn_ans,ans_aligned,ans_cnn_filter)
ques_cnn = tf.reshape(ques_cnn,shape=(batch_size,list_size,self.hidden_dim*len(self.cnn_ques)))
ans_cnn = tf.reshape(ans_cnn,shape=(batch_size,list_size,self.hidden_dim*len(self.cnn_ans)))
#ques_cnn = tf.concat([self.conv1d_listwise(self.cnn_ques,ques_aligned[:,i,:,:],keep_dims=True) for i in range(ques_aligned.shape[1])],axis=1)
#ans_cnn = tf.concat([self.conv1d_listwise(self.cnn_ans,ans_aligned[:,i,:,:],keep_dims=True) for i in range(ques_aligned.shape[1])],axis=1)
#def _conv1d_listwise(step,sent_cnn,sent_aligned,signal):
# conv1dfn = self.cnn_ques if tf.equal(signal,tf.constant(1)) is not None else self.cnn_ans
# sent_cnn = tf.concat([sent_cnn,self.conv1d_listwise(conv1dfn,sent_aligned[:,step,:,:],True)],1)
# return step+1,sent_cnn,sent_aligned,signal
#ques_cnn = tf.zeros([tf.shape(ques_aligned)[0],1,self.hidden_dim*len(self.cnn_ques)],dtype=tf.float32)
#ans_cnn = tf.zeros([tf.shape(ques_aligned)[0],1,self.hidden_dim*len(self.cnn_ans)],dtype=tf.float32)
#step = tf.constant(0)
#signal = tf.constant(1)
#_,ques_cnn,_,_ = tf.while_loop(cond=lambda step,*_: step<tf.shape(ques_aligned)[1],
# body=_conv1d_listwise,
# loop_vars=[step,ques_cnn,ques_aligned,signal],
# shape_invariants=[step.get_shape(),tf.TensorShape([ques_cnn.shape[0],None,ques_cnn.shape[2]]),ques_aligned.get_shape(),signal.get_shape()])
#step = tf.constant(0)
#signal = tf.constant(0)
#_,ans_cnn,_,_ = tf.while_loop(cond=lambda step,*_: step<tf.shape(ans_aligned)[1],
# body=_conv1d_listwise,
# loop_vars=[step,ans_cnn,ans_aligned,signal],
# shape_invariants=[step.get_shape(),tf.TensorShape([ans_cnn.shape[0],None,ans_cnn.shape[2]]),ans_aligned.get_shape(),signal.get_shape()])
#ques_cnn = ques_cnn[:,1:,:]
#ans_cnn = ans_cnn[:,1:,:]
print('ques_cnn:',ques_cnn.shape)
print('ans_cnn:',ans_cnn.shape )
with tf.variable_scope('output_layer') as out_l:
ques_o1 = tf.layers.dense(ques_cnn,self.hidden_dim,activation=tf.tanh,name='q_out1')
ans_o1 = tf.layers.dense(ans_cnn,self.hidden_dim,activation=tf.tanh,name='a_out1')
finalo1 = tf.concat([ques_o1,ans_o1],axis=-1)
finalo2 = tf.layers.dense(finalo1,self.hidden_dim,activation=tf.tanh,name='finalout')
self.score = tf.layers.dense(finalo2,1,name='score')
print('score:',self.score.shape)
self.logit_score = tf.nn.log_softmax(tf.squeeze(self.score,-1),dim=-1)
print('logit_score:',self.logit_score.shape)
with tf.variable_scope('loss') as loss_l:
#self.loss_pointwise = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.p_label,logits=tf.squeeze(self.score,-1)))
self.loss_listwise = tf.reduce_mean(self.l_label*(tf.log(tf.clip_by_value(self.l_label,1e-5,1.0))-self.logit_score))
#self.total_loss = self.loss_pointwise + self.loss_listwise
@staticmethod
def getAttMat(sent1,sent2):
return tf.matmul(sent1,sent2,transpose_b=True)
@staticmethod
# self.getAlign(ans_h,ques_att_matrix,self._ques_filter_len)
# ans_h: (bz, ls, a_len, hd)
# ques_filter_len: (bz, ls, _q_len, a_len)
# return: (bz, ls, _q_len, hd)
def getAlign(sent,matrix,sent_len):
matrix_e = tf.exp(matrix-tf.reduce_max(matrix,-1,keep_dims=True))
matrix_e_true = tf.multiply(matrix_e,sent_len) # (bz, ls, _q_len, a_len)
matrix_s = tf.reduce_sum(matrix_e_true,-1,keep_dims=True) # (bz, ls, _q_len, 1)
matrix_sm = matrix_e_true/matrix_s # (bz, ls, _q_len, a_len)
return tf.matmul(matrix_sm,sent)
@staticmethod
# ques_cnn = self.conv1d_listwise(self.cnn_ques, ques_aligned, ques_cnn_filter)
def conv1d_listwise(conv1dfn,sent,sent_len,keep_dims=False):
cnn_out = tf.concat([conv1dfn[i](sent)*sent_len for i in range(len(conv1dfn))],axis=-1)
maxpool_out = tf.reduce_max(cnn_out,1,keep_dims=keep_dims)
return maxpool_out
if __name__ == '__main__':
    # Smoke test: build the listwise graph with a small hyper-parameter set.
    class Model_Param():
        batch_size = 10
        hidden_dim = 200
        list_size = 15
        ques_len = 30
        ans_len = 40
        keep_prob = 0.5
        embedding_file = '/data/wikiqa/self/raw/wiki_embedding.pkl'

    hyper_params = Model_Param()
    model = Base_Line(hyper_params)
    model._build_base_line_listwise()
| [
"numpy.load",
"tensorflow.reduce_sum",
"tensorflow.clip_by_value",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.multiply",
"tensorflow.Variable",
"pickle.load",
"tensorflow.reduce_max",
"tensorflow.layers.Dense",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.placeho... | [((13542, 13583), 'tensorflow.matmul', 'tf.matmul', (['sent1', 'sent2'], {'transpose_b': '(True)'}), '(sent1, sent2, transpose_b=True)\n', (13551, 13583), True, 'import tensorflow as tf\n'), ((13923, 13954), 'tensorflow.multiply', 'tf.multiply', (['matrix_e', 'sent_len'], {}), '(matrix_e, sent_len)\n', (13934, 13954), True, 'import tensorflow as tf\n'), ((14000, 14048), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['matrix_e_true', '(-1)'], {'keep_dims': '(True)'}), '(matrix_e_true, -1, keep_dims=True)\n', (14013, 14048), True, 'import tensorflow as tf\n'), ((14155, 14181), 'tensorflow.matmul', 'tf.matmul', (['matrix_sm', 'sent'], {}), '(matrix_sm, sent)\n', (14164, 14181), True, 'import tensorflow as tf\n'), ((14468, 14514), 'tensorflow.reduce_max', 'tf.reduce_max', (['cnn_out', '(1)'], {'keep_dims': 'keep_dims'}), '(cnn_out, 1, keep_dims=keep_dims)\n', (14481, 14514), True, 'import tensorflow as tf\n'), ((588, 614), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""input"""'], {}), "('input')\n", (605, 614), True, 'import tensorflow as tf\n'), ((652, 718), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.ques_len]'], {'name': '"""ques_point"""'}), "(tf.int32, [None, self.ques_len], name='ques_point')\n", (666, 718), True, 'import tensorflow as tf\n'), ((745, 817), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.ques_len]'], {'name': '"""ques_len_point"""'}), "(tf.float32, [None, self.ques_len], name='ques_len_point')\n", (759, 817), True, 'import tensorflow as tf\n'), ((839, 903), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, self.ans_len]'], {'name': '"""ans_point"""'}), "(tf.int32, [None, self.ans_len], name='ans_point')\n", (853, 903), True, 'import tensorflow as tf\n'), ((929, 999), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.ans_len]'], {'name': '"""ans_len_point"""'}), "(tf.float32, [None, self.ans_len], name='ans_len_point')\n", 
(943, 999), True, 'import tensorflow as tf\n'), ((1573, 1607), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {}), '(tf.float32, [None])\n', (1587, 1607), True, 'import tensorflow as tf\n'), ((1698, 1724), 'tensorflow.name_scope', 'tf.name_scope', (['"""list_wise"""'], {}), "('list_wise')\n", (1711, 1724), True, 'import tensorflow as tf\n'), ((5182, 5209), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""inputs"""'], {}), "('inputs')\n", (5199, 5209), True, 'import tensorflow as tf\n'), ((5247, 5319), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None, self.ques_len]'], {'name': '"""ques_point"""'}), "(tf.int32, [None, None, self.ques_len], name='ques_point')\n", (5261, 5319), True, 'import tensorflow as tf\n'), ((5346, 5424), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, self.ques_len]'], {'name': '"""ques_len_point"""'}), "(tf.float32, [None, None, self.ques_len], name='ques_len_point')\n", (5360, 5424), True, 'import tensorflow as tf\n'), ((5446, 5516), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None, self.ans_len]'], {'name': '"""ans_point"""'}), "(tf.int32, [None, None, self.ans_len], name='ans_point')\n", (5460, 5516), True, 'import tensorflow as tf\n'), ((5542, 5618), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None, self.ans_len]'], {'name': '"""ans_len_point"""'}), "(tf.float32, [None, None, self.ans_len], name='ans_len_point')\n", (5556, 5618), True, 'import tensorflow as tf\n'), ((6139, 6179), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {}), '(tf.float32, [None, None])\n', (6153, 6179), True, 'import tensorflow as tf\n'), ((6207, 6230), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (6221, 6230), True, 'import tensorflow as tf\n'), ((6498, 6521), 'tensorflow.placeholder', 'tf.placeholder', (['tf.bool'], {}), '(tf.bool)\n', (6512, 6521), True, 'import tensorflow as 
tf\n'), ((6536, 6562), 'tensorflow.name_scope', 'tf.name_scope', (['"""list_wise"""'], {}), "('list_wise')\n", (6549, 6562), True, 'import tensorflow as tf\n'), ((1042, 1090), 'tensorflow.reshape', 'tf.reshape', (['self._ans_len', '[-1, 1, self.ans_len]'], {}), '(self._ans_len, [-1, 1, self.ans_len])\n', (1052, 1090), True, 'import tensorflow as tf\n'), ((1179, 1229), 'tensorflow.reshape', 'tf.reshape', (['self._ques_len', '[-1, 1, self.ques_len]'], {}), '(self._ques_len, [-1, 1, self.ques_len])\n', (1189, 1229), True, 'import tensorflow as tf\n'), ((1320, 1370), 'tensorflow.reshape', 'tf.reshape', (['self._ques_len', '[-1, self.ques_len, 1]'], {}), '(self._ques_len, [-1, self.ques_len, 1])\n', (1330, 1370), True, 'import tensorflow as tf\n'), ((1455, 1503), 'tensorflow.reshape', 'tf.reshape', (['self._ans_len', '[-1, self.ans_len, 1]'], {}), '(self._ans_len, [-1, self.ans_len, 1])\n', (1465, 1503), True, 'import tensorflow as tf\n'), ((1743, 1779), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding_layer"""'], {}), "('embedding_layer')\n", (1760, 1779), True, 'import tensorflow as tf\n'), ((1822, 1850), 'numpy.load', 'np.load', (['self.embedding_file'], {}), '(self.embedding_file)\n', (1829, 1850), True, 'import numpy as np\n'), ((1880, 1906), 'numpy.zeros', 'np.zeros', (['weights.shape[1]'], {}), '(weights.shape[1])\n', (1888, 1906), True, 'import numpy as np\n'), ((1938, 1976), 'tensorflow.Variable', 'tf.Variable', (['weights'], {'dtype': 'tf.float32'}), '(weights, dtype=tf.float32)\n', (1949, 1976), True, 'import tensorflow as tf\n'), ((2004, 2050), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'self._ques'], {}), '(embeddings, self._ques)\n', (2026, 2050), True, 'import tensorflow as tf\n'), ((2076, 2121), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'self._ans'], {}), '(embeddings, self._ans)\n', (2098, 2121), True, 'import tensorflow as tf\n'), ((2235, 2272), 
'tensorflow.variable_scope', 'tf.variable_scope', (['"""preprocess_layer"""'], {}), "('preprocess_layer')\n", (2252, 2272), True, 'import tensorflow as tf\n'), ((2310, 2387), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.hidden_dim'], {'activation': 'tf.sigmoid', 'name': '"""sigmoid_dense"""'}), "(self.hidden_dim, activation=tf.sigmoid, name='sigmoid_dense')\n", (2325, 2387), True, 'import tensorflow as tf\n'), ((2412, 2483), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""tanh_dense"""'}), "(self.hidden_dim, activation=tf.tanh, name='tanh_dense')\n", (2427, 2483), True, 'import tensorflow as tf\n'), ((2598, 2629), 'tensorflow.multiply', 'tf.multiply', (['ques_sig', 'ques_tan'], {}), '(ques_sig, ques_tan)\n', (2609, 2629), True, 'import tensorflow as tf\n'), ((2740, 2769), 'tensorflow.multiply', 'tf.multiply', (['ans_sig', 'ans_tan'], {}), '(ans_sig, ans_tan)\n', (2751, 2769), True, 'import tensorflow as tf\n'), ((2786, 2826), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_softalign"""'], {}), "('attention_softalign')\n", (2803, 2826), True, 'import tensorflow as tf\n'), ((3510, 3542), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cnn_feature"""'], {}), "('cnn_feature')\n", (3527, 3542), True, 'import tensorflow as tf\n'), ((4047, 4080), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output_layer"""'], {}), "('output_layer')\n", (4064, 4080), True, 'import tensorflow as tf\n'), ((4117, 4194), 'tensorflow.layers.dense', 'tf.layers.dense', (['ques_cnn', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""q_out1"""'}), "(ques_cnn, self.hidden_dim, activation=tf.tanh, name='q_out1')\n", (4132, 4194), True, 'import tensorflow as tf\n'), ((4217, 4293), 'tensorflow.layers.dense', 'tf.layers.dense', (['ans_cnn', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""a_out1"""'}), "(ans_cnn, self.hidden_dim, activation=tf.tanh, name='a_out1')\n", (4232, 4293), 
True, 'import tensorflow as tf\n'), ((4318, 4355), 'tensorflow.concat', 'tf.concat', (['[ques_o1, ans_o1]'], {'axis': '(-1)'}), '([ques_o1, ans_o1], axis=-1)\n', (4327, 4355), True, 'import tensorflow as tf\n'), ((4381, 4459), 'tensorflow.layers.dense', 'tf.layers.dense', (['finalo1', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""finalout"""'}), "(finalo1, self.hidden_dim, activation=tf.tanh, name='finalout')\n", (4396, 4459), True, 'import tensorflow as tf\n'), ((4486, 4527), 'tensorflow.layers.dense', 'tf.layers.dense', (['finalo2', '(1)'], {'name': '"""score"""'}), "(finalo2, 1, name='score')\n", (4501, 4527), True, 'import tensorflow as tf\n'), ((5660, 5693), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.r_ans_len', '(2)'], {}), '(self.r_ans_len, 2)\n', (5674, 5693), True, 'import tensorflow as tf\n'), ((5785, 5819), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.r_ques_len', '(2)'], {}), '(self.r_ques_len, 2)\n', (5799, 5819), True, 'import tensorflow as tf\n'), ((5885, 5919), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.r_ques_len', '(3)'], {}), '(self.r_ques_len, 3)\n', (5899, 5919), True, 'import tensorflow as tf\n'), ((5986, 6019), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.r_ans_len', '(3)'], {}), '(self.r_ans_len, 3)\n', (6000, 6019), True, 'import tensorflow as tf\n'), ((6581, 6617), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""embedding_layer"""'], {}), "('embedding_layer')\n", (6598, 6617), True, 'import tensorflow as tf\n'), ((6771, 6826), 'tensorflow.Variable', 'tf.Variable', (['weights'], {'dtype': 'tf.float32', 'trainable': '(False)'}), '(weights, dtype=tf.float32, trainable=False)\n', (6782, 6826), True, 'import tensorflow as tf\n'), ((6853, 6899), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['embeddings', 'self._ques'], {}), '(embeddings, self._ques)\n', (6875, 6899), True, 'import tensorflow as tf\n'), ((6925, 6970), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', 
(['embeddings', 'self._ans'], {}), '(embeddings, self._ans)\n', (6947, 6970), True, 'import tensorflow as tf\n'), ((7084, 7121), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""preprocess_layer"""'], {}), "('preprocess_layer')\n", (7101, 7121), True, 'import tensorflow as tf\n'), ((7159, 7215), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.hidden_dim'], {'name': '"""q_sigmoid_dense"""'}), "(self.hidden_dim, name='q_sigmoid_dense')\n", (7174, 7215), True, 'import tensorflow as tf\n'), ((7241, 7294), 'tensorflow.layers.Dense', 'tf.layers.Dense', (['self.hidden_dim'], {'name': '"""q_tanh_dense"""'}), "(self.hidden_dim, name='q_tanh_dense')\n", (7256, 7294), True, 'import tensorflow as tf\n'), ((7762, 7782), 'tensorflow.sigmoid', 'tf.sigmoid', (['ques_sig'], {}), '(ques_sig)\n', (7772, 7782), True, 'import tensorflow as tf\n'), ((7810, 7827), 'tensorflow.tanh', 'tf.tanh', (['ques_tan'], {}), '(ques_tan)\n', (7817, 7827), True, 'import tensorflow as tf\n'), ((7853, 7884), 'tensorflow.multiply', 'tf.multiply', (['ques_sig', 'ques_tan'], {}), '(ques_sig, ques_tan)\n', (7864, 7884), True, 'import tensorflow as tf\n'), ((8210, 8229), 'tensorflow.sigmoid', 'tf.sigmoid', (['ans_sig'], {}), '(ans_sig)\n', (8220, 8229), True, 'import tensorflow as tf\n'), ((8256, 8272), 'tensorflow.tanh', 'tf.tanh', (['ans_tan'], {}), '(ans_tan)\n', (8263, 8272), True, 'import tensorflow as tf\n'), ((8297, 8326), 'tensorflow.multiply', 'tf.multiply', (['ans_sig', 'ans_tan'], {}), '(ans_sig, ans_tan)\n', (8308, 8326), True, 'import tensorflow as tf\n'), ((8376, 8416), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_softalign"""'], {}), "('attention_softalign')\n", (8393, 8416), True, 'import tensorflow as tf\n'), ((9080, 9112), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""cnn_feature"""'], {}), "('cnn_feature')\n", (9097, 9112), True, 'import tensorflow as tf\n'), ((9452, 9520), 'tensorflow.reshape', 'tf.reshape', (['ques_aligned'], {'shape': '(-1, 
self.ques_len, self.hidden_dim)'}), '(ques_aligned, shape=(-1, self.ques_len, self.hidden_dim))\n', (9462, 9520), True, 'import tensorflow as tf\n'), ((9548, 9614), 'tensorflow.reshape', 'tf.reshape', (['ans_aligned'], {'shape': '(-1, self.ans_len, self.hidden_dim)'}), '(ans_aligned, shape=(-1, self.ans_len, self.hidden_dim))\n', (9558, 9614), True, 'import tensorflow as tf\n'), ((9647, 9723), 'tensorflow.reshape', 'tf.reshape', (['self._ques_align_len'], {'shape': '(-1, self.ques_len, self.hidden_dim)'}), '(self._ques_align_len, shape=(-1, self.ques_len, self.hidden_dim))\n', (9657, 9723), True, 'import tensorflow as tf\n'), ((9754, 9828), 'tensorflow.reshape', 'tf.reshape', (['self._ans_align_len'], {'shape': '(-1, self.ans_len, self.hidden_dim)'}), '(self._ans_align_len, shape=(-1, self.ans_len, self.hidden_dim))\n', (9764, 9828), True, 'import tensorflow as tf\n'), ((12387, 12420), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""output_layer"""'], {}), "('output_layer')\n", (12404, 12420), True, 'import tensorflow as tf\n'), ((12457, 12534), 'tensorflow.layers.dense', 'tf.layers.dense', (['ques_cnn', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""q_out1"""'}), "(ques_cnn, self.hidden_dim, activation=tf.tanh, name='q_out1')\n", (12472, 12534), True, 'import tensorflow as tf\n'), ((12557, 12633), 'tensorflow.layers.dense', 'tf.layers.dense', (['ans_cnn', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""a_out1"""'}), "(ans_cnn, self.hidden_dim, activation=tf.tanh, name='a_out1')\n", (12572, 12633), True, 'import tensorflow as tf\n'), ((12658, 12695), 'tensorflow.concat', 'tf.concat', (['[ques_o1, ans_o1]'], {'axis': '(-1)'}), '([ques_o1, ans_o1], axis=-1)\n', (12667, 12695), True, 'import tensorflow as tf\n'), ((12721, 12799), 'tensorflow.layers.dense', 'tf.layers.dense', (['finalo1', 'self.hidden_dim'], {'activation': 'tf.tanh', 'name': '"""finalout"""'}), "(finalo1, self.hidden_dim, activation=tf.tanh, name='finalout')\n", (12736, 
12799), True, 'import tensorflow as tf\n'), ((12826, 12867), 'tensorflow.layers.dense', 'tf.layers.dense', (['finalo2', '(1)'], {'name': '"""score"""'}), "(finalo2, 1, name='score')\n", (12841, 12867), True, 'import tensorflow as tf\n'), ((13080, 13105), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""loss"""'], {}), "('loss')\n", (13097, 13105), True, 'import tensorflow as tf\n'), ((13858, 13899), 'tensorflow.reduce_max', 'tf.reduce_max', (['matrix', '(-1)'], {'keep_dims': '(True)'}), '(matrix, -1, keep_dims=True)\n', (13871, 13899), True, 'import tensorflow as tf\n'), ((3348, 3379), 'tensorflow.multiply', 'tf.multiply', (['ques_align', 'ques_h'], {}), '(ques_align, ques_h)\n', (3359, 3379), True, 'import tensorflow as tf\n'), ((3443, 3472), 'tensorflow.multiply', 'tf.multiply', (['ans_align', 'ans_h'], {}), '(ans_align, ans_h)\n', (3454, 3472), True, 'import tensorflow as tf\n'), ((6429, 6448), 'tensorflow.shape', 'tf.shape', (['self._ans'], {}), '(self._ans)\n', (6437, 6448), True, 'import tensorflow as tf\n'), ((6453, 6472), 'tensorflow.shape', 'tf.shape', (['self._ans'], {}), '(self._ans)\n', (6461, 6472), True, 'import tensorflow as tf\n'), ((6725, 6741), 'pickle.load', 'pickle.load', (['fin'], {}), '(fin)\n', (6736, 6741), False, 'import pickle\n'), ((8918, 8949), 'tensorflow.multiply', 'tf.multiply', (['ques_align', 'ques_h'], {}), '(ques_align, ques_h)\n', (8929, 8949), True, 'import tensorflow as tf\n'), ((9013, 9042), 'tensorflow.multiply', 'tf.multiply', (['ans_align', 'ans_h'], {}), '(ans_align, ans_h)\n', (9024, 9042), True, 'import tensorflow as tf\n'), ((12968, 12994), 'tensorflow.squeeze', 'tf.squeeze', (['self.score', '(-1)'], {}), '(self.score, -1)\n', (12978, 12994), True, 'import tensorflow as tf\n'), ((13339, 13381), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.l_label', '(1e-05)', '(1.0)'], {}), '(self.l_label, 1e-05, 1.0)\n', (13355, 13381), True, 'import tensorflow as tf\n')] |
"""
OpenVINO DL Workbench
Rise algorithm implementation
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Tuple, Callable
import numpy as np
from skimage import transform
class _ProgressReporter:
    """Translate step counts into whole-percent progress callbacks.

    ``progress_cb`` is invoked only when the integer percentage has
    advanced by at least one point since the last report.
    """

    def __init__(self, total_steps: int, progress_cb: Callable[[int], None] = None):
        self.prev_progress = 0      # last percentage reported
        self.total_steps = total_steps
        self.current_step = 0
        self.progress_cb = progress_cb

    def next_step(self):
        """Advance one step; fire the callback if progress grew by >= 1%."""
        self.current_step += 1
        if not self.progress_cb:
            return
        percent = int(self.current_step * (100 / self.total_steps))
        if percent - self.prev_progress >= 1:
            self.progress_cb(percent)
            self.prev_progress = percent
class RISE:
    """
    Rise algorithm implementation
    Paper: https://arxiv.org/pdf/1806.07421.pdf
    Github: https://github.com/eclique/RISE
    """
    NUMBER_OF_RANDOM_MASKS = 2000
    GRID_SIZE = 8
    BINARY_MASK_PROBABILITY = 0.5

    def __init__(self, image_input_size: Tuple[int, int]):
        """
        Args:
            image_input_size: (height, width) of the model's image input.
        """
        self.image_input_size = image_input_size

    def generate_masks(self, step_cb: Callable[[], None] = None) -> np.array:
        """Generate random, smoothly upsampled binary occlusion masks.

        Args:
            step_cb: optional callback invoked once per generated mask.

        Returns:
            Array of shape (NUMBER_OF_RANDOM_MASKS, H, W, 1), float32 in [0, 1].
        """
        cell_size = np.ceil(np.array(self.image_input_size) / self.GRID_SIZE)
        # Bug fix: skimage.transform.resize expects an integer output
        # shape; cell_size comes from np.ceil and is float.
        up_size = ((self.GRID_SIZE + 1) * cell_size).astype(int)

        grid = np.random.rand(self.NUMBER_OF_RANDOM_MASKS, self.GRID_SIZE, self.GRID_SIZE)
        grid = grid < self.BINARY_MASK_PROBABILITY
        grid = grid.astype('float32')

        masks = np.empty((self.NUMBER_OF_RANDOM_MASKS, *self.image_input_size), dtype='float32')

        for i in range(self.NUMBER_OF_RANDOM_MASKS):
            # random shifts
            # pylint: disable=invalid-name
            x = np.random.randint(0, cell_size[0])
            # pylint: disable=invalid-name
            y = np.random.randint(0, cell_size[1])
            # linear upsampling
            upsampled = transform.resize(grid[i], up_size, order=1, mode='reflect', anti_aliasing=False)
            # cropping
            masks[i, :, :] = upsampled[x:x + self.image_input_size[0], y:y + self.image_input_size[1]]
            # Bug fix: step_cb defaults to None, so guard before calling;
            # previously generate_masks() without a callback crashed here.
            if step_cb is not None:
                step_cb()
        masks = masks.reshape((-1, *self.image_input_size, 1))
        return masks

    def explain(self, infer: Callable[[np.array], np.array], image: np.array,
                progress_cb: Callable[[int], None] = None) -> np.array:
        """Compute saliency maps for `image` by averaging predictions over
        random occlusion masks.

        Args:
            infer: callable mapping a masked image to class scores.
            image: input image of shape (H, W, C).
            progress_cb: optional percentage-progress callback.

        Returns:
            One explanation map per class, shape (num_classes, H, W).
        """
        progress_reporter = _ProgressReporter(self.NUMBER_OF_RANDOM_MASKS * 2, progress_cb)
        masks = self.generate_masks(progress_reporter.next_step)
        predictions = []
        # Make sure multiplication is being done for correct axes
        # Convert to uint8 after multiplying by float32 mask
        masked = (image * masks).astype('uint8')
        for i in range(self.NUMBER_OF_RANDOM_MASKS):
            prediction = infer(masked[i])
            predictions.append(prediction)
            progress_reporter.next_step()
        predictions = np.array(predictions)
        reshaped_mask = masks.reshape(self.NUMBER_OF_RANDOM_MASKS, -1)
        explanation_masks = predictions.T.dot(reshaped_mask).reshape(-1, *self.image_input_size)
        # Normalize by expected mask coverage (MC estimate of the saliency).
        explanation_masks = explanation_masks / self.NUMBER_OF_RANDOM_MASKS / self.BINARY_MASK_PROBABILITY
        return explanation_masks
| [
"numpy.empty",
"numpy.random.randint",
"skimage.transform.resize",
"numpy.array",
"numpy.random.rand"
] | [((1855, 1930), 'numpy.random.rand', 'np.random.rand', (['self.NUMBER_OF_RANDOM_MASKS', 'self.GRID_SIZE', 'self.GRID_SIZE'], {}), '(self.NUMBER_OF_RANDOM_MASKS, self.GRID_SIZE, self.GRID_SIZE)\n', (1869, 1930), True, 'import numpy as np\n'), ((2037, 2122), 'numpy.empty', 'np.empty', (['(self.NUMBER_OF_RANDOM_MASKS, *self.image_input_size)'], {'dtype': '"""float32"""'}), "((self.NUMBER_OF_RANDOM_MASKS, *self.image_input_size), dtype='float32'\n )\n", (2045, 2122), True, 'import numpy as np\n'), ((3475, 3496), 'numpy.array', 'np.array', (['predictions'], {}), '(predictions)\n', (3483, 3496), True, 'import numpy as np\n'), ((2259, 2293), 'numpy.random.randint', 'np.random.randint', (['(0)', 'cell_size[0]'], {}), '(0, cell_size[0])\n', (2276, 2293), True, 'import numpy as np\n'), ((2353, 2387), 'numpy.random.randint', 'np.random.randint', (['(0)', 'cell_size[1]'], {}), '(0, cell_size[1])\n', (2370, 2387), True, 'import numpy as np\n'), ((2444, 2529), 'skimage.transform.resize', 'transform.resize', (['grid[i]', 'up_size'], {'order': '(1)', 'mode': '"""reflect"""', 'anti_aliasing': '(False)'}), "(grid[i], up_size, order=1, mode='reflect', anti_aliasing=False\n )\n", (2460, 2529), False, 'from skimage import transform\n'), ((1738, 1769), 'numpy.array', 'np.array', (['self.image_input_size'], {}), '(self.image_input_size)\n', (1746, 1769), True, 'import numpy as np\n')] |
from __future__ import division
import numpy
# Style index functions
def scale_data(imgband_data, scale_from, scale_to):
    """Linearly remap `imgband_data` from range `scale_from` to `scale_to`.

    Input values are clipped to `scale_from` first, so the output always
    lies within `scale_to`.
    """
    src_lo, src_hi = scale_from
    dst_lo, dst_hi = scale_to
    fraction = (imgband_data.clip(src_lo, src_hi) - src_lo) / (src_hi - src_lo)
    return fraction * (dst_hi - dst_lo) + dst_lo
def sum_bands(data, band1, band2, band_mapper=None):
    """Per-pixel sum of two bands, resolving aliases via `band_mapper` first."""
    if band_mapper:
        band1, band2 = band_mapper(band1), band_mapper(band2)
    return data[band1] + data[band2]
def delta_bands(data, band1, band2, band_mapper=None):
    """Per-pixel difference band1 - band2.

    Unsigned-integer bands would underflow on subtraction, so each such
    band is widened to int32 in place (preserving its nodata attribute).
    """
    if band_mapper:
        band1, band2 = band_mapper(band1), band_mapper(band2)
    for band in (band1, band2):
        if data[band].dtype.name.startswith('uint'):
            nodata = data[band].nodata
            data[band] = data[band].astype('int32')
            data[band].attrs["nodata"] = nodata
    # if typ1.name.startswith('uint') or typ2.name.startswith('uint'):
    #    data = data.astype('int32', copy=False)
    return data[band1] - data[band2]
# N.B. Modifying scale_to would be dangerous - don't do it.
# pylint: disable=dangerous-default-value
def norm_diff(data, band1, band2, band_mapper=None, scale_from=None, scale_to=[0,255]):
    """Normalised difference index: (band1 - band2) / (band1 + band2),
    optionally rescaled from `scale_from` into `scale_to`."""
    index = delta_bands(data, band1, band2, band_mapper) / sum_bands(data, band1, band2, band_mapper)
    if not scale_from:
        return index
    return scale_data(index, scale_from, scale_to)
def constant(data, band, const, band_mapper=None):
    """Flat constant index: an array shaped like `band`, filled with `const`.

    Useful for displaying mask extents as a flat colour and for debugging.
    """
    if band_mapper:
        band = band_mapper(band)
    return const + data[band] * 0.0
def single_band(data, band, band_mapper=None):
    """Use the raw value of a single band directly as the index."""
    key = band_mapper(band) if band_mapper else band
    return data[key]
def band_quotient(data, band1, band2, band_mapper=None, scale_from=None, scale_to=[0,255]):
    """Simple band ratio band1 / band2, optionally rescaled."""
    if band_mapper:
        band1, band2 = band_mapper(band1), band_mapper(band2)
    ratio = data[band1] / data[band2]
    if not scale_from:
        return ratio
    return scale_data(ratio, scale_from, scale_to)
def band_quotient_sum(data, band1a, band1b, band2a, band2b, band_mapper=None):
    """Sum of two band ratios: band1a/band1b + band2a/band2b."""
    first = band_quotient(data, band1a, band1b, band_mapper)
    second = band_quotient(data, band2a, band2b, band_mapper)
    return first + second
def sentinel2_ndci(data, b_red_edge, b_red, b_green, b_swir, band_mapper=None):
    """Normalised difference chlorophyll index, restricted to water pixels
    (MNDWI > 0.1); non-water pixels become NaN via ``.where``."""
    numerator = delta_bands(data, b_red_edge, b_red, band_mapper)
    denominator = sum_bands(data, b_red_edge, b_red, band_mapper)
    water_mask = norm_diff(data, b_green, b_swir, band_mapper)
    return numerator / denominator.where(water_mask > 0.1)
def multi_date_delta(data):
    """Difference between the two time slices of `data` (later minus earlier).

    Assumes exactly two entries along the "time" coordinate.
    """
    first_time, second_time = data.coords["time"].values
    return data.sel(time=second_time) - data.sel(time=first_time)
def single_band_log(data, band, scale_factor, exponent, band_mapper=None):
    """Scaled power-law index: scale_factor * (band**exponent - 1)."""
    if band_mapper:
        band = band_mapper(band)
    band_data = data[band]
    return scale_factor * (band_data ** exponent - 1.0)
def single_band_arcsec(data, band, scale_from=None, scale_to=None, band_mapper=None):
    """Arcsecant-style index: arccos(1 / (band + 1)), optionally rescaled.

    When `scale_from` is given without `scale_to`, the output defaults to
    the [0, 255] range.
    """
    if scale_from is not None and scale_to is None:
        scale_to = [0,255]
    if band_mapper:
        band = band_mapper(band)
    angle = numpy.arccos(1/(data[band] + 1))
    if scale_from:
        return scale_data(angle, scale_from, scale_to)
    return angle
def single_band_offset_log(data, band, scale=1.0, scale_from=None, scale_to=None, offset=None, band_mapper=None):
    """Logarithmic index of a single band.

    With `offset`: log(band * scale + offset); without: log1p(band * scale).
    When `scale_from` is given without `scale_to`, the output defaults to
    the [0, 255] range.
    """
    if scale_from is not None and scale_to is None:
        scale_to = [0,255]
    if band_mapper:
        band = band_mapper(band)
    band_data = data[band]
    if offset is None:
        logged = numpy.log1p(band_data*scale)
    else:
        logged = numpy.log(band_data * scale + offset)
    if scale_from:
        return scale_data(logged, scale_from, scale_to)
    return logged
def radar_vegetation_index(data, band_hv, band_hh, band_mapper=None):
    """Dual-pol radar vegetation index: 4*HV^2 / (HH^2 + HV^2)."""
    if band_mapper:
        band_hv, band_hh = band_mapper(band_hv), band_mapper(band_hh)
    hv_power = data[band_hv] ** 2
    hh_power = data[band_hh] ** 2
    return 4.0 * hv_power / (hh_power + hv_power)
| [
"numpy.arccos",
"numpy.log",
"numpy.log1p"
] | [((3760, 3785), 'numpy.arccos', 'numpy.arccos', (['(1 / (d + 1))'], {}), '(1 / (d + 1))\n', (3772, 3785), False, 'import numpy\n'), ((4235, 4247), 'numpy.log', 'numpy.log', (['d'], {}), '(d)\n', (4244, 4247), False, 'import numpy\n'), ((4278, 4300), 'numpy.log1p', 'numpy.log1p', (['(d * scale)'], {}), '(d * scale)\n', (4289, 4300), False, 'import numpy\n')] |
from pathlib import Path
import argparse
import numpy as np
from gym import wrappers
from rl.make_game import make_game
# TODO: Something's wrong with the seed -> Fix it
def visualize(game: str) -> None:
    """Replay a recorded run of `game`: re-seed the env, step through the
    stored action sequence, and render the final state.

    NOTE: Has to be run from a terminal, not from VS Code!
    """
    run_vals = np.load(Path.cwd() / f"runs/{game}.npy")
    # First element is the seed; the remainder is the action sequence.
    seed, actions = run_vals[0], run_vals[1:]

    env = make_game(game)
    env.reset()
    env.seed(int(seed))
    for action in actions:
        # Some envs reject numpy scalars; retry with a plain int.
        try:
            env.step(action)
        except AssertionError:
            env.step(int(action))
    env.render()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--game", type=str, default="CartPole-v0", help="Env name")
args = parser.parse_args()
visualize(args.game)
| [
"pathlib.Path.cwd",
"rl.make_game.make_game",
"argparse.ArgumentParser",
"numpy.load"
] | [((277, 287), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (285, 287), False, 'from pathlib import Path\n'), ((303, 336), 'numpy.load', 'np.load', (["(cwd / f'runs/{game}.npy')"], {}), "(cwd / f'runs/{game}.npy')\n", (310, 336), True, 'import numpy as np\n'), ((397, 412), 'rl.make_game.make_game', 'make_game', (['game'], {}), '(game)\n', (406, 412), False, 'from rl.make_game import make_game\n'), ((635, 660), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (658, 660), False, 'import argparse\n')] |
"""
This module contains the change detection algorithm by
Conradsen et al. (2015).
TODO: Make all functions work with xarray Datasets
"""
from ..io import disassemble_complex
from ..filters import BoxcarFilter
from . import ChangeDetection
import numpy as np
import xarray as xr
# Cannot install libgsl-dev on ReadTheDocs.
# So if we are building the documentation ignore the error raised.
try:
from . import _omnibus
except Exception:
import os
if os.environ.get('READTHEDOCS') != 'True':
raise
def _change_detection(ds, alpha=0.01, ml=None, n=1, njobs=1):
    """
    Implement the change detection algorithm proposed by Conradsen et al.
    (2015).

    Parameters
    ----------
    ds : xarray.Dataset
        A (multilooked) dataset in covariance matrix format.
    alpha : float (0. ... 1.), optional
        The significance level (default: 0.01).
    ml : int, optional
        Multilooking window size. If `None`, no multilooking is performed and
        the dataset is assumed to already be multilooked (default: None)
    n : int, optional
        The number of looks in `ds`. If `ml` is specified this parameter is
        ignored (default: 1).
    njobs : int, optional
        Number of parallel jobs passed to the compiled change detection
        routine (default: 1).

    Returns
    -------
    xarray.DataArray
        A boolean DataArray indicating whether a change occurred at each
        (y, x, time) coordinate.
    """
    # Bug fix: Dataset.persist() returns a new, persisted dataset rather
    # than persisting in place, so its return value must be kept.
    ds = ds.persist()
    ds_m = disassemble_complex(ds)

    # Multilooking
    if ml is not None:
        ds_m = BoxcarFilter(w=ml).apply(ds_m)
        n = ml ** 2

    values = ds_m[['C11', 'C12__re', 'C12__im', 'C22']].to_array() \
        .transpose('y', 'x', 'time', 'variable').values
    change = _omnibus.change_detection(values, alpha=alpha, n=n, njobs=njobs)

    coords = ds.coords
    dims = ['y', 'x', 'time']
    change_arr = xr.DataArray(np.asarray(change, dtype=bool),
                              dims=dims, coords=coords,
                              attrs=ds.attrs, name='change')
    return change_arr
class OmnibusTest(ChangeDetection):
    """
    OmnibusTest

    This class implements the change detection algorithm by Conradsen et al.
    (2015).

    Parameters
    ----------
    ds : xarray.Dataset
        A (multilooked) dataset in covariance matrix format.
    ml : int, optional
        Multilooking window size. By default, no multilooking is performed and
        the dataset is assumed to already be multilooked.
    n : int, optional
        The number of looks in `ds`. If `ml` is specified this parameter is
        ignored (default: 1).
    alpha : float (0. ... 1.), optional
        The significance level (default: 0.01).
    kwargs : dict, optional
        Extra keyword arguments to be applied to
        ``ChangeDetection.__init__``.
    """

    def __init__(self, ml=None, n=1, alpha=0.01, *args, **kwargs):
        # Store the test configuration before delegating to the base class.
        self.ml, self.n, self.alpha = ml, n, alpha
        super().__init__(*args, **kwargs)

    def apply(self, ds):
        """Run the omnibus change detection on `ds`."""
        return _change_detection(
            ds, alpha=self.alpha, ml=self.ml, n=self.n, njobs=self.njobs)
| [
"os.environ.get",
"numpy.asarray"
] | [((1794, 1824), 'numpy.asarray', 'np.asarray', (['change'], {'dtype': 'bool'}), '(change, dtype=bool)\n', (1804, 1824), True, 'import numpy as np\n'), ((464, 493), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""'], {}), "('READTHEDOCS')\n", (478, 493), False, 'import os\n')] |
"""Class for loading CelebA dataset.
"""
import torch
from torch.utils.data import Dataset
from torch.utils.data import SubsetRandomSampler, SequentialSampler
import torchvision
from torchvision import transforms
import pandas as pd
from PIL import Image
from skimage import io, transform
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import re
from abc import ABC, abstractmethod
import functools
import random
import time
from data.celeba_plugins.SeqSampler import SeqSampler
from data.celeba_plugins.constr_para_generator_bb import opts2face_bb_rand
from data.celeba_plugins.constr_para_generator_polytope import opts2lm_polytope_rand
from data.celeba_plugins.constr_para_generator_circle_sector import opts2lm_circle_sector_rand
from data.celeba_plugins.lm_ordering import opts2lm_ordering
def opts2celeba_dataset(opts):
    """Creates CelebaDataset object by calling its constructor with options
    from opts.

    Args:
        opts (obj): Namespace object with options.

    Returns:
        celeba_dataset (obj): Instantiated CelebaDataset object.
    """
    # Resolve the constraint parameter generator. Only the selected
    # generator is instantiated; an unknown option string raises KeyError,
    # matching the previous dict-lookup behavior.
    constr_para_generator = None
    if hasattr(opts, 'opts2constr_para_generator'):
        constr_factories = {
            'None': lambda _opts: None,
            'opts2face_bb_rand': opts2face_bb_rand,
            'opts2lm_polytope_rand': opts2lm_polytope_rand,
            'opts2lm_circle_sector_rand': opts2lm_circle_sector_rand,
        }
        constr_para_generator = \
            constr_factories[opts.opts2constr_para_generator](opts)

    # Bug fix: opts2lm_ordering(opts) was previously instantiated eagerly
    # even when opts.opts2y_generator == 'None', wasting work and failing
    # if its required options were absent. Instantiate lazily instead.
    y_factories = {
        'None': lambda _opts: None,
        'opts2lm_ordering': opts2lm_ordering,
    }
    y_generator = y_factories[opts.opts2y_generator](opts)

    return CelebaDataset(Path(opts.imgs_dir), Path(opts.lms_file),
        opts.preprocess, opts.preprocess_lm_keys, opts.rescale_output_size,
        opts.randcovercrop_output_size, opts.randcovercrop_lms_covered,
        opts.randcovercrop_padding, opts.randcovercrop_no_rand,
        opts.randhorflip_p, opts.normalize_mean, opts.normalize_std,
        constr_para_generator, y_generator, opts.rotate_y, opts.sampler)
class CelebaDataset(Dataset):
    """Torch ``Dataset`` implementation for the CelebA face dataset.

    Each sample is a dict with an 'img' entry and one entry per selected
    landmark key ('lefteye', 'righteye', 'nose', 'leftmouth', 'rightmouth').
    Depending on the configuration, the keys 'constr_para' and 'y' are added
    by the corresponding generator functors.
    """
    def __init__(self, imgs_dir, lms_file, preprocess=[], preprocess_lm_keys=[],
            rescale_output_size=100, randcovercrop_output_size=100,
            randcovercrop_lms_covered=['nose'], randcovercrop_padding=0. ,
            randcovercrop_no_rand = False, randhorflip_p = 0. ,
            normalize_mean = [0.485, 0.456, 0.406],
            normalize_std = [0.229, 0.224, 0.225],
            constr_para_generator=None, y_generator=None, rotate_y=0., sampler='all'):
        """Initialization for accessing the CelebA dataset.

        Args:
            imgs_dir (obj): Path (pathlib.Path) to the images of the dataset.
            lms_file (obj): Path (pathlib.Path) to the txt file with the
                landmarks.
            preprocess (list): List of preprocessing step names; valid names
                are the keys of ``self.preprocess_dict``.
            preprocess_lm_keys (list): Landmark keys for the output sample.
                An empty list selects all landmarks.
            rescale_output_size (int or list of two ints): Image size after
                rescaling.
            randcovercrop_output_size (int or list of two ints): Image size
                after random cover crop.
            randcovercrop_lms_covered (list): Landmark keys that the random
                cover crop must keep inside the cropped image.
            randcovercrop_padding (float or list of two floats): Padding as
                fraction of the image edge (values in [0, 1]); a single value
                is used for both dimensions, two values set x and y
                separately.
            randcovercrop_no_rand (bool): Disable random sampling of the crop
                position (deterministic position, e.g. for testing).
            randhorflip_p (float): Probability for a horizontal flip.
            normalize_mean (list): Per-channel means for the normalization
                input[c] = (input[c] - mean[c]) / std[c]. Defaults match the
                pretrained torch models.
            normalize_std (list): Per-channel standard deviations, see above.
            constr_para_generator (obj or None): Functor that derives the
                'constr_para' tensor from a preprocessed sample, or None.
            y_generator (obj or None): Functor that derives the 'y' entry
                from a preprocessed sample, or None.
            rotate_y (float or list): If a list of 4 floats (r_11, r_12,
                r_21, r_22) is given, this 2x2 rotation matrix is applied to
                the 2d target y after y_generator.
            sampler (str): Sampler selection tag; stored for callers.
        """
        self.imgs_dir = imgs_dir
        self.lms_file = lms_file
        self.preprocess = preprocess
        self.preprocess_lm_keys = preprocess_lm_keys
        self.rescale_output_size = rescale_output_size
        self.randcovercrop_output_size = randcovercrop_output_size
        self.randcovercrop_lms_covered = randcovercrop_lms_covered
        self.randcovercrop_padding = randcovercrop_padding
        self.randcovercrop_no_rand = randcovercrop_no_rand
        self.randhorflip_p = randhorflip_p
        self.normalize_mean = normalize_mean
        self.normalize_std = normalize_std
        self.constr_para_generator = constr_para_generator
        self.y_generator = y_generator
        self.rotate_y = rotate_y
        # Previously the sampler argument was silently dropped; store it so
        # callers can inspect the configured sampler type.
        self.sampler = sampler
        # Raw string avoids the invalid escape-sequence warning for '\s'.
        self.lm_frame = pd.read_csv(lms_file, sep=r'\s+', header=1)
        self.all_lm_keys = self._get_all_lm_keys()
        self.lm_keys = self._get_lm_keys()
        # Mapping from preprocessing step name to its instantiation method.
        self.preprocess_dict = {
            'rescale': self._preprocess_rescale,
            'randcovercrop': self._preprocess_randcovercrop,
            'randhorflip': self._preprocess_randhorflip,
            'totensor': self._preprocess_totensor,
            'normalize': self._preprocess_normalize,
        }
        # Preprocessing pipeline; None if no preprocessing is configured.
        self.transform = self._preprocess()

    def __getitem__(self, idx):
        """Return the (preprocessed) sample with the given index.

        Args:
            idx (int): Index of the sample.

        Returns:
            sample (dict): Dict with 'img' and the selected landmark keys.
                When constr_para_generator / y_generator are configured, the
                keys 'constr_para' / 'y' are added as well.
        """
        sample = {}
        img_name = self.lm_frame.iloc[idx].name
        img_path = self.imgs_dir / img_name
        img = io.imread(img_path)
        sample['img'] = img
        for lm_key in self.lm_keys:
            xy = self.lm_frame.loc[
                img_name,
                [lm_key + '_x', lm_key + '_y']
            ]
            sample[lm_key] = xy.values
        if self.transform:
            sample = self.transform(sample)
        if not self.constr_para_generator is None:
            sample['constr_para'] = self.constr_para_generator(sample)
        if not self.y_generator is None:
            sample['y'] = self.y_generator(sample)
        if isinstance(self.rotate_y, list):
            if len(self.rotate_y) == 4:
                # Apply the 2x2 rotation matrix (r_11, r_12, r_21, r_22)
                # to the two-dimensional target y.
                r_11, r_12, r_21, r_22 = self.rotate_y
                y_1 = sample['y'][0]
                y_2 = sample['y'][1]
                sample['y'][0] = y_1*r_11 + y_2*r_12
                sample['y'][1] = y_1*r_21 + y_2*r_22
        return sample

    def __len__(self):
        """Return the number of samples in the dataset."""
        return len(self.lm_frame)

    def split(self):
        """Split the sample indices into training, validation and test part.

        Returns:
            split (dict): Index lists under the keys 'train', 'valid' and
                'test'. The boundaries (162771 / 182638) follow the official
                CelebA partition.
        """
        indices_train = list(range(162771))
        indices_valid = list(range(162771, 182638))
        indices_test = list(range(182638, self.__len__()))
        return {
            'train': indices_train,
            'valid': indices_valid,
            'test': indices_test
        }

    def get_sampler(self, indices):
        """Return a random sampler over the given subset of indices.

        Args:
            indices (list): Indices of the subset, each within
                [0, len(dataset)].

        Returns:
            sampler (obj): torch.utils.data.SubsetRandomSampler for indices.
        """
        return SubsetRandomSampler(indices)

    def get_fixed_sampler(self, indices):
        """Return a deterministic (sequential) sampler over the given indices.

        Args:
            indices (list): Indices of the subset, each within
                [0, len(dataset)].

        Returns:
            sampler (obj): SeqSampler for indices.
        """
        return SeqSampler(indices)

    def _get_all_lm_keys(self):
        """Derive all landmark keys from the landmark-frame column names.

        A landmark key is a column name stripped of its '_x' postfix (the
        matching '_y' column is implied).

        Returns:
            all_lm_keys (list): Landmark keys, e.g. ['lefteye', 'nose', ...].
        """
        all_lm_keys = []
        for lm in self.lm_frame.columns:
            # Raw string fixes the invalid '\_' escape sequence.
            lm_key = re.sub(r'\_x', '', lm)
            if lm_key + '_x' == lm:
                all_lm_keys.append(lm_key)
        return all_lm_keys

    def _get_lm_keys(self):
        """Read and validate the landmark keys selected via options.

        Returns:
            lm_keys (list): Landmark keys for the output data sample; all
                keys when none were explicitly selected.

        Raises:
            TypeError: If a selected key does not exist in the dataset.
        """
        for lm_key in self.preprocess_lm_keys:
            if not lm_key in self.all_lm_keys:
                raise TypeError("""Landmark key {lm_key} in
                    self.preprocess_lm_keys does not exist in CelebA
                    dataset.""".format(lm_key=lm_key))
        if len(self.preprocess_lm_keys) == 0:
            return self.all_lm_keys
        return self.preprocess_lm_keys

    def _preprocess(self):
        """Build the preprocessing pipeline from the configured step names.

        Returns:
            compose (obj or None): torchvision.transforms.Compose pipeline,
                or None when no preprocessing steps are configured.

        Raises:
            ValueError: If an unknown step name is configured.
        """
        if len(self.preprocess) == 0:
            return None
        pipeline = []
        for step in self.preprocess:
            if not step in self.preprocess_dict.keys():
                raise ValueError("""Step {step} is not defined in
                    preprocess_dict.""".format(step=step))
            pipeline.append(self.preprocess_dict[step]())
        return transforms.Compose(pipeline)

    def _preprocess_rescale(self):
        """Instantiate the Rescale functor from the stored options."""
        output_size = CelebaDataset.list2scalar_tuple(self.rescale_output_size)
        return Rescale(output_size, *self.lm_keys)

    def _preprocess_randcovercrop(self):
        """Instantiate the RandCoverCrop functor from the stored options."""
        output_size = CelebaDataset.list2scalar_tuple(
            self.randcovercrop_output_size)
        lms_covered = self.randcovercrop_lms_covered
        padding = CelebaDataset.list2scalar_tuple(self.randcovercrop_padding)
        no_rand = self.randcovercrop_no_rand
        return RandCoverCrop(output_size, lms_covered, padding, no_rand,
            *self.lm_keys)

    def _preprocess_randhorflip(self):
        """Instantiate the RandHorFlip functor from the stored options.

        Raises:
            ValueError: If the flip probability is outside [0, 1].
        """
        p = self.randhorflip_p
        if not 0 <= p <= 1:
            raise ValueError('randhorflip_p must between 0 and 1.')
        return RandHorFlip(p, *self.lm_keys)

    def _preprocess_totensor(self):
        """Instantiate the ToTensor functor."""
        return ToTensor(*self.lm_keys)

    def _preprocess_normalize(self):
        """Instantiate the Normalize functor from the stored options."""
        return Normalize(self.normalize_mean, self.normalize_std,
            *self.lm_keys)

    @staticmethod
    def list2scalar_tuple(list_in):
        """Convert a 1-element list to a scalar and a 2-element list to a
        tuple; scalars are passed through unchanged.

        Args:
            list_in (scalar or list): Scalar, or list of one or two numbers.

        Returns:
            scalar_tuple (scalar or tuple): The single element for a
                one-element list, a 2-tuple for a two-element list, or the
                scalar input itself.

        Raises:
            TypeError: If the list has any other length.
        """
        if isinstance(list_in, (int, float)):
            return list_in
        if len(list_in) == 1:
            return list_in[0]
        # Bug fix: the length check must inspect list_in. The previous code
        # called len() on the integer accumulator, which raised a TypeError
        # for every two-element list.
        if len(list_in) == 2:
            return list_in[0], list_in[1]
        raise TypeError("""List which should be converted to scalar or
            tuple must contain one or two elements but {n} elements are
            given.""".format(n=len(list_in)))
class BaseTrf(ABC):
    """Abstract base class for CelebA sample-transformation functors.

    Centralizes the bookkeeping of optional landmark keys: subclasses
    decorate their ``__call__`` with :meth:`update_lm_keys` and get
    ``self.lm_keys`` populated and validated automatically.

    Attributes:
        lm_keys (list): Selected landmark keys. If empty at construction
            time, the keys are inferred from the first processed sample.
    """

    def __init__(self, *lm_keys):
        """Optionally restrict the transform to certain landmarks.

        Args:
            *lm_keys (str): Landmark keys without the coordinate postfix,
                e.g. 'nose', 'lefteye'. If none are given, all landmarks of
                the processed data sample are considered.
        """
        self.lm_keys = list(lm_keys)

    @abstractmethod
    def __call__(self):
        """Subclasses implement the actual sample transformation here."""
        pass

    @staticmethod
    def update_lm_keys(func):
        """Decorator for ``__call__`` that syncs ``self.lm_keys`` with the
        sample.

        When ``self.lm_keys`` is empty, every non-image key of the sample is
        adopted; afterwards all configured keys are validated against the
        sample.

        Raises (from the wrapper):
            TypeError: If the sample contains no landmarks or a configured
                key is missing from the sample.
        """
        @functools.wraps(func)
        def wrapper_update_lm_keys(self, sample):
            # Infer landmark keys from the sample when none were configured.
            if not self.lm_keys:
                self.lm_keys.extend(key for key in sample if key != 'img')
            if not self.lm_keys:
                raise TypeError('Data sample does not contain landmarks.')
            # Validate every configured key against the sample.
            missing = [key for key in self.lm_keys if key not in sample]
            if missing:
                raise TypeError("""Specified landmark {lm} is not within
                        given data sample.""".format(lm=missing[0]))
            return func(self, sample)
        return wrapper_update_lm_keys
class ToTensor(BaseTrf):
    """Functor that converts the image and landmark arrays of a sample to
    float tensors.

    The image is permuted from H x W x C to C x H x W. The value range is
    not rescaled here; in the usual pipeline the preceding rescale step
    already produces floats in [0., 1.]. If no landmark keys are specified
    in the constructor, all landmarks of the sample are converted (handled
    by BaseTrf).

    Attributes:
        lm_keys (list): Landmark keys whose arrays should be converted.
    """

    def __init__(self, *lm_keys):
        """Optionally restrict the conversion to certain landmarks.

        Args:
            *lm_keys (str): Landmark keys without the coordinate postfix,
                e.g. 'nose', 'lefteye'. If none are given, all landmarks of
                the data sample are considered.
        """
        super().__init__(*lm_keys)

    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Convert the ndarrays of the sample to Float tensors.

        Args:
            sample (dict): A sample containing numpy arrays.

        Returns:
            tensor_sample (dict): Sample with Float tensors; image layout is
                C x H x W.
        """
        chw_img = sample['img'].transpose((2, 0, 1))
        tensor_sample = {'img': torch.from_numpy(chw_img).float()}
        for key in self.lm_keys:
            tensor_sample[key] = torch.from_numpy(sample[key]).float()
        return tensor_sample
class RandHorFlip(BaseTrf):
    """Random horizontal flip as a simple data-augmentation technique.

    With probability ``p`` the image is mirrored along the vertical axis and
    the landmark coordinates are mirrored accordingly. Left/right landmark
    pairs (eyes, mouth corners) are swapped so that the labels keep their
    anatomical meaning. If no flip is drawn, the unchanged input sample is
    returned.

    Attributes:
        p (float): Probability for a horizontal flip.
        lm_keys (list): Landmark keys which should be in the output sample.
    """

    # Left/right landmark pairs whose labels must be exchanged after a flip.
    _SWAP_PAIRS = (('lefteye', 'righteye'), ('leftmouth', 'rightmouth'))

    def __init__(self, p=0.5, *lm_keys):
        """Initialization with option to select only certain landmarks.

        Args:
            p (float): Probability for a horizontal flip.
            *lm_keys (str): Optional landmark keys (without the coordinate
                postfix, e.g. 'nose') restricting the output sample. If none
                are given, all landmarks of the data sample are considered.
        """
        super(RandHorFlip, self).__init__(*lm_keys)
        self.p = p

    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Flip the image (and landmarks) horizontally with probability p.

        Args:
            sample (dict): A data sample with image and landmark data.

        Returns:
            flipped_sample (dict): Data sample with flipped image, or the
                unchanged input sample if no flip was drawn.
        """
        # Draw 0 (flip) with probability p, 1 (no flip) with 1 - p.
        p_coin = (self.p, 1. - self.p)
        throw_coin = np.random.choice((0, 1), p=p_coin)
        if throw_coin == 1:
            return sample
        flipped_sample = {}
        img = sample['img']
        # Mirror along the width axis (layout H x W x C); copy to detach
        # from the negative-stride view.
        flipped_sample['img'] = img[:, ::-1, :].copy()
        width = img.shape[1]
        for lm_key in self.lm_keys:
            lm_x = width - 1 - sample[lm_key][0]
            lm_y = sample[lm_key][1]
            flipped_sample[lm_key] = np.array((int(lm_x), int(lm_y)))
        # Bug fix: swap left/right labels only when both partners are
        # present. The previous version indexed the pairs unconditionally
        # and raised KeyError for landmark subsets (e.g. lm_keys=['nose']).
        for left, right in self._SWAP_PAIRS:
            if left in flipped_sample and right in flipped_sample:
                flipped_sample[left], flipped_sample[right] = (
                    flipped_sample[right], flipped_sample[left])
        return flipped_sample
class Rescale(BaseTrf):
    """Functor that rescales the image and landmarks of a sample.

    ``skimage.transform.resize`` is used internally, so the rescaled image
    values lie in [0., 1.]. If no landmark keys are specified in the
    constructor, all landmarks of the sample are transformed (handled by
    BaseTrf).

    Attributes:
        output_size (int or tuple): Target image size, see ``__init__``.
        lm_keys (list): Landmark keys to carry over into the output sample.
    """

    def __init__(self, output_size, *lm_keys):
        """Initialization with option to select only certain landmarks.

        Args:
            output_size (int or tuple): Output size of the image. A tuple
                (h, w) sets height and width explicitly; an int rescales the
                smaller of height and width to that value while keeping the
                aspect ratio.
            *lm_keys (str): Optional landmark keys (without the coordinate
                postfix, e.g. 'nose') restricting the output sample. If none
                are given, all landmarks of the data sample are considered.

        Raises:
            TypeError: If output_size is neither int nor tuple.
        """
        super().__init__(*lm_keys)
        if not isinstance(output_size, (int, tuple)):
            raise TypeError('output_size for rescaling must be int or tuple.')
        self.output_size = output_size

    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Rescale the image and landmark data of the sample.

        Args:
            sample (dict): A data sample with image and landmark data.

        Returns:
            rescaled_sample (dict): Rescaled data sample.
        """
        img = sample['img']
        h_in, w_in = img.shape[:2]  # image layout is H x W x C
        if isinstance(self.output_size, tuple):
            h_out, w_out = self.output_size
        elif h_in > w_in:
            # Scale the smaller edge (width) to output_size, keep the ratio.
            w_out = self.output_size
            h_out = h_in * w_out / w_in
        else:
            h_out = self.output_size
            w_out = w_in * h_out / h_in
        rescaled_sample = {'img': transform.resize(img, (int(h_out), int(w_out)))}
        for key in self.lm_keys:
            x_scaled = sample[key][0] * w_out / w_in
            y_scaled = sample[key][1] * h_out / h_in
            rescaled_sample[key] = np.array((int(x_scaled), int(y_scaled)))
        return rescaled_sample
class Crop(BaseTrf):
    """Functor that crops a fixed rectangle region out of a sample.

    The image is sliced as ``img[y_min:y_max, x_min:x_max]`` (Python slicing,
    i.e. the maxima are exclusive) and the landmark coordinates are shifted
    into the cropped coordinate system.

    Attributes:
        rec (dict): Rectangle to crop, with keys 'x_min', 'x_max', 'y_min'
            and 'y_max'.
        lm_keys (list): Landmark keys to carry over into the output sample.
    """

    def __init__(self, rec, *lm_keys):
        """Initialization with the crop rectangle and optional landmarks.

        Args:
            rec (dict): Rectangle region to crop; dictionary with keys
                'x_min', 'x_max', 'y_min', 'y_max'.
            *lm_keys (str): Optional landmark keys (without the coordinate
                postfix, e.g. 'nose') restricting the output sample. If none
                are given, all landmarks of the data sample are considered.
        """
        super().__init__(*lm_keys)
        self.rec = rec

    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Crop the configured rectangle and shift the landmarks accordingly.

        Args:
            sample (dict): A data sample with image and landmark data.

        Returns:
            cropped_sample (dict): Cropped data sample.
        """
        x0, x1 = self.rec['x_min'], self.rec['x_max']
        y0, y1 = self.rec['y_min'], self.rec['y_max']
        # Image layout is H x W x C.
        cropped_sample = {'img': sample['img'][y0:y1, x0:x1]}
        for key in self.lm_keys:
            xy = sample[key]
            cropped_sample[key] = np.array((xy[0] - x0, xy[1] - y0))
        return cropped_sample
class RandCoverCrop(BaseTrf):
    """Acts as a functor and crops a subimage with specified size by random
    such that all specified landmarks are covered definitely.
    For ensuring that all specified landmarks are covered a minimum rectangle
    around the landmarks is created and extended by specified padding in x and
    y dimension.
    Attributes:
        lms_covered (list): List with landmarks which should be covered by the
            random crop definitely.
        output_size_x (int): Width of output image.
        output_size_y (int): Height of output image.
        padding_x (float): Padding in x dimension given as fraction of input
            image width.
        padding_y (float): Padding in y dimension given as fraction of input
            image height.
        no_rand (boolean): If true, no random sampling for crop position.
            Especially for testing.
    """
    def __init__(self, output_size, lms_covered, padding=0., no_rand=False,
            *lm_keys):
        """Initialization with option to define landmarks which should be
        covered for sure and landmarks which should be in output sample.
        Args:
            output_size (int or tuple): Specifies width and height of output
                image. If output_size is int width and height are equal in size
                and given by output_size. If output_size is tuple first entry
                defines width and second height of output image.
            lms_covered (list): Landmarks which are covered in cropped image
                definitely. Landmarks should be specified by name without
                coordinate postfix, e.g. 'nose'.
            padding (float or tuple): Specifies padding by fraction of
                original image dimension. When padding is float both dimensions
                use the same padding ratio. With a tuple padding rates can be set
                for both dimensions separately (padding_x, padding_y). When
                padding would lead to exceeding the image boundaries maximal
                padding without this violation is applied.
            no_rand (boolean): If True, random sampling for crop position is
                switched off and the most central crop position of the valid
                range is used. In particular for reproducibility in the testing
                phase.
            *lm_keys (str): Optional number of keys in datasample for
                selecting only specified landmarks in returned data sample. If
                no landmark keys are given all landmarks within data sample are
                considered. Landmark keys should be given without coordinate
                postfix. E.g. ['nose', 'lefteye'].
        """
        super(RandCoverCrop, self).__init__(*lm_keys)
        # An int applies to both dimensions, a tuple sets (width, height).
        if isinstance(output_size, int):
            self.output_size_x = output_size
            self.output_size_y = output_size
        else:
            self.output_size_x = output_size[0]
            self.output_size_y = output_size[1]
        self.lms_covered = lms_covered
        # Same convention for the padding ratios.
        if isinstance(padding, float):
            self.padding_x = padding
            self.padding_y = padding
        else:
            self.padding_x = padding[0]
            self.padding_y = padding[1]
        self.no_rand = no_rand
    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Crops a subimage with specified size by random such that specified
        landmarks are covered definitely.
        Args:
            sample (dict): A data sample with image and landmark data.
        Returns:
            cropped_sample (dict): Cropped data sample.
        Raises:
            ValueError: If a landmark to cover is missing from the sample or
                the padding ratios are outside [0., 1.].
        """
        #rectangle region which should be covered definitely
        rec_covered = {
            'x_min': 0,
            'x_max': 0,
            'y_min': 0,
            'y_max': 0 }
        # Grow the covered rectangle to the bounding box of all landmarks
        # that must stay inside the crop.
        for i, lm_cover in enumerate(self.lms_covered):
            if not lm_cover in sample.keys():
                raise ValueError("""Specified landmark {lm_cover} which should
                    be covered by random crop does not exist in data
                    sample.""".format(lm_cover=lm_cover))
            #update rectangle region by landmark positions
            x = sample[lm_cover][0]
            y = sample[lm_cover][1]
            if i==0:
                rec_covered['x_min'] = x
                rec_covered['x_max'] = x
                rec_covered['y_min'] = y
                rec_covered['y_max'] = y
            else:
                #update covered rectangle region
                if rec_covered['x_min'] > x:
                    rec_covered['x_min'] = x
                if rec_covered['x_max'] < x:
                    rec_covered['x_max'] = x
                if rec_covered['y_min'] > y:
                    rec_covered['y_min'] = y
                if rec_covered['y_max'] < y:
                    rec_covered['y_max'] = y
        #img dimensions H x W x C
        width = sample['img'].shape[1]
        height = sample['img'].shape[0]
        #extend rectangle region by padding
        if not (0 <= self.padding_x <= 1 and 0 <= self.padding_y <= 1):
            raise ValueError('Padding must be specified as ratio and be in the ' +
                'interval [0., 1.].')
        pix_padding_x = int(width * self.padding_x)
        pix_padding_y = int(height * self.padding_y)
        #clip padding to image boundaries if necessary
        rec_covered['x_min'] = max(0, rec_covered['x_min'] - pix_padding_x)
        rec_covered['x_max'] = min(width - 1, rec_covered['x_max'] + pix_padding_x)
        rec_covered['y_min'] = max(0, rec_covered['y_min'] - pix_padding_y)
        rec_covered['y_max'] = min(height -1, rec_covered['y_max'] + pix_padding_y)
        # Valid ranges for the upper-left corner of the crop in each
        # dimension (see crop_range_1d).
        crop_range_x = RandCoverCrop.crop_range_1d(
            width,
            self.output_size_x,
            rec_covered['x_min'],
            rec_covered['x_max']
        )
        crop_range_y = RandCoverCrop.crop_range_1d(
            height,
            self.output_size_y,
            rec_covered['y_min'],
            rec_covered['y_max']
        )
        ul_corner_x = 0
        ul_corner_y = 0
        if self.no_rand:
            # Deterministic mode: take the most central valid position.
            ul_corner_x = int((crop_range_x[0] + crop_range_x[1]) / 2)
            ul_corner_y = int((crop_range_y[0] + crop_range_y[1]) / 2)
        else:
            #sample upper left corner of cropped subimage
            ul_corner_x = random.randint(
                crop_range_x[0],
                crop_range_x[1]
            )
            ul_corner_y = random.randint(
                crop_range_y[0],
                crop_range_y[1]
            )
        # NOTE(review): Crop slices with an exclusive upper bound while these
        # maxima are inclusive (ul + size - 1), so the cropped image is one
        # pixel smaller than output_size in each dimension — confirm whether
        # this is intended.
        rec_crop = {
            'x_min': ul_corner_x,
            'x_max': ul_corner_x + self.output_size_x - 1,
            'y_min': ul_corner_y,
            'y_max': ul_corner_y + self.output_size_y - 1
        }
        cropping = Crop(rec_crop, *self.lm_keys)
        cropped_sample = cropping(sample)
        return cropped_sample
    @staticmethod
    def crop_range_1d(N, n, a, b):
        """Computes the allowed range for the starting point of a cropped image
        edge such that it is within original image edge and covers a specified
        interval.
        Consider the original image edge of length N as an interval [0,N-1].
        Now the interval [a,b] which should be covered by the cropped image
        edge is within [0,N-1]. The cropped image edge is of length n and has
        starting point s, i.e. it corresponds to the interval [s, s+n-1]. This
        method returns the maximum interval [l, u] such that cropped image
        edges with a starting point s within this interval cover the given
        interval [a,b] and are within original image edge interval [0, N-1].
        Sketch:
                [ s,... , a,... , b, ..., s+n-1 ]
                        [ a,... , b ]
        [ 0,..., s,... , a,... , b ,..., s+n-1,... , N-1 ]
        Args:
            N (int): Length of the original image edge.
            n (int): Length of the cropped image edge.
            a (int): Lower bound of interval that should be covered.
            b (int): Upper bound of interval that should be covered.
        Returns:
            crop_range (obj): Range for starting point of cropped image edge.
                crop_range is an ndarray of shape (2,) containing the lower and
                upper bound [l,u]. Note: no check is done that l <= u; if the
                covered interval plus padding is wider than n, the subsequent
                random.randint call fails.
        """
        #if (a < 0 or b > N-1):
        #    raise ValueError('Specified interval [a,b] must be within [0,N].')
        #if b - a + 1 > n:
        #    raise ValueError("""Length of cropped image edge n must be greater
        #        equal length of specified interval b-a+1.""")
        #if n > N:
        #    raise ValueError("""Length of cropped image edge n must be smaller
        #        equal original image edge length N.""")
        #lower and upper bound from limitations of original image edge size
        l_N = 0
        u_N = N - 1 - (n - 1)
        #lower and upper bound for covering interval [a,b]
        l_cover = b - (n - 1)
        u_cover = a
        # Intersect both constraints: start late enough to reach b, early
        # enough to still include a and stay inside the image.
        l = max(l_N, l_cover)
        u = min(u_N, u_cover)
        #u = a
        crop_range = np.array((l,u))
        return crop_range
class Normalize(BaseTrf):
    """Functor that normalizes the image channels of a sample.

    Applies input[channel] = (input[channel] - mean[channel]) / std[channel]
    via torchvision's Normalize transform; landmarks are passed through
    unchanged.

    Attributes:
        mean (seq): Per-channel mean values.
        std (seq): Per-channel standard deviations.
        lm_keys (list): Landmark keys to carry over into the output sample.
    """

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
            *lm_keys):
        """Initialization with normalization constants and optional landmarks.

        NOTE(review): the list defaults are shared mutable objects; they are
        never mutated here, but callers should not modify ``self.mean`` /
        ``self.std`` in place.

        Args:
            mean (seq): Per-channel mean values. Defaults match the
                pretrained torch models.
            std (seq): Per-channel standard deviations. Defaults match the
                pretrained torch models.
            *lm_keys (str): Optional landmark keys (without the coordinate
                postfix, e.g. 'nose') restricting the output sample. If none
                are given, all landmarks of the data sample are considered.
        """
        super().__init__(*lm_keys)
        self.mean = mean
        self.std = std

    @BaseTrf.update_lm_keys
    def __call__(self, sample):
        """Normalize the image of the sample with the configured mean/std.

        Args:
            sample (dict): Data sample with image and landmark data.

        Returns:
            normalized_sample (dict): Data sample with normalized image.
        """
        normalized_sample = {
            'img': transforms.Normalize(self.mean, self.std)(sample['img'])
        }
        for key in self.lm_keys:
            normalized_sample[key] = sample[key]
        return normalized_sample
| [
"data.celeba_plugins.constr_para_generator_circle_sector.opts2lm_circle_sector_rand",
"data.celeba_plugins.lm_ordering.opts2lm_ordering",
"random.randint",
"pandas.read_csv",
"data.celeba_plugins.SeqSampler.SeqSampler",
"data.celeba_plugins.constr_para_generator_bb.opts2face_bb_rand",
"pathlib.Path",
... | [((2203, 2225), 'data.celeba_plugins.lm_ordering.opts2lm_ordering', 'opts2lm_ordering', (['opts'], {}), '(opts)\n', (2219, 2225), False, 'from data.celeba_plugins.lm_ordering import opts2lm_ordering\n'), ((2332, 2351), 'pathlib.Path', 'Path', (['opts.imgs_dir'], {}), '(opts.imgs_dir)\n', (2336, 2351), False, 'from pathlib import Path\n'), ((2353, 2372), 'pathlib.Path', 'Path', (['opts.lms_file'], {}), '(opts.lms_file)\n', (2357, 2372), False, 'from pathlib import Path\n'), ((6995, 7038), 'pandas.read_csv', 'pd.read_csv', (['lms_file'], {'sep': '"""\\\\s+"""', 'header': '(1)'}), "(lms_file, sep='\\\\s+', header=1)\n", (7006, 7038), True, 'import pandas as pd\n'), ((8571, 8590), 'skimage.io.imread', 'io.imread', (['img_path'], {}), '(img_path)\n', (8580, 8590), False, 'from skimage import io, transform\n'), ((11023, 11051), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['indices'], {}), '(indices)\n', (11042, 11051), False, 'from torch.utils.data import SubsetRandomSampler, SequentialSampler\n'), ((11550, 11569), 'data.celeba_plugins.SeqSampler.SeqSampler', 'SeqSampler', (['indices'], {}), '(indices)\n', (11560, 11569), False, 'from data.celeba_plugins.SeqSampler import SeqSampler\n'), ((13501, 13529), 'torchvision.transforms.Compose', 'transforms.Compose', (['pipeline'], {}), '(pipeline)\n', (13519, 13529), False, 'from torchvision import transforms\n'), ((18029, 18050), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (18044, 18050), False, 'import functools\n'), ((22325, 22359), 'numpy.random.choice', 'np.random.choice', (['(0, 1)'], {'p': 'p_coin'}), '((0, 1), p=p_coin)\n', (22341, 22359), True, 'import numpy as np\n'), ((37562, 37578), 'numpy.array', 'np.array', (['(l, u)'], {}), '((l, u))\n', (37570, 37578), True, 'import numpy as np\n'), ((1495, 1518), 'data.celeba_plugins.constr_para_generator_bb.opts2face_bb_rand', 'opts2face_bb_rand', (['opts'], {}), '(opts)\n', (1512, 1518), False, 'from 
data.celeba_plugins.constr_para_generator_bb import opts2face_bb_rand\n'), ((1662, 1689), 'data.celeba_plugins.constr_para_generator_polytope.opts2lm_polytope_rand', 'opts2lm_polytope_rand', (['opts'], {}), '(opts)\n', (1683, 1689), False, 'from data.celeba_plugins.constr_para_generator_polytope import opts2lm_polytope_rand\n'), ((1842, 1874), 'data.celeba_plugins.constr_para_generator_circle_sector.opts2lm_circle_sector_rand', 'opts2lm_circle_sector_rand', (['opts'], {}), '(opts)\n', (1868, 1874), False, 'from data.celeba_plugins.constr_para_generator_circle_sector import opts2lm_circle_sector_rand\n'), ((12072, 12094), 're.sub', 're.sub', (['"""\\\\_x"""', '""""""', 'lm'], {}), "('\\\\_x', '', lm)\n", (12078, 12094), False, 'import re\n'), ((27964, 27996), 'numpy.array', 'np.array', (['(x_cropped, y_cropped)'], {}), '((x_cropped, y_cropped))\n', (27972, 27996), True, 'import numpy as np\n'), ((34668, 34716), 'random.randint', 'random.randint', (['crop_range_x[0]', 'crop_range_x[1]'], {}), '(crop_range_x[0], crop_range_x[1])\n', (34682, 34716), False, 'import random\n'), ((34806, 34854), 'random.randint', 'random.randint', (['crop_range_y[0]', 'crop_range_y[1]'], {}), '(crop_range_y[0], crop_range_y[1])\n', (34820, 34854), False, 'import random\n'), ((39635, 39676), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.mean', 'self.std'], {}), '(self.mean, self.std)\n', (39655, 39676), False, 'from torchvision import transforms\n'), ((20788, 20809), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (20804, 20809), False, 'import torch\n'), ((20922, 20942), 'torch.from_numpy', 'torch.from_numpy', (['lm'], {}), '(lm)\n', (20938, 20942), False, 'import torch\n')] |
from fileinput import filename
from genericpath import isfile
from os.path import join
from os import listdir
import csv, cv2 , random, torch
import scipy.io as sio
import numpy as np
import sys
from torch import tensor
from facenet_pytorch import MTCNN, InceptionResnetV1
random.seed()
print("Preparing the data...")
filename_list = [f for f in listdir('celba_limited') if isfile(join('celba_limited',f))]
count=[]
for filename in filename_list:
label=int(filename.split('_')[0])
while label > len(count):
count.append(0)
count[label-1] += 1
train_images=[]
train_labels=[]
test_images=[]
test_labels=[]
t_train_images=[]
t_test_images=[]
for filename in filename_list:
label=int(filename.split('_')[0])
if count[label-1] > 19 :
image = cv2.imread('celba_limited/'+filename)
resized_image = cv2.resize(image, (160, 160),)
#Preparing data for DNN
float_image=resized_image/255.0
tensor_image = torch.from_numpy(float_image).permute(2, 0, 1).float()
#Preparing data for LBPH
gray = cv2.cvtColor(resized_image, cv2.COLOR_BGR2GRAY)
if random.random() > 0.25:
t_train_images.append(tensor_image)
train_images.append(gray)
train_labels.append(label)
else:
t_test_images.append(tensor_image)
test_images.append(gray)
test_labels.append(label)
print("Train images: ", len(train_images))
print("Processing images to the DNN...")
mtcnn = MTCNN(
image_size=160, margin=0, min_face_size=20,
thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=True,
device='cpu'
)
resnet = InceptionResnetV1(pretrained='vggface2').eval().to('cpu')
t_train_images = torch.stack(t_train_images).to('cpu')
embeddings_train = resnet(t_train_images).detach().cpu().numpy()
t_test_images = torch.stack(t_test_images).to('cpu')
embeddings_test = resnet(t_test_images).detach().cpu().numpy()
#Create a SVM
print('Train SVM')
svm = cv2.ml.SVM_create()
svm.setType(cv2.ml.SVM_C_SVC)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setTermCriteria((cv2.TERM_CRITERIA_MAX_ITER, 100, 1e-6))
svm.train(np.array(embeddings_train), cv2.ml.ROW_SAMPLE, np.array(train_labels))
svm_predicted = []
for image in embeddings_test:
image = image.reshape(1, -1)
svm_predicted.append(svm.predict(np.array(image)))
svm_correct = 0
for i in range(len(test_images)):
if test_labels[i] == svm_predicted[i][1][0][0]:
svm_correct += 1
print("DNN+SVM Face Recognition Model accuracy: ", svm_correct/len(test_images))
#Train the LBPH face recognition model
LBPH_model = cv2.face.LBPHFaceRecognizer_create()
LBPH_model.train(np.array(train_images), np.array(train_labels))
LBPH_model_predicted = []
print("LBPH Face Recognition Model prediction")
for img in test_images:
LBPH_model_predicted.append(LBPH_model.predict(img))
LBPH_correct = 0
for i in range(len(test_images)):
if test_labels[i] == LBPH_model_predicted[i][0]:
LBPH_correct += 1
print("LBPH Face Recognition Model accuracy: ", LBPH_correct/len(test_images))
print("Done")
| [
"cv2.face.LBPHFaceRecognizer_create",
"torch.stack",
"cv2.cvtColor",
"cv2.ml.SVM_create",
"cv2.imread",
"fileinput.filename.split",
"random.random",
"random.seed",
"numpy.array",
"facenet_pytorch.InceptionResnetV1",
"facenet_pytorch.MTCNN",
"os.path.join",
"os.listdir",
"cv2.resize",
"to... | [((276, 289), 'random.seed', 'random.seed', ([], {}), '()\n', (287, 289), False, 'import csv, cv2, random, torch\n'), ((1532, 1661), 'facenet_pytorch.MTCNN', 'MTCNN', ([], {'image_size': '(160)', 'margin': '(0)', 'min_face_size': '(20)', 'thresholds': '[0.6, 0.7, 0.7]', 'factor': '(0.709)', 'post_process': '(True)', 'device': '"""cpu"""'}), "(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7\n ], factor=0.709, post_process=True, device='cpu')\n", (1537, 1661), False, 'from facenet_pytorch import MTCNN, InceptionResnetV1\n'), ((2019, 2038), 'cv2.ml.SVM_create', 'cv2.ml.SVM_create', ([], {}), '()\n', (2036, 2038), False, 'import csv, cv2, random, torch\n'), ((2651, 2687), 'cv2.face.LBPHFaceRecognizer_create', 'cv2.face.LBPHFaceRecognizer_create', ([], {}), '()\n', (2685, 2687), False, 'import csv, cv2, random, torch\n'), ((2175, 2201), 'numpy.array', 'np.array', (['embeddings_train'], {}), '(embeddings_train)\n', (2183, 2201), True, 'import numpy as np\n'), ((2222, 2244), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (2230, 2244), True, 'import numpy as np\n'), ((2705, 2727), 'numpy.array', 'np.array', (['train_images'], {}), '(train_images)\n', (2713, 2727), True, 'import numpy as np\n'), ((2729, 2751), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (2737, 2751), True, 'import numpy as np\n'), ((351, 375), 'os.listdir', 'listdir', (['"""celba_limited"""'], {}), "('celba_limited')\n", (358, 375), False, 'from os import listdir\n'), ((793, 832), 'cv2.imread', 'cv2.imread', (["('celba_limited/' + filename)"], {}), "('celba_limited/' + filename)\n", (803, 832), False, 'import csv, cv2, random, torch\n'), ((856, 885), 'cv2.resize', 'cv2.resize', (['image', '(160, 160)'], {}), '(image, (160, 160))\n', (866, 885), False, 'import csv, cv2, random, torch\n'), ((1091, 1138), 'cv2.cvtColor', 'cv2.cvtColor', (['resized_image', 'cv2.COLOR_BGR2GRAY'], {}), '(resized_image, cv2.COLOR_BGR2GRAY)\n', (1103, 
1138), False, 'import csv, cv2, random, torch\n'), ((1757, 1784), 'torch.stack', 'torch.stack', (['t_train_images'], {}), '(t_train_images)\n', (1768, 1784), False, 'import csv, cv2, random, torch\n'), ((1877, 1903), 'torch.stack', 'torch.stack', (['t_test_images'], {}), '(t_test_images)\n', (1888, 1903), False, 'import csv, cv2, random, torch\n'), ((386, 410), 'os.path.join', 'join', (['"""celba_limited"""', 'f'], {}), "('celba_limited', f)\n", (390, 410), False, 'from os.path import join\n'), ((467, 486), 'fileinput.filename.split', 'filename.split', (['"""_"""'], {}), "('_')\n", (481, 486), False, 'from fileinput import filename\n'), ((722, 741), 'fileinput.filename.split', 'filename.split', (['"""_"""'], {}), "('_')\n", (736, 741), False, 'from fileinput import filename\n'), ((1151, 1166), 'random.random', 'random.random', ([], {}), '()\n', (1164, 1166), False, 'import csv, cv2, random, torch\n'), ((2368, 2383), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2376, 2383), True, 'import numpy as np\n'), ((1681, 1721), 'facenet_pytorch.InceptionResnetV1', 'InceptionResnetV1', ([], {'pretrained': '"""vggface2"""'}), "(pretrained='vggface2')\n", (1698, 1721), False, 'from facenet_pytorch import MTCNN, InceptionResnetV1\n'), ((986, 1015), 'torch.from_numpy', 'torch.from_numpy', (['float_image'], {}), '(float_image)\n', (1002, 1015), False, 'import csv, cv2, random, torch\n')] |
# -*- coding: utf-8 -*-
#Estimate head pose according to the facial landmarks"""
import cv2
import numpy as np
import os
actor_height = 157
class PoseEstimator:
"""Estimate head pose according to the facial landmarks"""
"""
(0.0, 0.0, 0.0), # Nose tip
#(0.0, -330.0, -65.0), # Chin
(-2.42, 2.42, -2.42), # Left eye center
(2.42, 2.42, -2.42), # Right eye center
#(-150.0, -150.0, -125.0), # Mouth left corner
#(150.0, -150.0, -125.0) # Mouth right corner
(-4.48, 1.21, -12.1), # Left ear
(4.48, 1.21, -12.1), # Right ear
#(0.0, 170.0, -140.0), # middle point between left and right eye
#(-300.0, -180.0, -70.0), # Left shoulder
#(300.0, -180.0, -70.0), # Right shoulders
(0.0, 0.0, 0.0), # Nose tip
(-2.15, 1.70, -1.35), # Left eye center
(2.15, 1.70, -1.35), # Right eye center
(-4.30, 0.85, -5.40), # Left ear
(4.30, 0.85, -5.40), # Right ear
#(-300.0, -180.0, -70.0), # Left shoulder
#(300.0, -180.0, -70.0), # Right shoulders
JP's model:
(0.0, 4.7, -1.7), # Nose tip
(-3.5, 0, 1.5), # Left eye center
(3.5, 0, 1.5), # Right eye center
(-8.5, 4.0, 11.8), # Left ear
(8.5, 4.0, 11.8), # Right ear
Hoa's model'
(0.0, 4.0, -1.3), # Nose tip
(-3.0, 0, 0.5), # Left eye center
(3.0, 0, 0.5), # Right eye center
(-6.5, 3.0, 6), # Left ear
(6.5, 3.0, 6), # Right ear
"""
def __init__(self, img_size=(480, 640)):
self.size = img_size
#x:right, y:up, z:forward
#Unit = cm
# 3D model points.
self.model_points = np.array([
(0.0, 1.7, -1.35), # Nose tip
(-2.15, 0, 1.35), # Left eye center
(2.15, 0, 1.35), # Right eye center
(-4.30, 0.85, 5.40), # Left ear
(4.30, 0.85, 5.40), # Right ear
])
self.body_points = np.array([
(-0.1295, 0.0, 0.0), #left shoulder
(0.1295, 0.0, 0.0), #right shoulder
(-0.0955, 0.288, 0.0), #left hip
(0.0955, 0.288, 0.0), #right hip
])*actor_height
self.model_points_68 = self._get_full_model_points()
# Camera internals
self.focal_length = self.size[1]
self.camera_center = (self.size[1] / 2, self.size[0] / 2)
self.camera_matrix = np.array(
[[self.focal_length, 0, self.camera_center[0]],
[0, self.focal_length, self.camera_center[1]],
[0, 0, 1]], dtype="double")
self.camera_matrix_body = np.array(
[[self.focal_length, 0, self.camera_center[0]],
[0, self.focal_length, self.camera_center[1]],
[0, 0, 1]], dtype="double")
# Assuming no lens distortion
self.dist_coeefs = np.zeros((4, 1))
# Rotation vector and translation vector
self.r_vec = np.array([[0.01891013], [0.08560084], [-3.14392813]])
self.t_vec = np.array(
[[-14.97821226], [-10.62040383], [-2053.03596872]])
# self.r_vec = None
# self.t_vec = None
def _get_full_model_points(self, filename=os.path.join(os.getcwd(), 'lib', 'estimator', 'assets/model.txt')):
"""Get all 68 3D model points from file"""
raw_value = []
with open(filename) as file:
for line in file:
raw_value.append(line)
model_points = np.array(raw_value, dtype=np.float32)
model_points = np.reshape(model_points, (3, -1)).T
# Transform the model into a front view.
model_points[:, 2] *= -1
return model_points
def show_3d_model(self):
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
fig = pyplot.figure()
ax = Axes3D(fig)
x = self.model_points_68[:, 0]
y = self.model_points_68[:, 1]
z = self.model_points_68[:, 2]
ax.scatter(x, y, z)
ax.axis('square')
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.show()
def solve_pose(self, image_points, body=False):
"""
Solve pose from image points
Return (rotation_vector, translation_vector) as pose.
"""
#assert image_points.shape[0] == self.model_points_68.shape[0], "3D points and 2D points should be of same number."
#print("points", self.model_points.shape, image_points.shape)
#(_, rotation_vector, translation_vector) = cv2.solvePnP(
# self.model_points, image_points, self.camera_matrix, self.dist_coeefs, None, None, False, cv2.SOLVEPNP_UPNP)
model_points = self.model_points
if body:
model_points = self.body_points
#print(image_points.shape, model_points.shape)
(_, rotation_vector, translation_vector, _) = cv2.solvePnPRansac(
model_points, image_points, self.camera_matrix, self.dist_coeefs, None, None, False, 200, 12, 0.6, None, cv2.SOLVEPNP_UPNP)
# (success, rotation_vector, translation_vector) = cv2.solvePnP(
# self.model_points,
# image_points,
# self.camera_matrix,
# self.dist_coeefs,
# rvec=self.r_vec,
# tvec=self.t_vec,
# useExtrinsicGuess=True)
return rotation_vector, translation_vector
def solve_pose_by_68_points(self, image_points):
"""
Solve pose from all the 68 image points
Return (rotation_vector, translation_vector) as pose.
"""
if self.r_vec is None:
(_, rotation_vector, translation_vector) = cv2.solvePnP(
self.model_points_68, image_points, self.camera_matrix, self.dist_coeefs)
self.r_vec = rotation_vector
self.t_vec = translation_vector
(_, rotation_vector, translation_vector) = cv2.solvePnP(
self.model_points_68,
image_points,
self.camera_matrix,
self.dist_coeefs,
rvec=self.r_vec,
tvec=self.t_vec,
useExtrinsicGuess=True)
return (rotation_vector, translation_vector)
def draw_annotation_box(self, image, rotation_vector, translation_vector, color=(255, 255, 255), line_width=2):
"""Draw a 3D box as annotation of pose"""
#print("rotation", rotation_vector)
#print("translation", translation_vector)
point_3d = []
rear_size = 75
rear_depth = 0
point_3d.append((-rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, rear_size, rear_depth))
point_3d.append((rear_size, rear_size, rear_depth))
point_3d.append((rear_size, -rear_size, rear_depth))
point_3d.append((-rear_size, -rear_size, rear_depth))
front_size = 100
front_depth = 100
point_3d.append((-front_size, -front_size, front_depth))
point_3d.append((-front_size, front_size, front_depth))
point_3d.append((front_size, front_size, front_depth))
point_3d.append((front_size, -front_size, front_depth))
point_3d.append((-front_size, -front_size, front_depth))
point_3d = np.array(point_3d, dtype=np.float).reshape(-1, 3)
# Map to 2d image points
(point_2d, _) = cv2.projectPoints(point_3d,
rotation_vector,
translation_vector,
self.camera_matrix,
self.dist_coeefs)
point_2d = np.int32(point_2d.reshape(-1, 2))
# Draw all the lines
cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[1]), tuple(
point_2d[6]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[2]), tuple(
point_2d[7]), color, line_width, cv2.LINE_AA)
cv2.line(image, tuple(point_2d[3]), tuple(
point_2d[8]), color, line_width, cv2.LINE_AA)
def draw_axis(self, img, R, t): #x is red, y is green, z is blue
points = np.float32(
[[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]])#.reshape(-1, 3)
axisPoints, _ = cv2.projectPoints(
points, R, t, self.camera_matrix, self.dist_coeefs)
#print(axisPoints[0], axisPoints[1], axisPoints[2], axisPoints[3])
img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
axisPoints[0].ravel()), (255, 0, 0), 3)
img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
axisPoints[1].ravel()), (0, 255, 0), 3)
img = cv2.line(img, tuple(axisPoints[3].ravel()), tuple(
axisPoints[2].ravel()), (0, 0, 255), 3)
def draw_axes(self, img, R, t):
img = cv2.drawFrameAxes(img, self.camera_matrix, self.dist_coeefs, R, t, 30)
def evaluation(self, img, points, R, t, body=False):
model_points = self.model_points
if body:
model_points = self.body_points
pred_points = cv2.projectPoints(model_points, R, t, self.camera_matrix, self.dist_coeefs)[0]
for p in pred_points:
cv2.circle(img, (int(p[0][0]), int(p[0][1])), 3, (0,0,255), -1)
mses = ((points.reshape([int(points.shape[0]/2), 2]) - pred_points.reshape([int(points.shape[0]/2), 2]))**2).mean()
#print("MSE: mses", mses)
def get_pose_marks(self, marks):
"""Get marks ready for pose estimation from 68 marks"""
pose_marks = []
pose_marks.append(marks[30]) # Nose tip
pose_marks.append(marks[8]) # Chin
pose_marks.append(marks[36]) # Left eye left corner
pose_marks.append(marks[45]) # Right eye right corner
pose_marks.append(marks[48]) # Mouth left corner
pose_marks.append(marks[54]) # Mouth right corner
return pose_marks | [
"matplotlib.pyplot.show",
"mpl_toolkits.mplot3d.Axes3D",
"cv2.polylines",
"os.getcwd",
"cv2.solvePnP",
"numpy.float32",
"numpy.zeros",
"cv2.solvePnPRansac",
"cv2.projectPoints",
"matplotlib.pyplot.figure",
"numpy.array",
"cv2.drawFrameAxes",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"... | [((2044, 2149), 'numpy.array', 'np.array', (['[(0.0, 1.7, -1.35), (-2.15, 0, 1.35), (2.15, 0, 1.35), (-4.3, 0.85, 5.4), (\n 4.3, 0.85, 5.4)]'], {}), '([(0.0, 1.7, -1.35), (-2.15, 0, 1.35), (2.15, 0, 1.35), (-4.3, 0.85,\n 5.4), (4.3, 0.85, 5.4)])\n', (2052, 2149), True, 'import numpy as np\n'), ((2821, 2957), 'numpy.array', 'np.array', (['[[self.focal_length, 0, self.camera_center[0]], [0, self.focal_length, self\n .camera_center[1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[self.focal_length, 0, self.camera_center[0]], [0, self.\n focal_length, self.camera_center[1]], [0, 0, 1]], dtype='double')\n", (2829, 2957), True, 'import numpy as np\n'), ((3035, 3171), 'numpy.array', 'np.array', (['[[self.focal_length, 0, self.camera_center[0]], [0, self.focal_length, self\n .camera_center[1]], [0, 0, 1]]'], {'dtype': '"""double"""'}), "([[self.focal_length, 0, self.camera_center[0]], [0, self.\n focal_length, self.camera_center[1]], [0, 0, 1]], dtype='double')\n", (3043, 3171), True, 'import numpy as np\n'), ((3272, 3288), 'numpy.zeros', 'np.zeros', (['(4, 1)'], {}), '((4, 1))\n', (3280, 3288), True, 'import numpy as np\n'), ((3360, 3413), 'numpy.array', 'np.array', (['[[0.01891013], [0.08560084], [-3.14392813]]'], {}), '([[0.01891013], [0.08560084], [-3.14392813]])\n', (3368, 3413), True, 'import numpy as np\n'), ((3435, 3495), 'numpy.array', 'np.array', (['[[-14.97821226], [-10.62040383], [-2053.03596872]]'], {}), '([[-14.97821226], [-10.62040383], [-2053.03596872]])\n', (3443, 3495), True, 'import numpy as np\n'), ((3883, 3920), 'numpy.array', 'np.array', (['raw_value'], {'dtype': 'np.float32'}), '(raw_value, dtype=np.float32)\n', (3891, 3920), True, 'import numpy as np\n'), ((4222, 4237), 'matplotlib.pyplot.figure', 'pyplot.figure', ([], {}), '()\n', (4235, 4237), False, 'from matplotlib import pyplot\n'), ((4251, 4262), 'mpl_toolkits.mplot3d.Axes3D', 'Axes3D', (['fig'], {}), '(fig)\n', (4257, 4262), False, 'from mpl_toolkits.mplot3d import Axes3D\n'), 
((4444, 4462), 'matplotlib.pyplot.xlabel', 'pyplot.xlabel', (['"""x"""'], {}), "('x')\n", (4457, 4462), False, 'from matplotlib import pyplot\n'), ((4471, 4489), 'matplotlib.pyplot.ylabel', 'pyplot.ylabel', (['"""y"""'], {}), "('y')\n", (4484, 4489), False, 'from matplotlib import pyplot\n'), ((4498, 4511), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (4509, 4511), False, 'from matplotlib import pyplot\n'), ((5312, 5459), 'cv2.solvePnPRansac', 'cv2.solvePnPRansac', (['model_points', 'image_points', 'self.camera_matrix', 'self.dist_coeefs', 'None', 'None', '(False)', '(200)', '(12)', '(0.6)', 'None', 'cv2.SOLVEPNP_UPNP'], {}), '(model_points, image_points, self.camera_matrix, self.\n dist_coeefs, None, None, False, 200, 12, 0.6, None, cv2.SOLVEPNP_UPNP)\n', (5330, 5459), False, 'import cv2\n'), ((6336, 6485), 'cv2.solvePnP', 'cv2.solvePnP', (['self.model_points_68', 'image_points', 'self.camera_matrix', 'self.dist_coeefs'], {'rvec': 'self.r_vec', 'tvec': 'self.t_vec', 'useExtrinsicGuess': '(True)'}), '(self.model_points_68, image_points, self.camera_matrix, self.\n dist_coeefs, rvec=self.r_vec, tvec=self.t_vec, useExtrinsicGuess=True)\n', (6348, 6485), False, 'import cv2\n'), ((7755, 7862), 'cv2.projectPoints', 'cv2.projectPoints', (['point_3d', 'rotation_vector', 'translation_vector', 'self.camera_matrix', 'self.dist_coeefs'], {}), '(point_3d, rotation_vector, translation_vector, self.\n camera_matrix, self.dist_coeefs)\n', (7772, 7862), False, 'import cv2\n'), ((8117, 8187), 'cv2.polylines', 'cv2.polylines', (['image', '[point_2d]', '(True)', 'color', 'line_width', 'cv2.LINE_AA'], {}), '(image, [point_2d], True, color, line_width, cv2.LINE_AA)\n', (8130, 8187), False, 'import cv2\n'), ((8602, 8661), 'numpy.float32', 'np.float32', (['[[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]]'], {}), '([[30, 0, 0], [0, 30, 0], [0, 0, 30], [0, 0, 0]])\n', (8612, 8661), True, 'import numpy as np\n'), ((8716, 8785), 'cv2.projectPoints', 'cv2.projectPoints', 
(['points', 'R', 't', 'self.camera_matrix', 'self.dist_coeefs'], {}), '(points, R, t, self.camera_matrix, self.dist_coeefs)\n', (8733, 8785), False, 'import cv2\n'), ((9286, 9356), 'cv2.drawFrameAxes', 'cv2.drawFrameAxes', (['img', 'self.camera_matrix', 'self.dist_coeefs', 'R', 't', '(30)'], {}), '(img, self.camera_matrix, self.dist_coeefs, R, t, 30)\n', (9303, 9356), False, 'import cv2\n'), ((2358, 2459), 'numpy.array', 'np.array', (['[(-0.1295, 0.0, 0.0), (0.1295, 0.0, 0.0), (-0.0955, 0.288, 0.0), (0.0955, \n 0.288, 0.0)]'], {}), '([(-0.1295, 0.0, 0.0), (0.1295, 0.0, 0.0), (-0.0955, 0.288, 0.0), (\n 0.0955, 0.288, 0.0)])\n', (2366, 2459), True, 'import numpy as np\n'), ((3625, 3636), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (3634, 3636), False, 'import os\n'), ((3944, 3977), 'numpy.reshape', 'np.reshape', (['model_points', '(3, -1)'], {}), '(model_points, (3, -1))\n', (3954, 3977), True, 'import numpy as np\n'), ((6095, 6186), 'cv2.solvePnP', 'cv2.solvePnP', (['self.model_points_68', 'image_points', 'self.camera_matrix', 'self.dist_coeefs'], {}), '(self.model_points_68, image_points, self.camera_matrix, self.\n dist_coeefs)\n', (6107, 6186), False, 'import cv2\n'), ((9539, 9614), 'cv2.projectPoints', 'cv2.projectPoints', (['model_points', 'R', 't', 'self.camera_matrix', 'self.dist_coeefs'], {}), '(model_points, R, t, self.camera_matrix, self.dist_coeefs)\n', (9556, 9614), False, 'import cv2\n'), ((7647, 7681), 'numpy.array', 'np.array', (['point_3d'], {'dtype': 'np.float'}), '(point_3d, dtype=np.float)\n', (7655, 7681), True, 'import numpy as np\n')] |
import numpy as np
import analyzesimulation as asim
import os
import re
import pickle
def load_data_from_dir(data_dir):
filename_list = os.listdir(data_dir)
conditions = set()
for file in filename_list:
condition = re.findall(r'.*tf_.*_(.*)_t.*', file)
condition = condition[0]
conditions.add(condition)
data_dict = dict()
for key in conditions:
data_dict[key] = {'i_frac': [], 'dsi': [], 'tc': [], 'dv': [],
'dir': [], 'tf': [], 'input_i': [], 'times': []}
for file in filename_list:
datafile = os.path.join(data_dir, file)
inh_fraction = re.findall(r'.*I(.*)[.]pkl', file)
inh_fraction = float(inh_fraction[0])
condition = re.findall(r'.*tf_.*_(.*)_t.*', file)
condition = condition[0]
# Loading the objects:
data = asim.SimData(datafile)
# Discard the first 50ms. Time in ms.
data.trim_data_beginning(500)
data.set_baseline_voltage(-65) # in mV
delta_v = data.get_voltage_change()
data.get_amp_spectrum()
data.get_calcium_response()
# Get tuning curve from A1 component
tc = dict()
dsi = dict()
pref_dir = dict()
tc['max'] = np.max(delta_v, axis=1)
tc['mean'] = np.mean(data.calcium_responses, axis=1)
tc['a1'] = [asim.get_freq_amp(data.fft_freq, data.v_amp[i, :],
data.t_freqs[i]) for i in range(len(data.t_freqs))]
for tc_method in tc.keys():
dsi[tc_method], pref_dir[tc_method] =\
asim.get_pref_dir(tc[tc_method], data.directions)
pref_dir[tc_method] = np.rad2deg(pref_dir[tc_method])
data_dict[condition]['i_frac'].append(inh_fraction)
data_dict[condition]['dsi'].append(dsi)
data_dict[condition]['tc'].append(tc)
data_dict[condition]['dv'].append(delta_v)
data_dict[condition]['input_i'].append(data.input_i)
# Conditional not useful if we need to analyze across tf and dirs. Although it saves memory
# if len(data_dict[condition]['dir']) == 0:
data_dict[condition]['dir'].append(data.directions)
data_dict[condition]['tf'].append(data.t_freqs)
data_dict[condition]['times'].append(data.times)
# Saving the objects:
# Python 3: open(..., 'wb')
with open(os.path.join(data_dir, 'summary_dict.pkl'), 'wb') as f:
pickle.dump(data_dict, f)
| [
"analyzesimulation.SimData",
"pickle.dump",
"analyzesimulation.get_freq_amp",
"numpy.rad2deg",
"numpy.max",
"re.findall",
"numpy.mean",
"analyzesimulation.get_pref_dir",
"os.path.join",
"os.listdir"
] | [((142, 162), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (152, 162), False, 'import os\n'), ((237, 273), 're.findall', 're.findall', (['""".*tf_.*_(.*)_t.*"""', 'file'], {}), "('.*tf_.*_(.*)_t.*', file)\n", (247, 273), False, 'import re\n'), ((590, 618), 'os.path.join', 'os.path.join', (['data_dir', 'file'], {}), '(data_dir, file)\n', (602, 618), False, 'import os\n'), ((643, 676), 're.findall', 're.findall', (['""".*I(.*)[.]pkl"""', 'file'], {}), "('.*I(.*)[.]pkl', file)\n", (653, 676), False, 'import re\n'), ((744, 780), 're.findall', 're.findall', (['""".*tf_.*_(.*)_t.*"""', 'file'], {}), "('.*tf_.*_(.*)_t.*', file)\n", (754, 780), False, 'import re\n'), ((861, 883), 'analyzesimulation.SimData', 'asim.SimData', (['datafile'], {}), '(datafile)\n', (873, 883), True, 'import analyzesimulation as asim\n'), ((1262, 1285), 'numpy.max', 'np.max', (['delta_v'], {'axis': '(1)'}), '(delta_v, axis=1)\n', (1268, 1285), True, 'import numpy as np\n'), ((1307, 1346), 'numpy.mean', 'np.mean', (['data.calcium_responses'], {'axis': '(1)'}), '(data.calcium_responses, axis=1)\n', (1314, 1346), True, 'import numpy as np\n'), ((2431, 2456), 'pickle.dump', 'pickle.dump', (['data_dict', 'f'], {}), '(data_dict, f)\n', (2442, 2456), False, 'import pickle\n'), ((1367, 1434), 'analyzesimulation.get_freq_amp', 'asim.get_freq_amp', (['data.fft_freq', 'data.v_amp[i, :]', 'data.t_freqs[i]'], {}), '(data.fft_freq, data.v_amp[i, :], data.t_freqs[i])\n', (1384, 1434), True, 'import analyzesimulation as asim\n'), ((1593, 1642), 'analyzesimulation.get_pref_dir', 'asim.get_pref_dir', (['tc[tc_method]', 'data.directions'], {}), '(tc[tc_method], data.directions)\n', (1610, 1642), True, 'import analyzesimulation as asim\n'), ((1677, 1708), 'numpy.rad2deg', 'np.rad2deg', (['pref_dir[tc_method]'], {}), '(pref_dir[tc_method])\n', (1687, 1708), True, 'import numpy as np\n'), ((2367, 2409), 'os.path.join', 'os.path.join', (['data_dir', '"""summary_dict.pkl"""'], {}), "(data_dir, 
'summary_dict.pkl')\n", (2379, 2409), False, 'import os\n')] |
import torch
import torchvision
from torch import nn
from torch import optim
import pandas as pd
import numpy as np
from torch.utils.data import Dataset
from sklearn.preprocessing import maxabs_scale
from torch.utils.tensorboard import SummaryWriter
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")
print(torch.version.cuda)
class FeatureDataset(Dataset):
def __init__(self, file_name):
data_csv = pd.read_csv(file_name, header=0)
dataset = np.array(data_csv, dtype=float)
x = dataset[:, 1:785]
x = maxabs_scale(x, axis=1)
x = torch.tensor(x, dtype=float, device=device)
y = torch.ones((x.shape[0], 1), dtype=float, device=device)
self.x_train = x
self.y_train = y
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
return self.x_train[idx], self.y_train[idx]
batch_size = 64
epochs = 50
learning_rate = 3e-4
feature_set = FeatureDataset('data/mnist_train.csv')
data_loader = torch.utils.data.DataLoader(feature_set, batch_size=batch_size, shuffle=True, drop_last=True)
class Generator(nn.Module):
def __init__(self):
super(Generator, self).__init__()
self.linear_stack = nn.Sequential(
nn.Linear(64, 512),
nn.ReLU(),
nn.Linear(512, 1024),
nn.ReLU(),
nn.Linear(1024, 28 * 28),
nn.Tanh()
)
def forward(self, x):
logits = self.linear_stack(x)
return logits
class Discriminator(nn.Module):
def __init__(self):
super(Discriminator, self).__init__()
self.linear_stack = nn.Sequential(
nn.Linear(784, 512),
nn.ReLU(),
nn.Linear(512, 1024),
nn.ReLU(),
nn.Linear(1024, 1),
nn.Sigmoid()
)
def forward(self, x):
logits = self.linear_stack(x.float())
return logits
disc = Discriminator().to(device)
gen = Generator().to(device)
opt_disc = optim.Adam(disc.parameters(), lr=learning_rate)
opt_gen = optim.Adam(gen.parameters(), lr=learning_rate)
fixed_noise = torch.randn((batch_size, 64), device=device)
criterian = nn.BCELoss()
scaler_writer = SummaryWriter(f"logs/loss")
writer_fake = SummaryWriter(f"logs/fake")
writer_real = SummaryWriter(f"logs/real")
step = 0
i = 0
for epoch in range(epochs):
print(epoch)
for batch_idx, (real, _) in enumerate(data_loader):
i = i + 1
noise = torch.randn((batch_size, 64), device=device)
fake = gen(noise)
disc_real = disc(real)
lossD_real = criterian(disc_real, torch.ones_like(disc_real, device=device))
disc_fake = disc(fake)
lossD_fake = criterian(disc_fake, torch.zeros_like(disc_fake, device=device))
lossD = (lossD_real + lossD_fake) / 2
disc.zero_grad()
lossD.backward(retain_graph=True)
opt_disc.step()
output = disc(fake)
lossG = criterian(output, torch.ones_like(output))
gen.zero_grad()
lossG.backward()
opt_gen.step()
if batch_idx == 0:
with torch.no_grad():
fake = gen(fixed_noise).reshape(-1, 1, 28, 28)
data = real.reshape(-1, 1, 28, 28)
img_grid_fake = torchvision.utils.make_grid(fake, normalize=True)
img_grid_real = torchvision.utils.make_grid(data, normalize=True)
writer_fake.add_image(
"Fake", img_grid_fake, global_step=step
)
writer_real.add_image(
"Real", img_grid_real, global_step=step
)
if batch_idx % 100 == 0:
scaler_writer.add_scalar("Discriminator", lossD, global_step=step)
scaler_writer.add_scalar("Generator", lossG, global_step=step)
step = step + 1
| [
"torch.ones_like",
"torch.ones",
"torch.nn.ReLU",
"torch.nn.BCELoss",
"torch.utils.data.DataLoader",
"torch.zeros_like",
"pandas.read_csv",
"torch.nn.Tanh",
"torch.randn",
"sklearn.preprocessing.maxabs_scale",
"torchvision.utils.make_grid",
"torch.cuda.is_available",
"numpy.array",
"torch.... | [((1043, 1141), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['feature_set'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'drop_last': '(True)'}), '(feature_set, batch_size=batch_size, shuffle=\n True, drop_last=True)\n', (1070, 1141), False, 'import torch\n'), ((2166, 2210), 'torch.randn', 'torch.randn', (['(batch_size, 64)'], {'device': 'device'}), '((batch_size, 64), device=device)\n', (2177, 2210), False, 'import torch\n'), ((2224, 2236), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (2234, 2236), False, 'from torch import nn\n'), ((2254, 2281), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['f"""logs/loss"""'], {}), "(f'logs/loss')\n", (2267, 2281), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2297, 2324), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['f"""logs/fake"""'], {}), "(f'logs/fake')\n", (2310, 2324), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2339, 2366), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['f"""logs/real"""'], {}), "(f'logs/real')\n", (2352, 2366), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((271, 296), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (294, 296), False, 'import torch\n'), ((455, 487), 'pandas.read_csv', 'pd.read_csv', (['file_name'], {'header': '(0)'}), '(file_name, header=0)\n', (466, 487), True, 'import pandas as pd\n'), ((506, 537), 'numpy.array', 'np.array', (['data_csv'], {'dtype': 'float'}), '(data_csv, dtype=float)\n', (514, 537), True, 'import numpy as np\n'), ((581, 604), 'sklearn.preprocessing.maxabs_scale', 'maxabs_scale', (['x'], {'axis': '(1)'}), '(x, axis=1)\n', (593, 604), False, 'from sklearn.preprocessing import maxabs_scale\n'), ((618, 661), 'torch.tensor', 'torch.tensor', (['x'], {'dtype': 'float', 'device': 'device'}), '(x, dtype=float, device=device)\n', (630, 661), False, 'import torch\n'), ((675, 730), 'torch.ones', 'torch.ones', 
(['(x.shape[0], 1)'], {'dtype': 'float', 'device': 'device'}), '((x.shape[0], 1), dtype=float, device=device)\n', (685, 730), False, 'import torch\n'), ((2520, 2564), 'torch.randn', 'torch.randn', (['(batch_size, 64)'], {'device': 'device'}), '((batch_size, 64), device=device)\n', (2531, 2564), False, 'import torch\n'), ((1289, 1307), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(512)'], {}), '(64, 512)\n', (1298, 1307), False, 'from torch import nn\n'), ((1321, 1330), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1328, 1330), False, 'from torch import nn\n'), ((1344, 1364), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (1353, 1364), False, 'from torch import nn\n'), ((1378, 1387), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1385, 1387), False, 'from torch import nn\n'), ((1401, 1425), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(28 * 28)'], {}), '(1024, 28 * 28)\n', (1410, 1425), False, 'from torch import nn\n'), ((1439, 1448), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (1446, 1448), False, 'from torch import nn\n'), ((1706, 1725), 'torch.nn.Linear', 'nn.Linear', (['(784)', '(512)'], {}), '(784, 512)\n', (1715, 1725), False, 'from torch import nn\n'), ((1739, 1748), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1746, 1748), False, 'from torch import nn\n'), ((1762, 1782), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(1024)'], {}), '(512, 1024)\n', (1771, 1782), False, 'from torch import nn\n'), ((1796, 1805), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1803, 1805), False, 'from torch import nn\n'), ((1819, 1837), 'torch.nn.Linear', 'nn.Linear', (['(1024)', '(1)'], {}), '(1024, 1)\n', (1828, 1837), False, 'from torch import nn\n'), ((1851, 1863), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (1861, 1863), False, 'from torch import nn\n'), ((2665, 2706), 'torch.ones_like', 'torch.ones_like', (['disc_real'], {'device': 'device'}), '(disc_real, device=device)\n', (2680, 2706), False, 'import torch\n'), ((2782, 2824), 
'torch.zeros_like', 'torch.zeros_like', (['disc_fake'], {'device': 'device'}), '(disc_fake, device=device)\n', (2798, 2824), False, 'import torch\n'), ((3028, 3051), 'torch.ones_like', 'torch.ones_like', (['output'], {}), '(output)\n', (3043, 3051), False, 'import torch\n'), ((3171, 3186), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3184, 3186), False, 'import torch\n'), ((3334, 3383), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['fake'], {'normalize': '(True)'}), '(fake, normalize=True)\n', (3361, 3383), False, 'import torchvision\n'), ((3416, 3465), 'torchvision.utils.make_grid', 'torchvision.utils.make_grid', (['data'], {'normalize': '(True)'}), '(data, normalize=True)\n', (3443, 3465), False, 'import torchvision\n')] |
import os
import numpy as np
from PIL import Image
import torch
import kmod.glo as glo
import argparse
from kmod.torch_models import Generator
img_size = 64
dataname = 'lsun'
epoch = 20
num_images = 30000
gen_model_names = {
'1232_began': 'BEGAN_{}_G.pkl'.format(epoch),
'3212_began': 'BEGAN_{}_G.pkl'.format(epoch),
'1232_dcgan': 'GAN_{}_G.pkl'.format(epoch),
'3212_dcgan': 'GAN_{}_G.pkl'.format(epoch),
}
def pil_loader(path):
with open(path, 'rb') as f:
img = Image.open(f)
img = img.resize((img_size, img_size),
resample=Image.BILINEAR)
return img.convert('RGB')
def generate_images():
use_cuda = args.use_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
default_type = (torch.cuda.FloatTensor if use_cuda
else torch.FloatTensor)
dtype = torch.float
load_options = {} if use_cuda else {'map_location':
lambda storage, loc: storage}
torch.set_default_dtype(dtype)
torch.set_default_tensor_type(default_type)
dir_out = os.path.join(dir_problem, 'data')
if not os.path.exists(dir_out):
os.makedirs(dir_out, exist_ok=True)
dir_model = os.path.join(dir_problem, 'models')
batch_size = 64
z_dim = 100
for c in gen_model_names.keys():
torch.manual_seed(66)
model_path = os.path.join(dir_model, gen_model_names[c])
generator = Generator().to(device)
generator.load(model_path, **load_options)
generator.eval()
gen_imgs = []
for _ in range(0, num_images, batch_size):
z = torch.rand(batch_size, z_dim)
z = z.view(-1, z_dim, 1, 1).uniform_(-1, 1).to(device)
samples = generator(z)
samples = samples.cpu().data.numpy()
gen_imgs.append(samples)
gen_imgs = np.vstack(gen_imgs)[:num_images]
gen_imgs = (gen_imgs * 255).astype(np.uint8)
filepath = '{}/{}.npy'.format(dir_out, c)
print('Saving to {}'.format(filepath))
np.save(filepath, gen_imgs)
def subsample_images():
dir_data = args.datadir
dirnames = os.listdir(dir_data)
dir_out = os.path.join(dir_problem, 'data')
if not os.path.exists(dir_out):
os.makedirs(dir_out, exist_ok=True)
for dirname in dirnames:
path = os.path.join(dir_data, dirname)
print(path, dirname)
filenames = [os.path.join(path, fn) for fn in os.listdir(path)]
filenames = filenames[:num_images]
label_name = dirname
savepath_data = '{}/{}.npy'.format(dir_out, label_name)
# data = np.array([imread(fn) for fn in filenames])
data = np.array([np.array(pil_loader(fn)) for fn in filenames])
np.save(savepath_data, data)
def main():
generate_images()
subsample_images()
if __name__ == '__main__':
dir_problem = os.path.join(glo.shared_resource_folder(),
'problems', dataname)
dir_data = os.path.join(dir_problem, 'imgs')
parser = argparse.ArgumentParser()
parser.add_argument('--datadir', type=str, default=dir_data)
parser.add_argument('--use_cuda', help='gpu option',
action='store_true')
args = parser.parse_args()
main()
| [
"numpy.save",
"argparse.ArgumentParser",
"os.makedirs",
"torch.rand",
"torch.manual_seed",
"os.path.exists",
"torch.set_default_tensor_type",
"kmod.torch_models.Generator",
"PIL.Image.open",
"kmod.glo.shared_resource_folder",
"torch.set_default_dtype",
"torch.cuda.is_available",
"torch.devic... | [((733, 776), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (745, 776), False, 'import torch\n'), ((1030, 1060), 'torch.set_default_dtype', 'torch.set_default_dtype', (['dtype'], {}), '(dtype)\n', (1053, 1060), False, 'import torch\n'), ((1065, 1108), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['default_type'], {}), '(default_type)\n', (1094, 1108), False, 'import torch\n'), ((1124, 1157), 'os.path.join', 'os.path.join', (['dir_problem', '"""data"""'], {}), "(dir_problem, 'data')\n", (1136, 1157), False, 'import os\n'), ((1255, 1290), 'os.path.join', 'os.path.join', (['dir_problem', '"""models"""'], {}), "(dir_problem, 'models')\n", (1267, 1290), False, 'import os\n'), ((2194, 2214), 'os.listdir', 'os.listdir', (['dir_data'], {}), '(dir_data)\n', (2204, 2214), False, 'import os\n'), ((2229, 2262), 'os.path.join', 'os.path.join', (['dir_problem', '"""data"""'], {}), "(dir_problem, 'data')\n", (2241, 2262), False, 'import os\n'), ((3044, 3077), 'os.path.join', 'os.path.join', (['dir_problem', '"""imgs"""'], {}), "(dir_problem, 'imgs')\n", (3056, 3077), False, 'import os\n'), ((3091, 3116), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3114, 3116), False, 'import argparse\n'), ((495, 508), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (505, 508), False, 'from PIL import Image\n'), ((694, 719), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (717, 719), False, 'import torch\n'), ((1169, 1192), 'os.path.exists', 'os.path.exists', (['dir_out'], {}), '(dir_out)\n', (1183, 1192), False, 'import os\n'), ((1202, 1237), 'os.makedirs', 'os.makedirs', (['dir_out'], {'exist_ok': '(True)'}), '(dir_out, exist_ok=True)\n', (1213, 1237), False, 'import os\n'), ((1372, 1393), 'torch.manual_seed', 'torch.manual_seed', (['(66)'], {}), '(66)\n', (1389, 1393), False, 'import torch\n'), ((1416, 1459), 'os.path.join', 
'os.path.join', (['dir_model', 'gen_model_names[c]'], {}), '(dir_model, gen_model_names[c])\n', (1428, 1459), False, 'import os\n'), ((2097, 2124), 'numpy.save', 'np.save', (['filepath', 'gen_imgs'], {}), '(filepath, gen_imgs)\n', (2104, 2124), True, 'import numpy as np\n'), ((2274, 2297), 'os.path.exists', 'os.path.exists', (['dir_out'], {}), '(dir_out)\n', (2288, 2297), False, 'import os\n'), ((2307, 2342), 'os.makedirs', 'os.makedirs', (['dir_out'], {'exist_ok': '(True)'}), '(dir_out, exist_ok=True)\n', (2318, 2342), False, 'import os\n'), ((2388, 2419), 'os.path.join', 'os.path.join', (['dir_data', 'dirname'], {}), '(dir_data, dirname)\n', (2400, 2419), False, 'import os\n'), ((2798, 2826), 'numpy.save', 'np.save', (['savepath_data', 'data'], {}), '(savepath_data, data)\n', (2805, 2826), True, 'import numpy as np\n'), ((2946, 2974), 'kmod.glo.shared_resource_folder', 'glo.shared_resource_folder', ([], {}), '()\n', (2972, 2974), True, 'import kmod.glo as glo\n'), ((1669, 1698), 'torch.rand', 'torch.rand', (['batch_size', 'z_dim'], {}), '(batch_size, z_dim)\n', (1679, 1698), False, 'import torch\n'), ((1906, 1925), 'numpy.vstack', 'np.vstack', (['gen_imgs'], {}), '(gen_imgs)\n', (1915, 1925), True, 'import numpy as np\n'), ((2470, 2492), 'os.path.join', 'os.path.join', (['path', 'fn'], {}), '(path, fn)\n', (2482, 2492), False, 'import os\n'), ((1480, 1491), 'kmod.torch_models.Generator', 'Generator', ([], {}), '()\n', (1489, 1491), False, 'from kmod.torch_models import Generator\n'), ((2503, 2519), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (2513, 2519), False, 'import os\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import imageio
import png
import numpy as np
from PIL import Image
root="./hres_img"
n=0
nlst=[]
for i in os.listdir(root):
raw_add = os.path.join(root,i)
ImSize=Image.open(raw_add).size
if ImSize == (320,240):
im2 = True
elif ImSize == (640,480):
im2 = False
else:
raise ValueError
print(raw_add, im2)
if i[-4:]=='.jpg' and i[:4]=='FLIR' and im2:
n+=1
os.system("exiftool -b -RawThermalImage %s > ./temp/tir.png"%raw_add)
im=imageio.imread('./temp/tir.png')
im=im*256+(im//256.0).astype(int)
png.from_array(im, 'L;16').save('./hres_tir/%s.png'%(i[:-4]))
os.system("exiftool -b -EmbeddedImage %s > ./hres_vis/%s.jpg"%(raw_add,i[:-4]))
nlst.append(int(i[4:-4]))
print(nlst)
np.save('nlst.npy',nlst)
print('{} images'.format(n))
| [
"numpy.save",
"imageio.imread",
"os.system",
"PIL.Image.open",
"png.from_array",
"os.path.join",
"os.listdir"
] | [((748, 764), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (758, 764), False, 'import os\n'), ((1369, 1394), 'numpy.save', 'np.save', (['"""nlst.npy"""', 'nlst'], {}), "('nlst.npy', nlst)\n", (1376, 1394), True, 'import numpy as np\n'), ((778, 799), 'os.path.join', 'os.path.join', (['root', 'i'], {}), '(root, i)\n', (790, 799), False, 'import os\n'), ((808, 827), 'PIL.Image.open', 'Image.open', (['raw_add'], {}), '(raw_add)\n', (818, 827), False, 'from PIL import Image\n'), ((1029, 1100), 'os.system', 'os.system', (["('exiftool -b -RawThermalImage %s > ./temp/tir.png' % raw_add)"], {}), "('exiftool -b -RawThermalImage %s > ./temp/tir.png' % raw_add)\n", (1038, 1100), False, 'import os\n'), ((1106, 1138), 'imageio.imread', 'imageio.imread', (['"""./temp/tir.png"""'], {}), "('./temp/tir.png')\n", (1120, 1138), False, 'import imageio\n'), ((1247, 1334), 'os.system', 'os.system', (["('exiftool -b -EmbeddedImage %s > ./hres_vis/%s.jpg' % (raw_add, i[:-4]))"], {}), "('exiftool -b -EmbeddedImage %s > ./hres_vis/%s.jpg' % (raw_add, i\n [:-4]))\n", (1256, 1334), False, 'import os\n'), ((1181, 1207), 'png.from_array', 'png.from_array', (['im', '"""L;16"""'], {}), "(im, 'L;16')\n", (1195, 1207), False, 'import png\n')] |
#!/usr/bin/env python
import itertools as itt
import logging
import os
from datetime import datetime
from getpass import getuser
import numpy as np
import pandas as pd
from .generate import get_percentile_diff, get_inducible_pairs
from .. import hgnc, mi, up, snp as rs
from ..struct.hetnet import HetNet, encode_color_path
log = logging.getLogger()
CP1 = ('p', 'm')
CP1_TERMINAL = ('p', ('m', ()))
CP1_STR = encode_color_path(CP1_TERMINAL, color_path_type='terminal')
CP2 = ('p', 'p', ('g', (('regulated', 'T'),)))
CP2_STR = encode_color_path(CP2, color_path_type='terminal')
def convert_simple_to_terminal(cp):
*head, tail = cp
tail = (tail, ())
if isinstance(cp, list):
return list(head) + [tail]
return tuple(head) + (tail,)
def generate_toy(seed=None):
np.random.seed(seed)
h = HetNet({
"g": {
"regulated": ["T", "F"] # significant regulation
},
"p": {
},
"m": {
},
"s": {
}
})
n_genes = 426
n_proteins = 275
n_mirnas = 16
n_snps = 14
n_mirna_encoding_genes = 8
n_protein_encoding_genes = 275
n_ppis = 292
n_mtis = 51
# ??? not sure about numbers
n_regulated = 25
n_coexprs = 400
regulated = np.random.choice(np.arange(n_genes), size=n_regulated, replace=False)
genes = {}
for i in range(n_genes):
gene = hgnc(i)
genes[i] = gene
h.add_node(gene, dict(color='g', annotations={'regulated': ("T" if i in regulated else "F")}))
protein_encoding_genes = sorted(genes.values())[:n_protein_encoding_genes]
proteins = {}
for i, gene in zip(range(n_proteins), protein_encoding_genes):
protein = up(i)
proteins[i] = protein
h.add_node(protein, dict(color='p', annotations={}))
h.add_edge(gene, protein)
mirna_encoding_genes = list(
np.random.choice(list(set(genes.values()) - set(protein_encoding_genes)), size=n_mirna_encoding_genes,
replace=False))
mirnas = {}
for i, gene in zip(range(n_mirnas), 2 * mirna_encoding_genes):
mirna = mi(i)
mirnas[i] = mirna
h.add_node(mirna, dict(color='m', annotations={}))
h.add_edge(gene, mirna)
h.mirna_encoding = mirna_encoding_genes
mutations = np.random.choice(list(set(genes.values()) - set(protein_encoding_genes) - set(mirna_encoding_genes)),
size=n_snps, replace=False)
snps = {}
for i, gene in zip(range(n_snps), mutations):
snp = rs(i)
snps[i] = snp
h.add_node(snp, dict(color='s', annotations={}))
h.add_edge(gene, snp)
pp = list(itt.combinations(proteins, 2))
for i in np.random.choice(len(pp), size=n_ppis, replace=False):
a, b = pp[i]
h.add_edge(proteins[a], proteins[b])
gg = list(itt.combinations(genes, 2))
for i in np.random.choice(len(gg), size=n_coexprs, replace=False):
a, b = gg[i]
h.add_edge(genes[a], genes[b])
mp = list(itt.product(proteins, mirnas))
for i in np.random.choice(len(mp), size=n_mtis, replace=False):
a, b = mp[i]
h.add_edge(proteins[a], mirnas[b])
h.graph['generation_manifest'] = {
'user': getuser(),
'generation_time': str(datetime.now()),
'np_random_seed': seed,
'protein_encoding': protein_encoding_genes
}
return h
def induce_toy(graph, target_nodes, upper=100, lower=90, loc_coef=1.1, scale_coef=1.2, seed=None):
"""
Generate network pertaining to actual biology, and induce 2 features over protein-coding genes:
- protein-mirna
- protein-protein-regulated_gene
:param graph: the network to induce
:param target_nodes: the nodes to induce in the network
:param upper: the upper percentile to calculate for the color paths
:param lower: the lower percentile to calculate for the color paths
:param loc_coef: the factor to multiply the upper percentile for induction for the mean of random gaussian sampling
:param scale_coef: the factor to multiply the upper-lower percentile difference for the standard deviation of random
gaussian sampling
:param seed: seed for numpy random number generator
:return:
"""
np.random.seed(seed)
pairs = []
# TODO factor out induction parameters
p100a, p98a, pt2a = get_percentile_diff(graph, CP1, upper, lower, color_path_type='simple')
log.debug(
'CP {} -> {}%: {}, {}%: {}, D{}%: {}'.format(encode_color_path(CP1, color_path_type="simple"), upper, p100a,
lower, p98a, upper - lower, pt2a))
p100b, p98b, pt2b = get_percentile_diff(graph, CP2, upper, lower, color_path_type='terminal')
log.debug(
'CP {} -> {}%: {}, {}%: {}, D{}%: {}'.format(encode_color_path(CP2, color_path_type="terminal"), upper, p100b,
lower, p98b, upper - lower, pt2b))
for node in target_nodes:
for edge in get_inducible_pairs(graph, node, CP1,
int(np.random.normal(loc=(loc_coef * p100a), scale=(scale_coef * pt2a))),
color_path_type='simple'):
pairs.append(edge)
for edge in get_inducible_pairs(graph, node, CP2,
int(np.random.normal(loc=(loc_coef * p100b), scale=(scale_coef * pt2b))),
color_path_type='terminal'):
pairs.append(edge)
"""
params = [
(CP1, upper, lower, 1.1, 1.2, 'simple'),
(CP2, upper, lower, 1.1, 1.2, 'terminal')
]
pairs = []
for cp, up, lo, lc, sc, cpt in params:
p100, p98, pt2 = get_percentile_diff(graph, cp, up, lo, color_path_type=cpt)
for node in target_nodes:
ne = int(np.random.normal(loc=(lc * p100), scale=(sc * pt2)))
pairs.extend(get_inducible_pairs(graph, node, cp, ne, color_path_type=cpt))
"""
for a, b in set(pairs):
graph.add_edge(a, b)
graph.graph['induction_manifest'] = {
'user': getuser(),
'induction_time': str(datetime.now()),
'induced': sorted(target_nodes),
'upper': upper,
'lower': lower,
'loc_coef': loc_coef,
'scale_coef': scale_coef,
'np_random_seed': seed
}
return graph
def main(directory, percent=0.8, seed=None):
"""
:param directory: output directory
:param percent: if given, outputs a training and test manifest
:param seed: seed for numpy random number generator
"""
np.random.seed(seed)
h = generate_toy()
n_induce = int(0.5 + 7 / percent)
target_nodes = np.random.choice(h.graph['generation_manifest']['protein_encoding'], size=n_induce, replace=False)
hn = induce_toy(h, target_nodes)
hn.to_resource(directory)
induced = hn.graph['induction_manifest']['induced']
nodes = sorted(hn.graph['generation_manifest']['protein_encoding'])
full_induction_manifest = pd.DataFrame([node in induced for node in nodes], index=nodes, columns=['induced'])
full_induction_manifest.to_csv(os.path.join(directory, 'full_induce_manifest.csv'))
not_induced = list(set(nodes) - set(induced))
n_induced = len(induced)
n_not_induced = len(not_induced)
np.random.shuffle(induced)
np.random.shuffle(not_induced)
head_induced = induced[:int(percent * n_induced)]
tail_induced = induced[int(percent * n_induced):]
head_not_induced = not_induced[:int(percent * n_not_induced)]
tail_not_induced = not_induced[int(percent * n_not_induced):]
head = sorted(head_induced) + sorted(head_not_induced)
tail = sorted(tail_induced) + sorted(tail_not_induced)
full_induction_manifest.loc[head].to_csv(os.path.join(directory, 'training_induce_manifest.csv'))
full_induction_manifest.loc[tail].to_csv(os.path.join(directory, 'test_induce_manifest.csv'))
| [
"pandas.DataFrame",
"numpy.random.seed",
"getpass.getuser",
"logging.getLogger",
"datetime.datetime.now",
"itertools.combinations",
"numpy.arange",
"numpy.random.normal",
"numpy.random.choice",
"itertools.product",
"os.path.join",
"numpy.random.shuffle"
] | [((334, 353), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (351, 353), False, 'import logging\n'), ((797, 817), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (811, 817), True, 'import numpy as np\n'), ((4312, 4332), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4326, 4332), True, 'import numpy as np\n'), ((6691, 6711), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (6705, 6711), True, 'import numpy as np\n'), ((6793, 6896), 'numpy.random.choice', 'np.random.choice', (["h.graph['generation_manifest']['protein_encoding']"], {'size': 'n_induce', 'replace': '(False)'}), "(h.graph['generation_manifest']['protein_encoding'], size=\n n_induce, replace=False)\n", (6809, 6896), True, 'import numpy as np\n'), ((7119, 7209), 'pandas.DataFrame', 'pd.DataFrame', (['[(node in induced) for node in nodes]'], {'index': 'nodes', 'columns': "['induced']"}), "([(node in induced) for node in nodes], index=nodes, columns=[\n 'induced'])\n", (7131, 7209), True, 'import pandas as pd\n'), ((7413, 7439), 'numpy.random.shuffle', 'np.random.shuffle', (['induced'], {}), '(induced)\n', (7430, 7439), True, 'import numpy as np\n'), ((7444, 7474), 'numpy.random.shuffle', 'np.random.shuffle', (['not_induced'], {}), '(not_induced)\n', (7461, 7474), True, 'import numpy as np\n'), ((1291, 1309), 'numpy.arange', 'np.arange', (['n_genes'], {}), '(n_genes)\n', (1300, 1309), True, 'import numpy as np\n'), ((2693, 2722), 'itertools.combinations', 'itt.combinations', (['proteins', '(2)'], {}), '(proteins, 2)\n', (2709, 2722), True, 'import itertools as itt\n'), ((2873, 2899), 'itertools.combinations', 'itt.combinations', (['genes', '(2)'], {}), '(genes, 2)\n', (2889, 2899), True, 'import itertools as itt\n'), ((3047, 3076), 'itertools.product', 'itt.product', (['proteins', 'mirnas'], {}), '(proteins, mirnas)\n', (3058, 3076), True, 'import itertools as itt\n'), ((3266, 3275), 'getpass.getuser', 'getuser', ([], {}), '()\n', 
(3273, 3275), False, 'from getpass import getuser\n'), ((6195, 6204), 'getpass.getuser', 'getuser', ([], {}), '()\n', (6202, 6204), False, 'from getpass import getuser\n'), ((7238, 7289), 'os.path.join', 'os.path.join', (['directory', '"""full_induce_manifest.csv"""'], {}), "(directory, 'full_induce_manifest.csv')\n", (7250, 7289), False, 'import os\n'), ((7882, 7937), 'os.path.join', 'os.path.join', (['directory', '"""training_induce_manifest.csv"""'], {}), "(directory, 'training_induce_manifest.csv')\n", (7894, 7937), False, 'import os\n'), ((7984, 8035), 'os.path.join', 'os.path.join', (['directory', '"""test_induce_manifest.csv"""'], {}), "(directory, 'test_induce_manifest.csv')\n", (7996, 8035), False, 'import os\n'), ((3308, 3322), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3320, 3322), False, 'from datetime import datetime\n'), ((6236, 6250), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6248, 6250), False, 'from datetime import datetime\n'), ((5162, 5225), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(loc_coef * p100a)', 'scale': '(scale_coef * pt2a)'}), '(loc=loc_coef * p100a, scale=scale_coef * pt2a)\n', (5178, 5225), True, 'import numpy as np\n'), ((5432, 5495), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(loc_coef * p100b)', 'scale': '(scale_coef * pt2b)'}), '(loc=loc_coef * p100b, scale=scale_coef * pt2b)\n', (5448, 5495), True, 'import numpy as np\n')] |
"""
"""
import numpy as np
import matplotlib.pyplot as plt
import socket
import os
import mne
from mne.minimum_norm import read_inverse_operator, source_induced_power
###############################################################################
# SETUP PATHS AND PREPARE RAW DATA
hostname = socket.gethostname()
if hostname == "wintermute":
data_path = "/home/mje/mnt/caa/scratch/"
else:
data_path = "/projects/MINDLAB2015_MEG-CorticalAlphaAttention/scratch/"
# CHANGE DIR TO SAVE FILES THE RIGTH PLACE
os.chdir(data_path)
subjects_dir = data_path + "fs_subjects_dir/"
save_folder = data_path + "filter_ica_data/"
maxfiltered_folder = data_path + "maxfiltered_data/"
epochs_folder = data_path + "epoched_data/"
tf_folder = data_path + "tf_data/"
mne_folder = data_path + "minimum_norm/"
subjects = ["0004", "0005", "0006", "0007", "0008", "0009", "0010", "0011",
"0012", "0013", "0014", "0015", "0016", "0017", "0020", "0021",
"0022", "0023", "0024", "0025"] # subject to run
# Compute a source estimate per frequency band including and excluding the
# evoked response
frequencies = np.arange(6, 90, 3) # define frequencies of interest
n_cycles = frequencies / 3. # different number of cycle per frequency
subject = "0005"
# TODO: insert loop here
# subtract the evoked response in order to exclude evoked activity
epochs = mne.read_epochs(epochs_folder +
"%s_filtered_ica_mc_tsss-epo.fif" % subject)
epochs = epochs["ent_left", "ctl_left"]
epochs.crop(None, 0.8)
epochs.resample(500)
# epochs_clt_left = epochs["ctl_left"].copy()
# ind_ent_left = epochs["ent_left"].copy().subtract_evoked()
# ind_clt_left = epochs["ctl_left"].copy().subtract_evoked()
# ind_clt_left = epochs_clt_left.copy().subtract_evoked()
inverse_operator = read_inverse_operator(mne_folder + "%s-inv.fif" % subject)
labels = mne.read_labels_from_annot(subject, parc='PALS_B12_Lobes',
# regexp="Bro",
subjects_dir=subjects_dir)
label = labels[9]
for cond in ["ent_left", "ctl_left"]:
# compute the source space power and phase lock
power, phase_lock = source_induced_power(
epochs[cond], inverse_operator, frequencies, label, baseline=(-0.3, 0.),
baseline_mode="percent", n_cycles=n_cycles, n_jobs=1, pca=True)
exec("power_%s = np.mean(power, axis=0)" % cond) # average over sources
exec("phase_lock_%s = np.mean(phase_lock, axis=0)" % cond) # average over sources
times = epochs.times
power = np.mean(power, axis=0)
phase_lock = np.mean(phase_lock, axis=0)
##########################################################################
# View time-frequency plots
plt.figure()
plt.imshow(20 * power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0., vmax=None, cmap='hot')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % (cond,))
plt.colorbar()
plt.show()
plt.figure()
plt.imshow(phase_lock,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=0, vmax=None,
cmap='hot')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % (cond))
plt.colorbar()
plt.show()
diff_phase = phase_lock_ctl_left - phase_lock_ent_left
diff_power = power_ctl_left - power_ent_left
plt.figure()
plt.imshow(diff_power,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=None, vmax=None, cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Power (%s)' % ("Difference"))
plt.colorbar()
plt.show()
plt.figure()
plt.imshow(diff_phase,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=None, vmax=None,
cmap='RdBu_r')
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.title('Phase-lock (%s)' % ("Difference"))
plt.colorbar()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"mne.read_labels_from_annot",
"matplotlib.pyplot.imshow",
"mne.minimum_norm.source_induced_power",
"matplotlib.pyplot.colorbar",
"mne.minimum_norm.read_inverse_operator",
"socket.gethostname",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.a... | [((296, 316), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (314, 316), False, 'import socket\n'), ((518, 537), 'os.chdir', 'os.chdir', (['data_path'], {}), '(data_path)\n', (526, 537), False, 'import os\n'), ((1128, 1147), 'numpy.arange', 'np.arange', (['(6)', '(90)', '(3)'], {}), '(6, 90, 3)\n', (1137, 1147), True, 'import numpy as np\n'), ((1374, 1450), 'mne.read_epochs', 'mne.read_epochs', (["(epochs_folder + '%s_filtered_ica_mc_tsss-epo.fif' % subject)"], {}), "(epochs_folder + '%s_filtered_ica_mc_tsss-epo.fif' % subject)\n", (1389, 1450), False, 'import mne\n'), ((1806, 1864), 'mne.minimum_norm.read_inverse_operator', 'read_inverse_operator', (["(mne_folder + '%s-inv.fif' % subject)"], {}), "(mne_folder + '%s-inv.fif' % subject)\n", (1827, 1864), False, 'from mne.minimum_norm import read_inverse_operator, source_induced_power\n'), ((1874, 1964), 'mne.read_labels_from_annot', 'mne.read_labels_from_annot', (['subject'], {'parc': '"""PALS_B12_Lobes"""', 'subjects_dir': 'subjects_dir'}), "(subject, parc='PALS_B12_Lobes', subjects_dir=\n subjects_dir)\n", (1900, 1964), False, 'import mne\n'), ((3529, 3541), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3539, 3541), True, 'import matplotlib.pyplot as plt\n'), ((3542, 3703), 'matplotlib.pyplot.imshow', 'plt.imshow', (['diff_power'], {'extent': '[times[0], times[-1], frequencies[0], frequencies[-1]]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmin': 'None', 'vmax': 'None', 'cmap': '"""RdBu_r"""'}), "(diff_power, extent=[times[0], times[-1], frequencies[0],\n frequencies[-1]], aspect='auto', origin='lower', vmin=None, vmax=None,\n cmap='RdBu_r')\n", (3552, 3703), True, 'import matplotlib.pyplot as plt\n'), ((3718, 3740), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3728, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3741, 3769), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency 
(Hz)')\n", (3751, 3769), True, 'import matplotlib.pyplot as plt\n'), ((3770, 3808), 'matplotlib.pyplot.title', 'plt.title', (["('Power (%s)' % 'Difference')"], {}), "('Power (%s)' % 'Difference')\n", (3779, 3808), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3825), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3823, 3825), True, 'import matplotlib.pyplot as plt\n'), ((3826, 3836), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3834, 3836), True, 'import matplotlib.pyplot as plt\n'), ((3838, 3850), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3848, 3850), True, 'import matplotlib.pyplot as plt\n'), ((3851, 4012), 'matplotlib.pyplot.imshow', 'plt.imshow', (['diff_phase'], {'extent': '[times[0], times[-1], frequencies[0], frequencies[-1]]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmin': 'None', 'vmax': 'None', 'cmap': '"""RdBu_r"""'}), "(diff_phase, extent=[times[0], times[-1], frequencies[0],\n frequencies[-1]], aspect='auto', origin='lower', vmin=None, vmax=None,\n cmap='RdBu_r')\n", (3861, 4012), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4060), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (4048, 4060), True, 'import matplotlib.pyplot as plt\n'), ((4061, 4089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (4071, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4090, 4133), 'matplotlib.pyplot.title', 'plt.title', (["('Phase-lock (%s)' % 'Difference')"], {}), "('Phase-lock (%s)' % 'Difference')\n", (4099, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4136, 4150), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4148, 4150), True, 'import matplotlib.pyplot as plt\n'), ((4151, 4161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4159, 4161), True, 'import matplotlib.pyplot as plt\n'), ((2186, 2352), 'mne.minimum_norm.source_induced_power', 'source_induced_power', 
(['epochs[cond]', 'inverse_operator', 'frequencies', 'label'], {'baseline': '(-0.3, 0.0)', 'baseline_mode': '"""percent"""', 'n_cycles': 'n_cycles', 'n_jobs': '(1)', 'pca': '(True)'}), "(epochs[cond], inverse_operator, frequencies, label,\n baseline=(-0.3, 0.0), baseline_mode='percent', n_cycles=n_cycles,\n n_jobs=1, pca=True)\n", (2206, 2352), False, 'from mne.minimum_norm import read_inverse_operator, source_induced_power\n'), ((2563, 2585), 'numpy.mean', 'np.mean', (['power'], {'axis': '(0)'}), '(power, axis=0)\n', (2570, 2585), True, 'import numpy as np\n'), ((2603, 2630), 'numpy.mean', 'np.mean', (['phase_lock'], {'axis': '(0)'}), '(phase_lock, axis=0)\n', (2610, 2630), True, 'import numpy as np\n'), ((2747, 2759), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2757, 2759), True, 'import matplotlib.pyplot as plt\n'), ((2764, 2921), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(20 * power)'], {'extent': '[times[0], times[-1], frequencies[0], frequencies[-1]]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmin': '(0.0)', 'vmax': 'None', 'cmap': '"""hot"""'}), "(20 * power, extent=[times[0], times[-1], frequencies[0],\n frequencies[-1]], aspect='auto', origin='lower', vmin=0.0, vmax=None,\n cmap='hot')\n", (2774, 2921), True, 'import matplotlib.pyplot as plt\n'), ((2947, 2969), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (2957, 2969), True, 'import matplotlib.pyplot as plt\n'), ((2974, 3002), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (2984, 3002), True, 'import matplotlib.pyplot as plt\n'), ((3007, 3040), 'matplotlib.pyplot.title', 'plt.title', (["('Power (%s)' % (cond,))"], {}), "('Power (%s)' % (cond,))\n", (3016, 3040), True, 'import matplotlib.pyplot as plt\n'), ((3045, 3059), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3057, 3059), True, 'import matplotlib.pyplot as plt\n'), ((3064, 3074), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3072, 3074), True, 'import matplotlib.pyplot as plt\n'), ((3080, 3092), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3090, 3092), True, 'import matplotlib.pyplot as plt\n'), ((3097, 3252), 'matplotlib.pyplot.imshow', 'plt.imshow', (['phase_lock'], {'extent': '[times[0], times[-1], frequencies[0], frequencies[-1]]', 'aspect': '"""auto"""', 'origin': '"""lower"""', 'vmin': '(0)', 'vmax': 'None', 'cmap': '"""hot"""'}), "(phase_lock, extent=[times[0], times[-1], frequencies[0],\n frequencies[-1]], aspect='auto', origin='lower', vmin=0, vmax=None,\n cmap='hot')\n", (3107, 3252), True, 'import matplotlib.pyplot as plt\n'), ((3294, 3316), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (3304, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3321, 3349), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (3331, 3349), True, 'import matplotlib.pyplot as plt\n'), ((3354, 3389), 'matplotlib.pyplot.title', 'plt.title', (["('Phase-lock (%s)' % cond)"], {}), "('Phase-lock (%s)' % cond)\n", (3363, 3389), True, 'import matplotlib.pyplot as plt\n'), ((3396, 3410), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3408, 3410), True, 'import matplotlib.pyplot as plt\n'), ((3415, 3425), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3423, 3425), True, 'import matplotlib.pyplot as plt\n')] |
import pandas as pd
import math
import time
import numpy as np
'''Find and replace NaN values'''
def est_nan(data, target_feature, reference_feature):
    """Fill NaN runs in ``data[target_feature]``.

    Leading/trailing all-NaN spans are dropped from the frame. Interior NaN
    runs are filled either by linear interpolation between the surrounding
    valid target values, or — when a valid ``reference_feature`` row is
    closer — by converting the reference value with a ratio estimated from
    the last ``tail_n`` rows where both columns are valid.

    NOTE(review): relies on ``DataFrame.ix``, which was removed in pandas
    1.0 — this function requires an older pandas release. Confirm before use.

    :param data: DataFrame holding both feature columns.
    :param target_feature: Column whose NaNs are estimated.
    :param reference_feature: Correlated column used as a fallback source.
    :return: The (possibly trimmed) DataFrame with NaNs replaced.
    """
    plotting = False # Show plots for data estimation where missing values were found
    # Max number of values to use for ratio
    tail_n = 100
    # make sure there are values for first and last rows
    if (pd.isnull(data[target_feature].iloc[-1])):
        print('NaN values at end of data with length: ' + str(len(data)))
        trim_at = data[target_feature].iloc[:(len(data) - 1)].last_valid_index()
        row_drop_num = len(data) - trim_at
        print('Dropping %d rows' % row_drop_num)
        # NOTE(review): index[trim_at: -1] excludes the final row — verify the
        # last (NaN) row is really meant to survive this drop.
        data = data.drop(data.index[trim_at: -1])
        print('New length of dataset: ' + str(len(data)))
    if (pd.isnull(data[target_feature].iloc[0])):
        print('NaN values at beginning of data with length: ' + str(len(data)))
        trim_at = data[target_feature].iloc[0:].first_valid_index()
        row_drop_num = trim_at
        print('Dropping %d rows' % row_drop_num)
        data = data.drop(data.index[0: trim_at])
        print('New length of dataset: ' + str(len(data)))
    # find indexes of NaNs in A and B columns and create arrays
    nanindex = data.index[data[target_feature].apply(np.isnan)]
    valIndex = data.index[data[target_feature].apply(np.isfinite)]
    valAIndex = data.index[data[reference_feature].apply(np.isfinite)]
    dualIndex = data.index[data[target_feature].apply(np.isfinite) & data[reference_feature].apply(np.isfinite)]
    df_index = data.index.values.tolist()
    # Positional (0-based) locations of the NaN rows within the frame.
    nindex = [df_index.index(i) for i in nanindex]
    # valArray = [df_index.index(i) for i in valIndex]
    # bcRatio set as 1, unless using Coindesk values to fill in NaNs
    try:
        # sum the last 100 values (~2 hours) of ticker data to get the conversion rate
        bcRatio = (
            sum(data[target_feature].ix[dualIndex].tail(tail_n)) / sum(data[reference_feature].ix[dualIndex].tail(tail_n)))
    except:
        # NOTE(review): bare except silently falls back to a 1:1 ratio on ANY
        # failure (including typos) — consider narrowing the exception type.
        bcRatio = 1
    # Find nearest value function (binary search on a sorted index array).
    def find_nearest(array, value):
        idx = np.searchsorted(array, value, side="left")
        if idx > 0 and (idx == len(array) or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])):
            return array[idx - 1]
        else:
            return array[idx]
    nanStart = 0
    nanEnd = 0
    prevNanIndex = -1
    for n in range(len(nindex)):
        # Indices of NaN array
        n_i_1t = (nindex[n] - 1)
        n_i_t = nindex[n]
        n_i_t1 = (nindex[n] + 1)
        # Values of NaN Array
        # NOTE(review): n_v_1t is never used below.
        n_v_1t = data.ix[n_i_1t][reference_feature]
        # If the last value in the data array is NaN
        # and the next value is not NaN
        if (prevNanIndex == n_i_1t) & (n_i_t1 not in nindex):
            # The NaN Series ends with the next non NaN index
            nanEnd = n_i_t1
            placeholder = float(data.loc[nanStart, target_feature])
            # The number of NaN values in the series
            nanDiff = nanEnd - (nanStart + 1)
            # The averaged difference in values between start of NaN series and end of NaN Series
            diff = (data.ix[nanEnd][target_feature] - data.ix[nanStart][target_feature]) / (nanDiff + 1)
            # For each NaN in series, replace with scaled value
            for i in range(nanDiff):
                # Local index of NaN series
                r = i + 1
                # Global index of the dataframe
                row_index = nanStart + r
                # Find the nearest value to serve as reference
                nearestA = find_nearest(valAIndex, (row_index))
                nearestB = find_nearest(valIndex, (row_index))
                nnA = abs(nearestA - row_index)
                nnB = abs(nearestB - row_index)
                if (nnB <= nnA):
                    # Increment by the averaged difference
                    increment = r * diff
                    estimated = (placeholder + increment)
                    data.loc[row_index, target_feature] = estimated
                else:
                    # If A is closer use the conversion rate to port over values
                    placeholderA = data.loc[nearestA, reference_feature]
                    estimated = placeholderA * float(bcRatio)
                    data.loc[row_index, target_feature] = estimated
            # Reset Series Variables
            nanStart = 0
            nanEnd = 0
            prevNanIndex = -1
        # If the last value was NaN and so is the next
        elif (prevNanIndex == n_i_1t) & (n_i_t1 in nindex):
            pass
        # If the last value is not NaN, but the next is, mark the start index
        elif (n_i_1t not in nindex) & (n_i_t1 in nindex):
            nanStart = n_i_1t
        # If only one NaN is found isolated, use the preceding and following values to fill it in
        # NOTE(review): both sides of this condition test n_i_t1 — the first
        # operand was presumably meant to be n_i_1t. Confirm intent.
        elif (n_i_t1 not in nindex) & (n_i_t1 not in nindex):
            nanDiff = n_i_t1 - (n_i_1t + 1)
            placeholder = float(data.loc[n_i_1t, target_feature])
            diff = (data.ix[n_i_t1][target_feature] - data.ix[n_i_1t][target_feature]) / float(nanDiff + 1)
            row_index = n_i_t
            estimated = (data.ix[n_i_t1][target_feature] + diff) * bcRatio
            data.loc[row_index, target_feature] = estimated
            # Reset Series Variables
            nanStart = 0
            nanEnd = 0
            prevNanIndex = -1
        else:
            print("Error matching NaN series")
            nanStart = n_i_1t
        # Set the index of the last NaN to the current index
        prevNanIndex = nindex[n]
    if plotting == True:
        # print(data)
        plot_results(data.index, data[target_feature], data[reference_feature])
    return data
def replace_nans_noise(data, feature_columns):
    """Replace each NaN in the given columns with Gaussian noise drawn from
    that column's own (NaN-ignoring) mean and standard deviation.

    Non-missing entries are left untouched. ``data`` is modified in place
    and also returned.
    """
    for name in feature_columns:
        column = data[name]
        sigma = column.std(axis=0, skipna=True)
        mu = column.mean(axis=0, skipna=True)
        filled = []
        for row in range(len(data)):
            value = column.iloc[row]
            if pd.isnull(value):
                value = np.random.normal(mu, sigma, 1)[0]
            filled.append(value)
        data[name] = filled
    return data
# Plot results
def plot_results(X_plot, A_plot, B_plot):
    """Overlay the two series (A in blue, B in red) against a shared x-axis.

    NOTE(review): ``plt`` (matplotlib.pyplot) is not among this module's
    visible imports — confirm it is brought into scope elsewhere.
    """
    plt.plot(X_plot, A_plot, 'blue', alpha=0.5)
    plt.plot(X_plot, B_plot, 'red', alpha=0.5)
    plt.legend(loc='lower left')
plt.show() | [
"math.fabs",
"numpy.random.normal",
"numpy.searchsorted",
"pandas.isnull"
] | [((368, 408), 'pandas.isnull', 'pd.isnull', (['data[target_feature].iloc[-1]'], {}), '(data[target_feature].iloc[-1])\n', (377, 408), True, 'import pandas as pd\n'), ((775, 814), 'pandas.isnull', 'pd.isnull', (['data[target_feature].iloc[0]'], {}), '(data[target_feature].iloc[0])\n', (784, 814), True, 'import pandas as pd\n'), ((2104, 2146), 'numpy.searchsorted', 'np.searchsorted', (['array', 'value'], {'side': '"""left"""'}), "(array, value, side='left')\n", (2119, 2146), True, 'import numpy as np\n'), ((6155, 6202), 'pandas.isnull', 'pd.isnull', (['data[feature_columns[col]].iloc[row]'], {}), '(data[feature_columns[col]].iloc[row])\n', (6164, 6202), True, 'import pandas as pd\n'), ((2192, 2225), 'math.fabs', 'math.fabs', (['(value - array[idx - 1])'], {}), '(value - array[idx - 1])\n', (2201, 2225), False, 'import math\n'), ((2228, 2257), 'math.fabs', 'math.fabs', (['(value - array[idx])'], {}), '(value - array[idx])\n', (2237, 2257), False, 'import math\n'), ((6060, 6110), 'numpy.random.normal', 'np.random.normal', (['mean_data', 'standard_deviation', '(1)'], {}), '(mean_data, standard_deviation, 1)\n', (6076, 6110), True, 'import numpy as np\n')] |
"""
Module provides methods for analyzing the statistics of a sample
(or values) generated with Monte Carlo techniques.
"""
import numpy as np
from scipy import stats
from .helper import interpret_array
def lag_auto_cov(values, k, mean=None):
    """Lag-``k`` autocovariance of ``values`` (shape ``(N, d)``), per column.

    ``mean`` defaults to the grand mean over the whole array. The sum of
    element-wise products of the two ``k``-shifted, centered slices is
    divided by the full sample length ``N``.
    """
    if mean is None:
        mean = np.mean(values)
    head = values[:-k] - mean
    tail = values[k:] - mean
    return np.einsum('ij,ij->j', head, tail) / len(values)
def auto_cov(values, mean=None, variance=None):
    """Per-column autocovariance of ``values`` for every lag ``0..N-1``.

    :param values: Array of shape ``(N, d)`` (normalized via
        ``interpret_array`` first).
    :param mean: Previously calculated or known per-column mean.
    :param variance: Previously calculated or known per-column variance.
    :return: Array whose row ``k`` holds the lag-``k`` autocovariance of
        each column.
    """
    n_samples = values.shape[0]
    values = interpret_array(values)
    if mean is None:
        mean = np.mean(values, 0)
    if variance is None:
        variance = np.var(values, 0)
    deviations = values - mean
    result = np.empty_like(values)
    # Lag 0 is the variance by definition.
    result[0] = variance
    for lag in range(1, n_samples):
        result[lag] = np.einsum('ij,ij->j', deviations[:-lag], deviations[lag:]) / n_samples
    return result
def auto_corr(values, mean=None, variance=None):
    """Autocorrelation of ``values``: the autocovariance normalized by its
    lag-0 term (the variance), so the first row is identically 1."""
    covariances = auto_cov(values, mean, variance)
    normalization = covariances[0]
    return covariances / normalization
def effective_sample_size(sample, mean, var):
    """ Estimate the effective sample size of an auto-correlated Markov sample.
    Estimated according to http://arxiv.org/abs/1111.4246
    :param sample: Sample object.
    :param mean: Mean of distribution, do not approximate via current sample!
    :param var: Variance of distribution, do not approximate via current sample!
    :return: Estimate of effective sample size.
    """
    mean = interpret_array(mean, sample.ndim)
    var = interpret_array(var, sample.ndim)
    # NOTE(review): `sum` shadows the builtin of the same name in this scope.
    sum = np.zeros(sample.ndim)
    # Finite-sample bias correction size/(size - lag), one factor per lag.
    unbias = sample.size / (sample.size - np.arange(sample.size))
    acor = auto_corr(sample.data, mean, var) * unbias[:, np.newaxis]
    for dim in range(sample.ndim):
        lag = 1
        rho = acor[lag, dim]
        # Accumulate triangle-weighted autocorrelations, truncating once the
        # correlation drops below 0.05 (or the lags are exhausted).
        while rho >= 0.05 and lag < sample.size - 1:
            sum[dim] = sum[dim] + (1 - lag / sample.size) * rho
            lag = lag + 1
            rho = acor[lag, dim]
    return sample.size / (1 + 2 * sum)
def fd_bins(sample):
    """Estimate a reasonable number of histogram bins per dimension. Use with caution.

    Uses the Freedman-Diaconis rule with the multivariate scaling of
    sec. 3.4, Eqn. 3.61 (p. 83) in Scott (1992), "Multivariate Density
    Estimation: Theory, Practice, and Visualization":
    h = 2*IQR(x)*N^{-1/3}, generalized to N^{-1/(2 + d)} in d dimensions.

    :param sample: Sample object exposing ``data`` (shape ``(N, d)``),
        ``size`` and ``ndim`` attributes.
    :return: Integer array with the suggested bin count per dimension
        (always at least 1).
    """
    mins = np.min(sample.data, axis=0)
    maxs = np.max(sample.data, axis=0)
    # Per-dimension bin width: h = 2 * IQR(x) * N^{-1/(2 + d)}
    widths = (2 * stats.iqr(sample.data.transpose(), axis=1) *
              sample.size ** (- 1 / (2 + sample.ndim)))
    # BUGFIX: np.int was removed in NumPy 1.24; the builtin `int` is the
    # correct replacement (same int64 result).
    bins = np.ceil((maxs - mins) / widths).astype(int)
    # Guard against degenerate dimensions (zero extent would give 0 bins).
    return np.maximum(1, bins)
def bin_wise_chi2(sample, bins=None, bin_range=None,
                  min_count=10, int_steps=100):
    """ Compute the bin-wise chi^2 / dof value for a given number of bins.

    If the distribution of points in each bin follows a Poisson distribution
    with the expectation value according to the probability distribution,
    the returned value follows a chi squared distribution with expectation
    value 1.

    :param sample: Sample object exposing ``data``, ``size``, ``ndim`` and a
        ``target`` distribution (either with a vectorized ``pdf`` method or
        callable point-wise).
    :param bins: Bin specification forwarded to ``np.histogramdd``;
        estimated via ``fd_bins`` when omitted.
    :param bin_range: Optional range forwarded to ``np.histogramdd``.
    :param min_count: Bins with fewer observed or expected entries are
        dropped from the statistic.
    :param int_steps: Number of uniform samples used to integrate the pdf
        over each bin.
    :return: Tuple ``(chi2 / dof, p-value, number of contributing bins)``,
        or ``(None, None, None)`` if no bin survives the cuts.
    """
    try:
        pdf = sample.target.pdf
    except AttributeError:
        # Fall back to evaluating the target point by point.
        def pdf(xs):
            prob = np.empty(xs.shape[0])
            for j, x in enumerate(xs):
                # BUGFIX: previously wrote to prob[i] — an undefined name.
                prob[j] = sample.target(x)
            return prob
    if bins is None:
        bins = fd_bins(sample)
    count, edges = np.histogramdd(sample.data, bins, bin_range)
    edges: tuple  # type hint for pycharm
    relevant = np.where(count >= min_count)
    expected = np.empty(len(relevant[0]))
    # Volume of a single (rectangular) bin.
    vol = np.prod([edges[d][1] - edges[d][0] for d in range(sample.ndim)])
    i = 0
    for mi in zip(*relevant):
        low = [edges[d][mi[d]] for d in range(sample.ndim)]
        high = [edges[d][mi[d]+1] for d in range(sample.ndim)]
        # Monte-Carlo integral of the pdf over the bin, scaled to a count.
        expected[i] = np.mean(pdf(np.random.uniform(
            low, high, (int_steps, sample.ndim)))) * sample.size * vol
        i += 1
    finals = np.where(expected >= min_count)[0]
    f_obs = count[relevant][finals]
    chi2, p = stats.chisquare(f_obs, f_exp=expected[finals])
    # BUGFIX: was `len(f_obs > 0)` — the length of a boolean array, which
    # only coincidentally behaved like the intended emptiness check.
    if f_obs.size > 0:
        return chi2 / (f_obs.size - 1), p, f_obs.size
    return None, None, None
| [
"numpy.random.uniform",
"numpy.maximum",
"numpy.ceil",
"numpy.empty",
"numpy.asanyarray",
"numpy.zeros",
"numpy.empty_like",
"numpy.histogramdd",
"numpy.einsum",
"numpy.min",
"numpy.max",
"numpy.where",
"numpy.mean",
"numpy.arange",
"numpy.var",
"scipy.stats.chisquare"
] | [((981, 1002), 'numpy.empty_like', 'np.empty_like', (['values'], {}), '(values)\n', (994, 1002), True, 'import numpy as np\n'), ((1872, 1893), 'numpy.zeros', 'np.zeros', (['sample.ndim'], {}), '(sample.ndim)\n', (1880, 1893), True, 'import numpy as np\n'), ((2605, 2632), 'numpy.min', 'np.min', (['sample.data'], {'axis': '(0)'}), '(sample.data, axis=0)\n', (2611, 2632), True, 'import numpy as np\n'), ((2644, 2671), 'numpy.max', 'np.max', (['sample.data'], {'axis': '(0)'}), '(sample.data, axis=0)\n', (2650, 2671), True, 'import numpy as np\n'), ((2902, 2921), 'numpy.maximum', 'np.maximum', (['(1)', 'bins'], {}), '(1, bins)\n', (2912, 2921), True, 'import numpy as np\n'), ((3714, 3758), 'numpy.histogramdd', 'np.histogramdd', (['sample.data', 'bins', 'bin_range'], {}), '(sample.data, bins, bin_range)\n', (3728, 3758), True, 'import numpy as np\n'), ((3816, 3844), 'numpy.where', 'np.where', (['(count >= min_count)'], {}), '(count >= min_count)\n', (3824, 3844), True, 'import numpy as np\n'), ((4364, 4410), 'scipy.stats.chisquare', 'stats.chisquare', (['f_obs'], {'f_exp': 'expected[finals]'}), '(f_obs, f_exp=expected[finals])\n', (4379, 4410), False, 'from scipy import stats\n'), ((282, 297), 'numpy.mean', 'np.mean', (['values'], {}), '(values)\n', (289, 297), True, 'import numpy as np\n'), ((309, 369), 'numpy.einsum', 'np.einsum', (['"""ij,ij->j"""', '(values[:-k] - mean)', '(values[k:] - mean)'], {}), "('ij,ij->j', values[:-k] - mean, values[k:] - mean)\n", (318, 369), True, 'import numpy as np\n'), ((860, 878), 'numpy.mean', 'np.mean', (['values', '(0)'], {}), '(values, 0)\n', (867, 878), True, 'import numpy as np\n'), ((923, 940), 'numpy.var', 'np.var', (['values', '(0)'], {}), '(values, 0)\n', (929, 940), True, 'import numpy as np\n'), ((4279, 4310), 'numpy.where', 'np.where', (['(expected >= min_count)'], {}), '(expected >= min_count)\n', (4287, 4310), True, 'import numpy as np\n'), ((1073, 1123), 'numpy.einsum', 'np.einsum', (['"""ij,ij->j"""', 
'centered[:-k]', 'centered[k:]'], {}), "('ij,ij->j', centered[:-k], centered[k:])\n", (1082, 1123), True, 'import numpy as np\n'), ((1937, 1959), 'numpy.arange', 'np.arange', (['sample.size'], {}), '(sample.size)\n', (1946, 1959), True, 'import numpy as np\n'), ((2844, 2875), 'numpy.ceil', 'np.ceil', (['((maxs - mins) / widths)'], {}), '((maxs - mins) / widths)\n', (2851, 2875), True, 'import numpy as np\n'), ((2933, 2952), 'numpy.asanyarray', 'np.asanyarray', (['bins'], {}), '(bins)\n', (2946, 2952), True, 'import numpy as np\n'), ((3502, 3523), 'numpy.empty', 'np.empty', (['xs.shape[0]'], {}), '(xs.shape[0])\n', (3510, 3523), True, 'import numpy as np\n'), ((4161, 4215), 'numpy.random.uniform', 'np.random.uniform', (['low', 'high', '(int_steps, sample.ndim)'], {}), '(low, high, (int_steps, sample.ndim))\n', (4178, 4215), True, 'import numpy as np\n')] |
import gym
import simple_environments # NOQA
import dqn
import rl_loop
from ngraph.frontends import neon
import numpy as np
def model(action_axes):
    """Build the Q-network: a 10-unit tanh hidden layer followed by a tanh
    output layer whose axes match the environment's action axes."""
    hidden = neon.Affine(
        nout=10,
        weight_init=neon.GlorotInit(),
        bias_init=neon.ConstantInit(),
        activation=neon.Tanh(),
    )
    output = neon.Affine(
        weight_init=neon.GlorotInit(),
        bias_init=neon.ConstantInit(),
        activation=neon.Tanh(),
        axes=(action_axes, ),
    )
    return neon.Sequential([hidden, output])
def test_dependent_environment():
    """Train ten fresh DQN agents on DependentEnv-v0 and require that at
    least half of them reach the perfect evaluation reward of 100."""
    environment = gym.make('DependentEnv-v0')

    def train_and_score():
        # Build a brand-new agent each run so the trials are independent.
        agent = dqn.Agent(
            dqn.space_shape(environment.observation_space),
            environment.action_space,
            model=model,
            epsilon=dqn.decay_generator(start=1.0, decay=0.995, minimum=0.1),
            gamma=0.99,
            learning_rate=0.1,
        )
        rl_loop.rl_loop_train(environment, agent, episodes=10)
        return rl_loop.evaluate_single_episode(environment, agent)

    total_rewards = [train_and_score() for _ in range(10)]
    # most of these 10 agents will be able to converge to the perfect policy
    assert np.mean(np.array(total_rewards) == 100) >= 0.5
# Allow running this test module directly as a script, outside a test runner.
if __name__ == "__main__":
    test_dependent_environment()
| [
"gym.make",
"ngraph.frontends.neon.Tanh",
"ngraph.frontends.neon.ConstantInit",
"numpy.array",
"dqn.decay_generator",
"dqn.space_shape",
"rl_loop.rl_loop_train",
"ngraph.frontends.neon.GlorotInit",
"rl_loop.evaluate_single_episode"
] | [((603, 630), 'gym.make', 'gym.make', (['"""DependentEnv-v0"""'], {}), "('DependentEnv-v0')\n", (611, 630), False, 'import gym\n'), ((981, 1035), 'rl_loop.rl_loop_train', 'rl_loop.rl_loop_train', (['environment', 'agent'], {'episodes': '(10)'}), '(environment, agent, episodes=10)\n', (1002, 1035), False, 'import rl_loop\n'), ((718, 764), 'dqn.space_shape', 'dqn.space_shape', (['environment.observation_space'], {}), '(environment.observation_space)\n', (733, 764), False, 'import dqn\n'), ((1079, 1130), 'rl_loop.evaluate_single_episode', 'rl_loop.evaluate_single_episode', (['environment', 'agent'], {}), '(environment, agent)\n', (1110, 1130), False, 'import rl_loop\n'), ((849, 905), 'dqn.decay_generator', 'dqn.decay_generator', ([], {'start': '(1.0)', 'decay': '(0.995)', 'minimum': '(0.1)'}), '(start=1.0, decay=0.995, minimum=0.1)\n', (868, 905), False, 'import dqn\n'), ((1238, 1261), 'numpy.array', 'np.array', (['total_rewards'], {}), '(total_rewards)\n', (1246, 1261), True, 'import numpy as np\n'), ((246, 263), 'ngraph.frontends.neon.GlorotInit', 'neon.GlorotInit', ([], {}), '()\n', (261, 263), False, 'from ngraph.frontends import neon\n'), ((287, 306), 'ngraph.frontends.neon.ConstantInit', 'neon.ConstantInit', ([], {}), '()\n', (304, 306), False, 'from ngraph.frontends import neon\n'), ((331, 342), 'ngraph.frontends.neon.Tanh', 'neon.Tanh', ([], {}), '()\n', (340, 342), False, 'from ngraph.frontends import neon\n'), ((400, 417), 'ngraph.frontends.neon.GlorotInit', 'neon.GlorotInit', ([], {}), '()\n', (415, 417), False, 'from ngraph.frontends import neon\n'), ((441, 460), 'ngraph.frontends.neon.ConstantInit', 'neon.ConstantInit', ([], {}), '()\n', (458, 460), False, 'from ngraph.frontends import neon\n'), ((485, 496), 'ngraph.frontends.neon.Tanh', 'neon.Tanh', ([], {}), '()\n', (494, 496), False, 'from ngraph.frontends import neon\n')] |
# Imports
import matplotlib.pyplot as plt
import pysal.lib as lp
import numpy as np
import geopandas as gpd
from pysal.explore.esda.moran import Moran_BV_matrix
from pysal.viz.splot.esda import moran_facet
# Load the SIDS example data and compute the bivariate Moran matrix.
f = gpd.read_file(lp.examples.get_path("sids2.dbf"))
varnames = ['SIDR74', 'SIDR79', 'NWR74', 'NWR79']
# Renamed from `vars`, which shadowed the builtin of the same name.
variables = [np.array(f[name]) for name in varnames]
w = lp.io.open(lp.examples.get_path("sids2.gal")).read()
moran_matrix = Moran_BV_matrix(variables, w, varnames=varnames)
# Plot the default Moran scatterplot facet grid.
fig, axarr = moran_facet(moran_matrix)
plt.show()
# Customize plot: recolor the bivariate fit lines.
fig, axarr = moran_facet(moran_matrix,
                        fitline_bv_kwds=dict(color='#4393c3'))
plt.show()
| [
"pysal.lib.examples.get_path",
"matplotlib.pyplot.show",
"numpy.array",
"pysal.explore.esda.moran.Moran_BV_matrix",
"pysal.viz.splot.esda.moran_facet"
] | [((482, 525), 'pysal.explore.esda.moran.Moran_BV_matrix', 'Moran_BV_matrix', (['vars', 'w'], {'varnames': 'varnames'}), '(vars, w, varnames=varnames)\n', (497, 525), False, 'from pysal.explore.esda.moran import Moran_BV_matrix\n'), ((552, 577), 'pysal.viz.splot.esda.moran_facet', 'moran_facet', (['moran_matrix'], {}), '(moran_matrix)\n', (563, 577), False, 'from pysal.viz.splot.esda import moran_facet\n'), ((578, 588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (586, 588), True, 'import matplotlib.pyplot as plt\n'), ((711, 721), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (719, 721), True, 'import matplotlib.pyplot as plt\n'), ((276, 309), 'pysal.lib.examples.get_path', 'lp.examples.get_path', (['"""sids2.dbf"""'], {}), "('sids2.dbf')\n", (296, 309), True, 'import pysal.lib as lp\n'), ((372, 388), 'numpy.array', 'np.array', (['f[var]'], {}), '(f[var])\n', (380, 388), True, 'import numpy as np\n'), ((425, 458), 'pysal.lib.examples.get_path', 'lp.examples.get_path', (['"""sids2.gal"""'], {}), "('sids2.gal')\n", (445, 458), True, 'import pysal.lib as lp\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 13 13:01:38 2014
Downsample (or upsample) a curve defined as :
| v[0] | v[1] | ... | v[nz-1] |
z[0] z[1] z[2] z[nz-1] z[nz]
| | ... | |
v v v v
zp[0] zp[1] zp[nz-2] zp[nz-1]
To obtain :
| vp[0] | vp[0] | ... | vp[nzFilt-1] |
zFilt[0] zFilt[1] zFilt[2] zFilt[nzFilt-1] zFilt[nzFilt]
| | ... | |
v v v v
zFiltp[0] zFiltp[1] zFiltp[nzFilt-2] zFiltp[nzFilt-1]
If nzFilt < nz => downsample
If nzFilt > nz => upsample
vp in intrapolated by a linear approximation
@author: alexis dot bottero at gmail dot com
"""
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib.pyplot as plt
def find_interval(array, value):
    """Return indices ``(idxInf, idxSup)`` bracketing ``value`` around the
    nearest entry of ``array``: array[idxInf] < value < array[idxSup].

    ``array`` is expected to be a sorted 1-D numpy array. If ``value`` lies
    outside ``[array.min(), array.max()]``, ``(-1, -1)`` is returned.
    """
    if value < array.min() or value > array.max():
        return -1, -1
    # Index of the element closest to `value` (first one on ties) — replaces
    # the previous hand-rolled Python-level list scan.
    idx = int(np.argmin(np.abs(np.asarray(array) - value)))
    if array[idx] >= value and idx != 0:
        return idx - 1, idx
    return idx, idx + 1
def reSample(zp, v, nzFilt):
    """Resample the piecewise curve ``(zp, v)`` onto ``nzFilt`` uniform cells.

    A grid of ``nzFilt`` edges spans ``[zp.min(), zp.max()]``; the returned
    abscissae are the cell midpoints, and each value is linearly
    interpolated between the two original samples bracketing that midpoint
    (via ``find_interval``). ``nzFilt`` smaller than the input resolution
    downsamples, larger upsamples.

    :param zp: Sorted sample positions of the original curve.
    :param v: Values of the original curve at ``zp``.
    :param nzFilt: Number of edges of the new grid (``nzFilt - 1`` cells).
    :return: Tuple ``(zFiltp, vFilt)`` of midpoint positions and values.
    """
    edges = np.linspace(zp.min(), zp.max(), nzFilt)
    # Midpoints of consecutive edges (same arithmetic as the original loop).
    midpoints = edges[:-1] + (edges[1:] - edges[:-1]) / 2.0
    resampled = np.zeros(nzFilt - 1)
    for i in range(len(resampled)):
        lo, hi = find_interval(zp, midpoints[i])
        resampled[i] = (v[hi] - v[lo]) / (zp[hi] - zp[lo]) * (midpoints[i] - zp[lo]) + v[lo]
    return midpoints, resampled
#zmin=0.0
#zmax=10.0
#nz=150 # Number of points describing the curve that we want to downsample
#nzFilt=50 # Number of border points in the downsampled curve (nzFilt < nz)
#z=np.linspace(zmin,zmax,nz)
#zp=np.zeros(nz-1)
#for i in np.arange(1,len(z)):
# zp[i-1]=z[i-1]+(z[i]-z[i-1])/2.0
#v=3.0*np.cos(zp/2.5)+4000.0;
#
#zFiltp,vFilt = reSample(zp,v,nzFilt)
#
#plt.hold(True)
#plt.plot(zp,v,'x',color='blue')
#plt.plot(zFiltp,vFilt,'x',color='red')
# Load the P- and S-wave velocity logs and resample them onto a 2048-cell grid.
logP = np.loadtxt("VP-botteroRS4.txt")
logS = np.loadtxt("VS-botteroRS4.txt")
zmin = 0.0
zmax = 1219.051181
nz = len(logP)
z = np.linspace(zmin, zmax, nz)
# Cell midpoints of the original grid: shift every edge by half a cell
# (vectorized; replaces the element-wise loop).
dz = (z[1] - z[0]) / 2.0
zp = z + dz
nzFilt = 2049
zFiltp, logPfilt = reSample(zp, logP, nzFilt)
zFiltp, logSfilt = reSample(zp, logS, nzFilt)
# BUGFIX: plt.hold() was removed in matplotlib 3.0; overlaying plots on the
# current axes is the default behavior, so the calls are simply dropped.
plt.figure()
plt.plot(zp, logP, color='blue')
plt.plot(zFiltp, logPfilt, color='red')
plt.figure()
plt.plot(zp, logS, color='blue')
plt.plot(zFiltp, logSfilt, color='red')
np.savetxt("logPreal.txt", np.vstack([zFiltp, logPfilt]).T, delimiter=" ")
np.savetxt("logSreal.txt", np.vstack([zFiltp, logSfilt]).T, delimiter=" ")
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.hold",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.loadtxt",
"numpy.linspace",
"numpy.vstack"
] | [((3641, 3672), 'numpy.loadtxt', 'np.loadtxt', (['"""VP-botteroRS4.txt"""'], {}), "('VP-botteroRS4.txt')\n", (3651, 3672), True, 'import numpy as np\n'), ((3678, 3709), 'numpy.loadtxt', 'np.loadtxt', (['"""VS-botteroRS4.txt"""'], {}), "('VS-botteroRS4.txt')\n", (3688, 3709), True, 'import numpy as np\n'), ((3751, 3778), 'numpy.linspace', 'np.linspace', (['zmin', 'zmax', 'nz'], {}), '(zmin, zmax, nz)\n', (3762, 3778), True, 'import numpy as np\n'), ((3780, 3792), 'numpy.zeros', 'np.zeros', (['nz'], {}), '(nz)\n', (3788, 3792), True, 'import numpy as np\n'), ((3960, 3972), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3970, 3972), True, 'import matplotlib.pyplot as plt\n'), ((3973, 3987), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (3981, 3987), True, 'import matplotlib.pyplot as plt\n'), ((3988, 4020), 'matplotlib.pyplot.plot', 'plt.plot', (['zp', 'logP'], {'color': '"""blue"""'}), "(zp, logP, color='blue')\n", (3996, 4020), True, 'import matplotlib.pyplot as plt\n'), ((4019, 4058), 'matplotlib.pyplot.plot', 'plt.plot', (['zFiltp', 'logPfilt'], {'color': '"""red"""'}), "(zFiltp, logPfilt, color='red')\n", (4027, 4058), True, 'import matplotlib.pyplot as plt\n'), ((4057, 4069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4067, 4069), True, 'import matplotlib.pyplot as plt\n'), ((4070, 4084), 'matplotlib.pyplot.hold', 'plt.hold', (['(True)'], {}), '(True)\n', (4078, 4084), True, 'import matplotlib.pyplot as plt\n'), ((4085, 4117), 'matplotlib.pyplot.plot', 'plt.plot', (['zp', 'logS'], {'color': '"""blue"""'}), "(zp, logS, color='blue')\n", (4093, 4117), True, 'import matplotlib.pyplot as plt\n'), ((4116, 4155), 'matplotlib.pyplot.plot', 'plt.plot', (['zFiltp', 'logSfilt'], {'color': '"""red"""'}), "(zFiltp, logSfilt, color='red')\n", (4124, 4155), True, 'import matplotlib.pyplot as plt\n'), ((2830, 2850), 'numpy.zeros', 'np.zeros', (['(nzFilt - 1)'], {}), '(nzFilt - 1)\n', (2838, 2850), True, 'import numpy as 
np\n'), ((2954, 2974), 'numpy.zeros', 'np.zeros', (['(nzFilt - 1)'], {}), '(nzFilt - 1)\n', (2962, 2974), True, 'import numpy as np\n'), ((4182, 4211), 'numpy.vstack', 'np.vstack', (['[zFiltp, logPfilt]'], {}), '([zFiltp, logPfilt])\n', (4191, 4211), True, 'import numpy as np\n'), ((4256, 4285), 'numpy.vstack', 'np.vstack', (['[zFiltp, logSfilt]'], {}), '([zFiltp, logSfilt])\n', (4265, 4285), True, 'import numpy as np\n')] |
import numpy as np
# Qualitative palettes as 0-255 RGB rows scaled to 0-1 floats:
# `lines` for line plots, `bars` for bar charts.
lines = np.array([[57,106,177], [218,124,48], [62,150,81], [204,37,41], [83,81,84], [107,76,154], [146,36,40], [148,139,61]], dtype='float')/255
bars = np.array([[114,147,203], [225,151,76], [132,186,91], [211,94,96], [128,133,133], [144,103,167], [171,104,87], [204,194,16]], dtype='float')/255
# These look better in print
vlines = ['cornflowerblue', 'green', 'firebrick', 'orange', 'black', 'indigo']
vbars = ['steelblue', 'darkgreen', 'darkred', 'darkorange', 'grey', 'mediumvioletred']
# Append an alpha channel of 1.0 so each row becomes a full RGBA color.
lines = np.hstack((lines, np.ones((lines.shape[0],1))))
bars = np.hstack((bars, np.ones((bars.shape[0],1))))
def darken(c, power=2):
    """Darken an RGBA color by raising its channels to ``power``, clipping
    to [0, 1], and preserving the original alpha (last entry).

    :param c: RGBA sequence with channels in [0, 1].
    :param power: Exponent; larger values darken more.
    :return: New numpy array; the input is not modified.
    """
    # np.array already copies its input, so the former .copy() was redundant.
    co = np.clip(np.array(c) ** power, 0, 1.0)
    co[-1] = c[-1]  # keep the alpha channel untouched
    return co
def lighten(c, power=2):
    """Lighten an RGBA color: complement the channels, darken the
    complement, then complement back (the alpha channel round-trips to
    its original value via darken's alpha restoration)."""
    co = 1.0-np.array(c).copy()
    co = darken(co, power)
return 1.0-co | [
"numpy.array",
"numpy.ones",
"numpy.clip"
] | [((28, 180), 'numpy.array', 'np.array', (['[[57, 106, 177], [218, 124, 48], [62, 150, 81], [204, 37, 41], [83, 81, 84],\n [107, 76, 154], [146, 36, 40], [148, 139, 61]]'], {'dtype': '"""float"""'}), "([[57, 106, 177], [218, 124, 48], [62, 150, 81], [204, 37, 41], [83,\n 81, 84], [107, 76, 154], [146, 36, 40], [148, 139, 61]], dtype='float')\n", (36, 180), True, 'import numpy as np\n'), ((172, 337), 'numpy.array', 'np.array', (['[[114, 147, 203], [225, 151, 76], [132, 186, 91], [211, 94, 96], [128, 133,\n 133], [144, 103, 167], [171, 104, 87], [204, 194, 16]]'], {'dtype': '"""float"""'}), "([[114, 147, 203], [225, 151, 76], [132, 186, 91], [211, 94, 96], [\n 128, 133, 133], [144, 103, 167], [171, 104, 87], [204, 194, 16]], dtype\n ='float')\n", (180, 337), True, 'import numpy as np\n'), ((683, 711), 'numpy.clip', 'np.clip', (['(co ** power)', '(0)', '(1.0)'], {}), '(co ** power, 0, 1.0)\n', (690, 711), True, 'import numpy as np\n'), ((539, 567), 'numpy.ones', 'np.ones', (['(lines.shape[0], 1)'], {}), '((lines.shape[0], 1))\n', (546, 567), True, 'import numpy as np\n'), ((593, 620), 'numpy.ones', 'np.ones', (['(bars.shape[0], 1)'], {}), '((bars.shape[0], 1))\n', (600, 620), True, 'import numpy as np\n'), ((655, 666), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (663, 666), True, 'import numpy as np\n'), ((782, 793), 'numpy.array', 'np.array', (['c'], {}), '(c)\n', (790, 793), True, 'import numpy as np\n')] |
import cv2 as cv
import numpy as np
import wget
from os import mkdir, path
from os.path import join, abspath, dirname, exists
# Resolve config/input locations relative to this script's own directory.
script_dir = dirname(abspath(__file__))
config_dir = join(script_dir, 'config')
inputs_dir = join(script_dir, 'inputs')

yolo_weights_path = join(config_dir, 'yolov3.weights')
yolo_names_path = join(config_dir, 'coco.names')
yolo_config_path = join(config_dir, 'yolov3.cfg')
input_image = join(inputs_dir, 'kemy.jpg')

net = cv.dnn.readNet(yolo_weights_path, yolo_config_path)

# Class labels the network can detect, one per line in coco.names.
with open(yolo_names_path, "r") as names_file:
    classes = [entry.strip("\n") for entry in names_file.readlines()]

# Names of the unconnected YOLO output layers, in network order.
layer_names = net.getLayerNames()
output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
# Load the input image; img.shape is (height, width, channels).
img = cv.imread(input_image)
height, width, channels = img.shape
# Extract features: scale pixels by 0.00392 (~1/255), resize to 416x416,
# and swap the R/B channels for the network.
blob = cv.dnn.blobFromImage(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
# We need to pass the img_blob to the algorithm
net.setInput(blob)
outs = net.forward(output_layers)
# Collect every detection whose best class score clears the threshold.
class_ids = []
confidences = []
boxes = []
for output in outs:
    for detection in output:
        # Detecting confidence in 3 steps
        scores = detection[5:] #1: per-class scores follow the 5 box values
        class_id = np.argmax(scores) #2: best-scoring class
        confidence = scores[class_id] #3: its score is the confidence
        if confidence > 0.5: #Means if the object is detected
            # Box center/size are normalized; scale to pixel coordinates.
            center_x = int(detection[0]*width)
            center_y = int(detection[1]*height)
            w = int(detection[2]*width)
            h = int(detection[3]*height)
            #Drawing a rectangle
            x = int(center_x-w/2) # top left value
            y = int(center_y-h/2) # top left value
            boxes.append([x, y, w, h])
            confidences.append(float(confidence))
            class_ids.append(class_id)
            # cv.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Non-maximum suppression: drop overlapping boxes covering the same object.
indexes = cv.dnn.NMSBoxes(boxes, confidences, 0.3, 0.4)
# BUGFIX: `if i in indexes[0]` only ever matched the FIRST kept index
# (NMSBoxes returns an Nx1 array in older OpenCV builds, a flat array in
# newer ones). Flattening yields every kept box in either case.
kept = np.asarray(indexes).flatten() if len(indexes) > 0 else np.array([], dtype=int)
for i in kept:
    x, y, w, h = boxes[i]
    label = classes[class_ids[i]]  # name of the detected object
    cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv.putText(img, label, (x, y), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)
title = f"Found <objects={len(kept)}>"
cv.imshow(title, img)
cv.waitKey(0)
cv.waitKey(0)
cv.destroyAllWindows() | [
"os.path.abspath",
"cv2.putText",
"cv2.dnn.NMSBoxes",
"numpy.argmax",
"cv2.waitKey",
"cv2.destroyAllWindows",
"os.path.dirname",
"cv2.dnn.blobFromImage",
"cv2.dnn.readNet",
"cv2.imread",
"cv2.rectangle",
"cv2.imshow",
"os.path.join"
] | [((139, 156), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (146, 156), False, 'from os.path import join, abspath, dirname, exists\n'), ((175, 193), 'os.path.dirname', 'dirname', (['file_path'], {}), '(file_path)\n', (182, 193), False, 'from os.path import join, abspath, dirname, exists\n'), ((207, 238), 'os.path.join', 'join', (['file_parent_dir', '"""config"""'], {}), "(file_parent_dir, 'config')\n", (211, 238), False, 'from os.path import join, abspath, dirname, exists\n'), ((252, 283), 'os.path.join', 'join', (['file_parent_dir', '"""inputs"""'], {}), "(file_parent_dir, 'inputs')\n", (256, 283), False, 'from os.path import join, abspath, dirname, exists\n'), ((304, 338), 'os.path.join', 'join', (['config_dir', '"""yolov3.weights"""'], {}), "(config_dir, 'yolov3.weights')\n", (308, 338), False, 'from os.path import join, abspath, dirname, exists\n'), ((357, 387), 'os.path.join', 'join', (['config_dir', '"""coco.names"""'], {}), "(config_dir, 'coco.names')\n", (361, 387), False, 'from os.path import join, abspath, dirname, exists\n'), ((407, 437), 'os.path.join', 'join', (['config_dir', '"""yolov3.cfg"""'], {}), "(config_dir, 'yolov3.cfg')\n", (411, 437), False, 'from os.path import join, abspath, dirname, exists\n'), ((452, 480), 'os.path.join', 'join', (['inputs_dir', '"""kemy.jpg"""'], {}), "(inputs_dir, 'kemy.jpg')\n", (456, 480), False, 'from os.path import join, abspath, dirname, exists\n'), ((488, 539), 'cv2.dnn.readNet', 'cv.dnn.readNet', (['yolo_weights_path', 'yolo_config_path'], {}), '(yolo_weights_path, yolo_config_path)\n', (502, 539), True, 'import cv2 as cv\n'), ((906, 928), 'cv2.imread', 'cv.imread', (['input_image'], {}), '(input_image)\n', (915, 928), True, 'import cv2 as cv\n'), ((1012, 1087), 'cv2.dnn.blobFromImage', 'cv.dnn.blobFromImage', (['img', '(0.00392)', '(416, 416)', '(0, 0, 0)', '(True)'], {'crop': '(False)'}), '(img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n', (1032, 1087), True, 'import cv2 as 
cv\n'), ((2112, 2157), 'cv2.dnn.NMSBoxes', 'cv.dnn.NMSBoxes', (['boxes', 'confidences', '(0.3)', '(0.4)'], {}), '(boxes, confidences, 0.3, 0.4)\n', (2127, 2157), True, 'import cv2 as cv\n'), ((2498, 2519), 'cv2.imshow', 'cv.imshow', (['title', 'img'], {}), '(title, img)\n', (2507, 2519), True, 'import cv2 as cv\n'), ((2520, 2533), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (2530, 2533), True, 'import cv2 as cv\n'), ((2534, 2556), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (2554, 2556), True, 'import cv2 as cv\n'), ((1428, 1445), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1437, 1445), True, 'import numpy as np\n'), ((2318, 2375), 'cv2.rectangle', 'cv.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (2330, 2375), True, 'import cv2 as cv\n'), ((2384, 2456), 'cv2.putText', 'cv.putText', (['img', 'label', '(x, y)', 'cv.FONT_HERSHEY_PLAIN', '(1)', '(0, 0, 255)', '(2)'], {}), '(img, label, (x, y), cv.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2)\n', (2394, 2456), True, 'import cv2 as cv\n')] |
import numpy as np
from typing import Dict
from alibi_detect.utils.sampling import reservoir_sampling
def update_reference(X_ref: np.ndarray,
                     X: np.ndarray,
                     n: int,
                     update_method: Dict[str, int] = None,
                     ) -> np.ndarray:
    """
    Update reference dataset for drift detectors.

    Parameters
    ----------
    X_ref
        Current reference dataset.
    X
        New data.
    n
        Count of the total number of instances that have been used so far.
    update_method
        Dict with as key `reservoir_sampling` or `last` and as value n. `reservoir_sampling` will apply
        reservoir sampling with reservoir of size n while `last` will return (at most) the last n instances.

    Returns
    -------
    Updated reference dataset.
    """
    # No update strategy configured -> keep the reference data unchanged.
    if not isinstance(update_method, dict):
        return X_ref

    method = list(update_method.keys())[0]
    target_size = update_method[method]
    if method == 'reservoir_sampling':
        return reservoir_sampling(X_ref, X, target_size, n)
    if method == 'last':
        # Append the new batch and keep at most the `target_size` most recent rows.
        combined = np.concatenate([X_ref, X], axis=0)
        return combined[-target_size:]
    raise KeyError('Only `reservoir_sampling` and `last` are valid update options for X_ref.')
| [
"numpy.concatenate",
"alibi_detect.utils.sampling.reservoir_sampling"
] | [((1041, 1078), 'alibi_detect.utils.sampling.reservoir_sampling', 'reservoir_sampling', (['X_ref', 'X', 'size', 'n'], {}), '(X_ref, X, size, n)\n', (1059, 1078), False, 'from alibi_detect.utils.sampling import reservoir_sampling\n'), ((1138, 1172), 'numpy.concatenate', 'np.concatenate', (['[X_ref, X]'], {'axis': '(0)'}), '([X_ref, X], axis=0)\n', (1152, 1172), True, 'import numpy as np\n')] |
# game.py
#
# Author: <NAME>
# Created On: 01 Feb 2019
import pygame
from . import objects
from . import maze
from . import game_logic
from . import game_rendering
from . import ai
import os.path
import numpy as np
COLORS = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
(0, 255, 255), (255, 0, 255), (255, 255, 255), (0, 0, 0)]
LOGIC_INTERVAL = 100.0
def process_human_input(human):
    """Translate the current keyboard state into the human player's commands.

    Resets the movement vector, then picks a single axis from the arrow keys
    and sets the bomb-drop flag from the space bar.
    """
    human.move[:] = 0
    keys = pygame.key.get_pressed()
    if keys[pygame.K_UP]:
        human.move[1] = -1
    elif keys[pygame.K_DOWN]:
        human.move[1] = 1
    elif keys[pygame.K_LEFT]:
        human.move[0] = -1
    elif keys[pygame.K_RIGHT]:
        human.move[0] = 1
    # Space requests a bomb drop this frame.
    human.drop_bomb = bool(keys[pygame.K_SPACE])
def spawn_points(map):
    """Return the eight candidate spawn tiles: four corners, then edge midpoints."""
    w, h = map.size
    mid_x, mid_y = int(w / 2), int(h / 2)
    corners = [(0, 0), (w - 1, 0), (0, h - 1), (w - 1, h - 1)]
    edge_mids = [(0, mid_y), (mid_x, 0), (w - 1, mid_y), (mid_x, h - 1)]
    return corners + edge_mids
def find_spawn_point(start, player, map):
    """Place ``player`` on the nearest unblocked tile reachable from ``start``.

    Runs a breadth-first search from ``start`` over the four axis-aligned
    neighbours until an unblocked tile is found; ``player.pos`` is set to
    that tile (or left at ``start`` if nothing free is reachable).

    Args:
        start: (x, y) tuple with the preferred spawn tile.
        player: object whose ``pos`` attribute is updated in place.
        map: tile map providing ``is_blocked((x, y))`` and ``is_valid((x, y))``.
    """
    # np.int was removed in NumPy 1.24; the builtin int dtype is equivalent.
    player.pos = np.array(start, dtype=int)
    # FIFO queue implemented with a moving head index instead of the original
    # `points = points[1:]` slicing, which copied the list on every pop (O(n^2)).
    queue = [(start[0], start[1])]
    head = 0
    visited = {}
    while head < len(queue):
        point = queue[head]
        head += 1
        visited[point] = True
        if not map.is_blocked(point):
            player.pos = np.array(point, dtype=int)
            return
        for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            npos = (point[0] + dx, point[1] + dy)
            if npos not in visited and map.is_valid(npos):
                queue.append(npos)
def recolor_player(sprite, id):
    """Return a copy of ``sprite`` tinted with the player's colour.

    Every pixel whose red AND blue channels are both 255 (e.g. magenta or
    white marker pixels) is replaced by ``COLORS[id]``.
    """
    recolored = sprite.copy()
    pixels = pygame.surfarray.pixels3d(recolored)
    marker = np.logical_and(pixels[:, :, 0] == 255, pixels[:, :, 2] == 255)
    pixels[marker] = np.array(COLORS[id])
    return recolored
def run():
    """Entry point: set up pygame, build the world, then run the main loop.

    Uses a fixed-timestep scheme: rendering happens every frame (capped at
    60 FPS by ``clock.tick``), while game logic and AI run once per
    LOGIC_INTERVAL milliseconds; player sprites are interpolated between
    logic ticks for smooth movement.
    """
    # Resolve asset paths relative to this source file.
    root_dir = os.path.dirname(os.path.realpath(__file__))
    asset_dir = os.path.join(root_dir, 'assets')
    stone_file = os.path.join(root_dir, 'assets/stoneblock.png')
    grass_file = os.path.join(root_dir, 'assets/grass.png')
    bomb_file = os.path.join(root_dir, 'assets/bomb.png')
    explosion_file = os.path.join(root_dir, 'assets/explosion.png')
    pygame.init()
    # Window size derived from the board dimensions (19x19 tiles of 30x30 px).
    grid_size = (19, 19)
    tile_size = (30, 30)
    screen_size = (grid_size[0] * tile_size[0], grid_size[1] * tile_size[1])
    # create pygame window
    screen = pygame.display.set_mode(screen_size)
    # create tile map and populate the world
    world = objects.World()
    world.map = objects.TileMap(grid_size, tile_size)
    maze.generate(world.map)
    # Four players: index 0 is the human, the rest are AI-controlled below.
    world.players = [objects.Player(i) for i in range(4)]
    spawns = spawn_points(world.map)
    for s, p in zip(spawns, world.players):
        find_spawn_point(s, p, world.map)
    world.bombs = []
    world.explosions = []
    human = world.players[0]
    ais = [ai.AI(p) for p in world.players[1:]]
    # Load the shared player sprite sheets and tint one copy per player id.
    for sname in ['stand', 'walk_up', 'walk_down', 'walk_left', 'walk_right']:
        fname = os.path.join(asset_dir, sname + '.png')
        sprite = pygame.image.load(fname)
        for p in world.players:
            p.sprites[sname] = recolor_player(sprite, p.id)
    # load assets
    sprites = {}
    sprites['tiles'] = [pygame.image.load(grass_file),
                        pygame.image.load(stone_file)]
    sprites['bomb'] = pygame.image.load(bomb_file)
    sprites['explosion'] = pygame.image.load(explosion_file)
    done = False
    clock = pygame.time.Clock()
    # Milliseconds accumulated since the last logic tick.
    timeAccount = 0.0
    while not done:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                done = True
        process_human_input(human)
        # Run as many fixed logic steps as the accumulated time allows.
        while timeAccount >= LOGIC_INTERVAL:
            # update game logic
            game_logic.update(world)
            # execute all ai based on updated world
            for a in ais:
                a.update(world)
            # reduce time account
            timeAccount -= LOGIC_INTERVAL
        if screen is not None:
            # calculate the render position for each player by interpolating
            # between the previous and current logic positions
            for player in world.players:
                diff = player.pos - player.prev_pos
                fac = max(0.0, timeAccount / LOGIC_INTERVAL)
                # move the sprite according to the amount
                player.render_pos = player.prev_pos + fac * diff
            game_rendering.render(screen, world, sprites)
        # tick(60) caps the frame rate and returns elapsed milliseconds.
        timeAccount += clock.tick(60)
| [
"numpy.logical_and",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.init",
"numpy.array",
"pygame.surfarray.pixels3d",
"pygame.image.load",
"pygame.time.Clock",
"pygame.key.get_pressed"
] | [((443, 467), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (465, 467), False, 'import pygame\n'), ((1127, 1156), 'numpy.array', 'np.array', (['start'], {'dtype': 'np.int'}), '(start, dtype=np.int)\n', (1135, 1156), True, 'import numpy as np\n'), ((1795, 1828), 'pygame.surfarray.pixels3d', 'pygame.surfarray.pixels3d', (['result'], {}), '(result)\n', (1820, 1828), False, 'import pygame\n'), ((1870, 1922), 'numpy.logical_and', 'np.logical_and', (['tmp_mask[:, :, 0]', 'tmp_mask[:, :, 2]'], {}), '(tmp_mask[:, :, 0], tmp_mask[:, :, 2])\n', (1884, 1922), True, 'import numpy as np\n'), ((1944, 1964), 'numpy.array', 'np.array', (['COLORS[id]'], {}), '(COLORS[id])\n', (1952, 1964), True, 'import numpy as np\n'), ((2362, 2375), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2373, 2375), False, 'import pygame\n'), ((2545, 2581), 'pygame.display.set_mode', 'pygame.display.set_mode', (['screen_size'], {}), '(screen_size)\n', (2568, 2581), False, 'import pygame\n'), ((3460, 3488), 'pygame.image.load', 'pygame.image.load', (['bomb_file'], {}), '(bomb_file)\n', (3477, 3488), False, 'import pygame\n'), ((3516, 3549), 'pygame.image.load', 'pygame.image.load', (['explosion_file'], {}), '(explosion_file)\n', (3533, 3549), False, 'import pygame\n'), ((3581, 3600), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (3598, 3600), False, 'import pygame\n'), ((3175, 3199), 'pygame.image.load', 'pygame.image.load', (['fname'], {}), '(fname)\n', (3192, 3199), False, 'import pygame\n'), ((3352, 3381), 'pygame.image.load', 'pygame.image.load', (['grass_file'], {}), '(grass_file)\n', (3369, 3381), False, 'import pygame\n'), ((3407, 3436), 'pygame.image.load', 'pygame.image.load', (['stone_file'], {}), '(stone_file)\n', (3424, 3436), False, 'import pygame\n'), ((3665, 3683), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (3681, 3683), False, 'import pygame\n'), ((1433, 1462), 'numpy.array', 'np.array', (['point'], {'dtype': 'np.int'}), '(point, 
dtype=np.int)\n', (1441, 1462), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import copy
import posenet.constants
import math
#used to calculate the angles
def find_angle(a, b, c):
    """Angle (integer degrees, 0-359) at vertex ``b`` formed by ``a`` and ``c``.

    Returns 0 if the computation fails for any reason (e.g. missing points).
    """
    try:
        theta = math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
        degrees = int(math.degrees(theta))
        if degrees < 0:
            degrees += 360
        return degrees
    except Exception:
        return 0
def valid_resolution(width, height, output_stride=16):
    """Snap (width, height) to the nearest valid network input resolution.

    A valid dimension is a multiple of ``output_stride`` plus one pixel.
    """
    def _snap(value):
        return (int(value) // output_stride) * output_stride + 1
    return _snap(width), _snap(height)
def _process_input(source_img, scale_factor=1.0, output_stride=16):
    """Resize, colour-convert and normalise an image into network input format.

    Returns a (1, H, W, 3) float32 tensor with values in [-1, 1], the original
    image, and the per-axis (y, x) scale from network size back to source size.
    """
    src_h, src_w = source_img.shape[0], source_img.shape[1]
    target_width, target_height = valid_resolution(
        src_w * scale_factor, src_h * scale_factor, output_stride=output_stride)
    scale = np.array([src_h / target_height, src_w / target_width])

    resized = cv2.resize(source_img, (target_width, target_height), interpolation=cv2.INTER_LINEAR)
    # OpenCV loads BGR; the network expects RGB scaled to [-1, 1].
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB).astype(np.float32)
    normalised = rgb * (2.0 / 255.0) - 1.0
    input_img = normalised.reshape(1, target_height, target_width, 3)
    return input_img, source_img, scale
def read_cap(cap, scale_factor=1.0, output_stride=16):
    """Grab one frame from a capture device and preprocess it for the network."""
    ok, frame = cap.read()
    if not ok:
        raise IOError("webcam failure")
    return _process_input(frame, scale_factor, output_stride)
def read_imgfile(path, scale_factor=1.0, output_stride=16):
    """Load an image from disk and preprocess it for the network."""
    return _process_input(cv2.imread(path), scale_factor, output_stride)
def draw_keypoints(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    """Overlay all sufficiently confident keypoints on ``img``.

    Keypoint coordinates arrive as (y, x); cv2.KeyPoint takes (x, y), hence
    the index swap. Marker size scales with the part confidence.
    """
    markers = [
        cv2.KeyPoint(kc[1], kc[0], 10. * ks)
        for ii, score in enumerate(instance_scores)
        if not score < min_pose_confidence
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :])
        if not ks < min_part_confidence
    ]
    return cv2.drawKeypoints(img, markers, outImage=np.array([]))
def get_adjacent_keypoints(keypoint_scores, keypoint_coords, min_confidence=0.1):
    """Build (x, y) endpoint pairs for each skeleton edge whose joints are both confident."""
    segments = []
    for left, right in posenet.CONNECTED_PART_INDICES:
        if keypoint_scores[left] < min_confidence or keypoint_scores[right] < min_confidence:
            continue
        # Coordinates are stored (y, x); reverse each endpoint to (x, y) for drawing.
        endpoints = np.array(
            [keypoint_coords[left][::-1], keypoint_coords[right][::-1]]).astype(np.int32)
        segments.append(endpoints)
    return segments
def draw_skeleton(
        img, instance_scores, keypoint_scores, keypoint_coords,
        min_pose_confidence=0.5, min_part_confidence=0.5):
    """Draw the skeleton line segments of every confident pose onto ``img``."""
    segments = []
    for idx, score in enumerate(instance_scores):
        if score < min_pose_confidence:
            continue
        segments.extend(get_adjacent_keypoints(
            keypoint_scores[idx, :], keypoint_coords[idx, :, :], min_part_confidence))
    return cv2.polylines(img, segments, isClosed=False, color=(255, 255, 0))
def draw_skel_and_kp(img, instance_scores, keypoint_scores, keypoint_coords, body_rotation,frame_count,new1,new_adj,min_pose_score=0.5, min_part_score=0.5):
    """Draw keypoints, skeleton and joint angles on ``img`` with frame smoothing.

    To reduce jitter, the drawn keypoints/skeleton (``new1``/``new_adj``) are
    refreshed only on the first frame and every 15th frame thereafter; in
    between, the cached ones are re-drawn. Joint angles are computed from the
    cached keypoints (``pts_new``) but the text is positioned at the current
    frame's keypoints (``pts``).
    NOTE(review): ``body_rotation`` is accepted but never used — confirm
    whether it can be dropped by callers.

    Returns (annotated image, cached keypoints, cached skeleton segments,
    cached keypoints converted to coordinate pairs).
    """
    out_img = img
    adjacent_keypoints = []
    cv_keypoints = []
    for ii, score in enumerate(instance_scores):
        if score < min_pose_score:
            continue
        new_keypoints = get_adjacent_keypoints(
            keypoint_scores[ii, :], keypoint_coords[ii, :, :], min_part_score)
        adjacent_keypoints.extend(new_keypoints)
        for ks, kc in zip(keypoint_scores[ii, :], keypoint_coords[ii, :, :]):
            if ks < min_part_score:
                continue
            # Coordinates are (y, x); cv2.KeyPoint wants (x, y).
            cv_keypoints.append(cv2.KeyPoint(kc[1], kc[0], 10. * ks))
    # Convert cv2.KeyPoint objects into plain (x, y) coordinate arrays.
    pts = cv2.KeyPoint_convert(cv_keypoints)
    print('frame-count(utils)=',frame_count)  # debug output
    frame_count=frame_count+1
    pts_ret=cv2.KeyPoint_convert(new1)
    #new1 = copy.copy(cv_keypoints) if len(new1) <= 0 else new1
    if(frame_count %15 ==0 or frame_count==1):
        # Refresh frame: draw the freshly detected pose and cache it for the
        # next 14 frames (frame stabilisation).
        out_img = cv2.drawKeypoints(out_img, cv_keypoints, outImage=np.array([]), color=(255, 255, 255),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        out_img = cv2.polylines(out_img, adjacent_keypoints, False, (255, 255, 255), 4)
        new_adj=copy.copy(adjacent_keypoints)
        new1=copy.copy(cv_keypoints)
    else:
        # In-between frame: re-draw the cached pose instead of the new one.
        out_img = cv2.drawKeypoints(out_img, new1, outImage=np.array([]), color=(255, 255, 255),flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        out_img = cv2.polylines(out_img, new_adj, False, (255, 255, 255), 4)
    pts_new=cv2.KeyPoint_convert(new1)
    # Angle calculation: indices follow the keypoint layout used below
    # (5/6 shoulders, 7/8 elbows, 9/10 wrists, 11/12 hips, 13/14 knees,
    # 15/16 ankles) — presumably the COCO ordering; TODO confirm.
    print('pts-new= ',pts_new)  # debug output
    try:
        # Left elbow angle (wrapped to <= 180 degrees).
        ang1=find_angle(pts_new[5],pts_new[7],pts_new[9])
        if ang1>180:
            ang1=360-ang1
        cv2.putText(out_img,"{}".format(ang1), (pts[7][0],pts[7][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side of the window.
        #text1= 'Left Elbow= '+str(ang1)
        #cv2.putText(out_img,text1, (400,20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (222, 0, 0), 1,lineType=cv2.LINE_AA)

        # Left shoulder angle.
        ang2=find_angle(pts_new[7],pts_new[5],pts_new[11])
        cv2.putText(out_img,"{}".format(ang2), (pts[5][0],pts[5][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side of the window.
        #text2= 'Left Shoulder= '+str(ang2)
        #cv2.putText(out_img,text2, (400,37), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (222, 0, 0), 1,lineType=cv2.LINE_AA)

        # Right elbow angle (wrapped to <= 180 degrees).
        ang3=find_angle(pts_new[10],pts_new[8],pts_new[6])
        if ang3>180:
            ang3=360-ang3
        cv2.putText(out_img,"{}".format(ang3), (pts[8][0],pts[8][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side of the window.
        #text3='Right Elbow= '+str(ang3)
        #cv2.putText(out_img,text3, (400,98), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 222, 0), 1,lineType=cv2.LINE_AA)

        # Right shoulder angle.
        ang4=find_angle(pts_new[12],pts_new[6],pts_new[8])
        cv2.putText(out_img,"{}".format(ang4), (pts[6][0],pts[6][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side of the window.
        #text4='Right shoulder='+str(ang4)
        #cv2.putText(out_img,text4, (400,115), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 222, 0), 1,lineType=cv2.LINE_AA)

        # Left knee angle.
        ang5=find_angle(pts_new[15],pts_new[13],pts_new[11])
        cv2.putText(out_img,"{}".format(ang5), (pts[13][0],pts[13][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side
        #text5='Left Knee=' + str(ang5)
        #cv2.putText(out_img,text5, (400,71), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (222, 0, 0), 1,lineType=cv2.LINE_AA)

        # Right knee angle.
        ang6=find_angle(pts_new[12],pts_new[14],pts_new[16])
        cv2.putText(out_img,"{}".format(ang6), (pts[14][0],pts[14][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side
        #text6='Right Knee='+str(ang6)
        #cv2.putText(out_img,text6, (400,149), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 222, 0), 1,lineType=cv2.LINE_AA)

        # Left hip angle.
        ang7=find_angle(pts_new[5],pts_new[11],pts_new[13])
        cv2.putText(out_img,"{}".format(ang7), (pts[11][0],pts[11][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side
        #text7='left heap= '+ str(ang7)
        #cv2.putText(out_img,text7, (400,54), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (222, 0, 0), 1,lineType=cv2.LINE_AA)

        # Right hip angle.
        ang8=find_angle(pts_new[6],pts_new[12],pts_new[14])
        cv2.putText(out_img,"{}".format(ang8), (pts[12][0],pts[12][1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2,lineType=cv2.LINE_AA)
        #printing on the right side
        #text8='Right Heap='+str(ang8)
        #cv2.putText(out_img,text8, (400,132), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 222, 0), 1,lineType=cv2.LINE_AA)
        #Drawing using sticker
    except Exception:
        # Not enough keypoints to index into pts/pts_new; skip angle overlay.
        print('points not found')
    return out_img,new1,new_adj,pts_ret
| [
"cv2.polylines",
"math.atan2",
"cv2.cvtColor",
"copy.copy",
"cv2.imread",
"cv2.KeyPoint",
"numpy.array",
"cv2.KeyPoint_convert",
"cv2.resize"
] | [((815, 902), 'numpy.array', 'np.array', (['[source_img.shape[0] / target_height, source_img.shape[1] / target_width]'], {}), '([source_img.shape[0] / target_height, source_img.shape[1] /\n target_width])\n', (823, 902), True, 'import numpy as np\n'), ((916, 1006), 'cv2.resize', 'cv2.resize', (['source_img', '(target_width, target_height)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(source_img, (target_width, target_height), interpolation=cv2.\n INTER_LINEAR)\n', (926, 1006), False, 'import cv2\n'), ((1508, 1524), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1518, 1524), False, 'import cv2\n'), ((3088, 3167), 'cv2.polylines', 'cv2.polylines', (['out_img', 'adjacent_keypoints'], {'isClosed': '(False)', 'color': '(255, 255, 0)'}), '(out_img, adjacent_keypoints, isClosed=False, color=(255, 255, 0))\n', (3101, 3167), False, 'import cv2\n'), ((3952, 3986), 'cv2.KeyPoint_convert', 'cv2.KeyPoint_convert', (['cv_keypoints'], {}), '(cv_keypoints)\n', (3972, 3986), False, 'import cv2\n'), ((4079, 4105), 'cv2.KeyPoint_convert', 'cv2.KeyPoint_convert', (['new1'], {}), '(new1)\n', (4099, 4105), False, 'import cv2\n'), ((4433, 4502), 'cv2.polylines', 'cv2.polylines', (['out_img', 'adjacent_keypoints', '(False)', '(255, 255, 255)', '(4)'], {}), '(out_img, adjacent_keypoints, False, (255, 255, 255), 4)\n', (4446, 4502), False, 'import cv2\n'), ((4520, 4549), 'copy.copy', 'copy.copy', (['adjacent_keypoints'], {}), '(adjacent_keypoints)\n', (4529, 4549), False, 'import copy\n'), ((4563, 4586), 'copy.copy', 'copy.copy', (['cv_keypoints'], {}), '(cv_keypoints)\n', (4572, 4586), False, 'import copy\n'), ((4790, 4848), 'cv2.polylines', 'cv2.polylines', (['out_img', 'new_adj', '(False)', '(255, 255, 255)', '(4)'], {}), '(out_img, new_adj, False, (255, 255, 255), 4)\n', (4803, 4848), False, 'import cv2\n'), ((4866, 4892), 'cv2.KeyPoint_convert', 'cv2.KeyPoint_convert', (['new1'], {}), '(new1)\n', (4886, 4892), False, 'import cv2\n'), ((1018, 1060), 'cv2.cvtColor', 
'cv2.cvtColor', (['input_img', 'cv2.COLOR_BGR2RGB'], {}), '(input_img, cv2.COLOR_BGR2RGB)\n', (1030, 1060), False, 'import cv2\n'), ((2136, 2148), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2144, 2148), True, 'import numpy as np\n'), ((2038, 2075), 'cv2.KeyPoint', 'cv2.KeyPoint', (['kc[1]', 'kc[0]', '(10.0 * ks)'], {}), '(kc[1], kc[0], 10.0 * ks)\n', (2050, 2075), False, 'import cv2\n'), ((3874, 3911), 'cv2.KeyPoint', 'cv2.KeyPoint', (['kc[1]', 'kc[0]', '(10.0 * ks)'], {}), '(kc[1], kc[0], 10.0 * ks)\n', (3886, 3911), False, 'import cv2\n'), ((4329, 4341), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4337, 4341), True, 'import numpy as np\n'), ((4686, 4698), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4694, 4698), True, 'import numpy as np\n'), ((176, 212), 'math.atan2', 'math.atan2', (['(c[1] - b[1])', '(c[0] - b[0])'], {}), '(c[1] - b[1], c[0] - b[0])\n', (186, 212), False, 'import math\n'), ((211, 247), 'math.atan2', 'math.atan2', (['(a[1] - b[1])', '(a[0] - b[0])'], {}), '(a[1] - b[1], a[0] - b[0])\n', (221, 247), False, 'import math\n'), ((2476, 2545), 'numpy.array', 'np.array', (['[keypoint_coords[left][::-1], keypoint_coords[right][::-1]]'], {}), '([keypoint_coords[left][::-1], keypoint_coords[right][::-1]])\n', (2484, 2545), True, 'import numpy as np\n')] |
# Data filtering rules
#
# Note! Physics observable (fiducial / kinematic) cuts are defined in cuts.py, not here.
#
# <EMAIL>, 2021
import numpy as np
import numba
from icenet.tools import stx
def filter_nofilter(X, ids, isMC, xcorr_flow=False):
    """Pass-through filter: accept every event."""
    # One boolean per event row; dtype must stay np.bool_.
    return np.full(X.shape[0], True, dtype=np.bool_)
def filter_standard(X, ids, isMC, xcorr_flow=False):
    """ Basic filters.

    Args:
        X    : Number of vectors N x Number of variables D
        ids  : Variable name array D
        isMC : MC or not

    Returns:
        ind : Passing indices
    """
    # Cut definitions per running mode.
    mode_cuts = {
        'mode_e1': ['gen_e1_l1_dr < 0.2',
                    'gen_e2_l1_dr < 0.2',
                    'gen_e1_hlt_dr < 0.2'],
        'mode_e2': ['gen_e1_l1_dr < 0.2',
                    'gen_e2_l1_dr < 0.2',
                    'gen_e1_hlt_dr < 0.2',
                    'gen_e2_hlt_dr < 0.2'],
        'data':    ['isgjson == 1'],
    }
    if isMC not in mode_cuts:
        raise Exception(__name__ + '.filter_standard: Unknown isMC mode')
    cutlist = mode_cuts[isMC]

    # Construct and apply
    cuts, names = stx.construct_columnar_cuts(X=X, ids=ids, cutlist=cutlist)
    ind = stx.apply_cutflow(cut=cuts, names=names, xcorr_flow=xcorr_flow)

    return ind
return ind
# Add alternative filters here ...
| [
"icenet.tools.stx.apply_cutflow",
"icenet.tools.stx.construct_columnar_cuts",
"numpy.ones"
] | [((282, 317), 'numpy.ones', 'np.ones', (['X.shape[0]'], {'dtype': 'np.bool_'}), '(X.shape[0], dtype=np.bool_)\n', (289, 317), True, 'import numpy as np\n'), ((1148, 1206), 'icenet.tools.stx.construct_columnar_cuts', 'stx.construct_columnar_cuts', ([], {'X': 'X', 'ids': 'ids', 'cutlist': 'cutlist'}), '(X=X, ids=ids, cutlist=cutlist)\n', (1175, 1206), False, 'from icenet.tools import stx\n'), ((1225, 1288), 'icenet.tools.stx.apply_cutflow', 'stx.apply_cutflow', ([], {'cut': 'cuts', 'names': 'names', 'xcorr_flow': 'xcorr_flow'}), '(cut=cuts, names=names, xcorr_flow=xcorr_flow)\n', (1242, 1288), False, 'from icenet.tools import stx\n')] |
#!/usr/bin/env python3
import numpy as np
def compute_Happ(steps, s, Ms):
    """Build the applied-field schedule following the OOMMF ramp logic.

    Each row of ``s`` describes one linear ramp segment:
        columns 0-2: initial (x, y, z) field,
        columns 3-5: final (x, y, z) field,
        column 6:    number of simulation steps for the segment.

    Args:
        steps: total number of simulation steps; should equal sum(s[:, 6]).
        s: (n_segments, 7) array of ramp definitions (see above).
        Ms: saturation magnetisation (currently unused; kept for interface
            compatibility).

    Returns:
        (steps, 3) array with the applied field at every simulation step.
    """
    if sum(s[:, 6]) != steps:
        print("Total pulse duration != Total steps")

    h_app = np.zeros((steps, 3))
    # The Tesla -> A/m conversion 1/(4*pi*1e-7) was disabled in the original
    # code (immediately overwritten with 1); the field is kept in input units.
    t2am = 1

    count = 0
    for line in range(len(s)):
        duration = int(s[line, 6])
        for dimension in range(3):
            if duration > 1:
                slope = (s[line, dimension + 3] - s[line, dimension]) / (duration - 1)
            else:
                # Single-step segment: no ramp, hold the initial value
                # (avoids the division by zero / NaN of the original).
                slope = 0.0
            for n in range(duration):
                h_app[count + n, dimension] = (s[line, dimension] + n * slope) * t2am
        # Advance explicitly by the segment length instead of relying on the
        # leaked inner loop variable ``n`` (which broke for duration == 0).
        count += duration
    return h_app
| [
"numpy.arctan",
"numpy.zeros"
] | [((632, 652), 'numpy.zeros', 'np.zeros', (['(steps, 3)'], {}), '((steps, 3))\n', (640, 652), True, 'import numpy as np\n'), ((688, 700), 'numpy.arctan', 'np.arctan', (['(1)'], {}), '(1)\n', (697, 700), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 14:34:47 2019
@author: matusmacbookpro
"""
import numpy as np
from PIL import Image
from PIL import ImageDraw
from math import acos
from math import sqrt
from math import pi
import colorsys
import copy
#adapted from: https://github.com/NVlabs/Deep_Object_Pose/blob/master/scripts/train.py
def CreateBeliefMap(img_size, points_belief, nbpoints=9, sigma=2, scale=8):
    """
    Render one Gaussian belief map per keypoint.

    Args:
        img_size: [width, height] list; NOTE it is downscaled in place by
            ``scale`` (mirrors the original behaviour).
        points_belief: nested list [nb objects][nb points][(x, y)].
        nbpoints: (int) number of keypoints per object.
        sigma: (int) size of the belief map point.
        scale: downscale factor applied to image size and point coordinates.
    return:
        (nbpoints, W, H) array of belief maps, quantised to uint8 levels.
    """
    pointsBelief = copy.deepcopy(points_belief)

    # Bring the first object's keypoints into the downscaled coordinate frame.
    for i in range(len(pointsBelief[0])):
        x, y = pointsBelief[0][i]
        pointsBelief[0][i] = (x / scale, y / scale)

    img_size[0] = int(img_size[0] / scale)
    img_size[1] = int(img_size[1] / scale)

    beliefsImg = []
    sigma = int(sigma)
    window = sigma * 2
    for numb_point in range(nbpoints):
        array = np.zeros(img_size)
        for point in pointsBelief:
            p = point[numb_point]
            # Only draw spots whose window fits entirely inside the map.
            if p[0] - window >= 0 and p[0] + window < img_size[0] and p[1] - window >= 0 and p[1] + window < img_size[1]:
                for i in range(int(p[0]) - window, int(p[0]) + window):
                    for j in range(int(p[1]) - window, int(p[1]) + window):
                        array[i, j] = np.exp(-(((i - p[0]) ** 2 + (j - p[1]) ** 2) / (2 * (sigma ** 2))))

        stack = np.stack([array, array, array], axis=0).transpose(2, 1, 0)
        # Round-trip through PIL to quantise values to uint8, as before.
        beliefsImg.append(Image.fromarray((stack * 255).astype('uint8')))

    channels = [np.array(bi)[:, :, 0:1] for bi in beliefsImg]
    belief_concat = np.concatenate(tuple(channels), axis=2)
    return np.rollaxis(belief_concat, 2, 0)
def normalize(v):
    """Return ``v`` scaled by its L1 norm (eps-guarded for the zero vector)."""
    l1 = np.linalg.norm(v, ord=1)
    denom = l1 if l1 != 0 else np.finfo(v.dtype).eps
    return v / denom
def length(v):
    """Euclidean length of a 2-D vector."""
    return sqrt(v[0] * v[0] + v[1] * v[1])
def determinant(v, w):
    """2-D cross product (z component) of vectors ``v`` and ``w``."""
    return v[0] * w[1] - w[0] * v[1]
def dot_product(v, w):
    """Dot product of two 2-D vectors."""
    return w[0] * v[0] + w[1] * v[1]
def inner_angle(v, w):
    """Unsigned angle between 2-D vectors ``v`` and ``w``, in degrees."""
    cos_theta = dot_product(v, w) / (length(v) * length(w))
    radians = acos(cos_theta)
    return radians * 180 / pi
def py_ang(A, B=(1, 0)):
    """Clockwise angle (degrees, 0-360) from ``A`` to ``B``."""
    angle = inner_angle(A, B)
    # A negative determinant means B is clockwise of A; otherwise A is
    # immediately clockwise of B and the complement is returned.
    return angle if determinant(A, B) < 0 else 360 - angle
def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None):
    """Create the affinity field for one keypoint: a disc of direction vectors
    pointing from ``point`` toward ``center``.

    Args:
        width: image width in pixels
        height: image height in pixels
        point: (x, y) keypoint location
        center: (x, y) object centre
        radius: pixel radius of the painted disc
        img_affinity: optional debug image; the disc is drawn on it hue-coded
            by the direction angle
    return:
        (2, height, width) float array and the (possibly updated) debug image
    """
    px, py = point[0], point[1]
    bbox = (px - radius, py - radius, px + radius, py + radius)

    # Paint a white disc on a black canvas to obtain the disc mask.
    canvas = Image.new("RGB", (width, height), "black")
    painter = ImageDraw.Draw(canvas)
    painter.ellipse(bbox, (255, 255, 255))
    del painter

    mask = (np.array(canvas) / 255)[:, :, 0]

    # Direction from point to center, normalised (L1, as elsewhere in this file).
    direction = normalize(np.array(center) - np.array(point))
    affinity = np.concatenate([[mask * direction[0]], [mask * direction[1]]])

    if img_affinity is not None:
        # Hue-code the direction angle on the debug image.
        angle = py_ang(direction) if length(direction) > 0 else 0
        rgb = np.array(colorsys.hsv_to_rgb(angle / 360, 1, 1)) * 255
        painter = ImageDraw.Draw(img_affinity)
        painter.ellipse(bbox, fill=(int(rgb[0]), int(rgb[1]), int(rgb[2])))
        del painter

    # Add onto an all-zero base tensor (keeps dtype/shape explicit).
    base = np.zeros((2, height, width))
    return affinity + base, img_affinity
def GenerateMapAffinity(img_size,img_mode,nb_vertex,points_belief,centroids2d,scale = 8,radius = 1):
    """
    Function to create the affinity maps,
    e.g., vector maps pointing toward the object center.

    Args:
        img_size: (width, height) of the full-resolution image
        img_mode: PIL image mode string (e.g. "RGB") for the debug affinity image
        nb_vertex: (int) number of keypoints per object
        points_belief: nested list [nb objects][nb points][(x, y)] of keypoints
        centroids2d: array of object centroids; only row 0 is used below
        scale: (float) by how much you need to scale down the image
        radius: pixel radius of each affinity disc
    return:
        (2 * nb_vertex, H/scale, W/scale) array of stacked affinity fields
    """
    # Work on copies so the caller's lists/arrays are not mutated.
    pointsInterest = copy.deepcopy(points_belief)
    objects_centroid = copy.deepcopy(centroids2d)

    #scale points_belief into the downscaled coordinate frame
    for i in range(len(pointsInterest[0])):
        pointsInterest[0][i] = (pointsInterest[0][i][0]/scale,pointsInterest[0][i][1]/scale)

    #scale centroids
    objects_centroid = objects_centroid/scale
    # NOTE(review): only centroid row 0 is kept, so this effectively supports
    # a single object even though the loop below iterates len(pointsInterest)
    # -- confirm against callers.
    objects_centroid = [tuple(objects_centroid[0,:])]

    # Apply the downscale right now, so the vectors are correct.
    img_affinity = Image.new(img_mode, (int(img_size[0]/scale),int(img_size[1]/scale)), "black")
    # Create the empty tensors: one (2, H, W) field per keypoint.
    # totensor = transforms.Compose([transforms.ToTensor()])

    affinities = []
    for i_points in range(nb_vertex):
        # affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))
        affinities.append(np.zeros((2,int(img_size[1]/scale),int(img_size[0]/scale))))

    for i_pointsImage in range(len(pointsInterest)):
        pointsImage = pointsInterest[i_pointsImage]
        center = objects_centroid[i_pointsImage]
        for i_points in range(nb_vertex):
            point = pointsImage[i_points]

            # Rasterise the direction-vector disc for this keypoint and
            # average it into the accumulated field.
            affinity_pair, img_affinity = getAfinityCenter(int(img_size[0]/scale),
                                        int(img_size[1]/scale),
                                        tuple((np.array(pointsImage[i_points])/1).tolist()),
                                        tuple((np.array(center)/1).tolist()),
                                        img_affinity = img_affinity, radius=radius)

            affinities[i_points] = (affinities[i_points] + affinity_pair)/2

            # Normalizing: rescale every non-zero (x, y) vector back to unit
            # length (the in-place /= writes through the views xvec/yvec).
            v = affinities[i_points]
            xvec = v[0]
            yvec = v[1]

            norms = np.sqrt(xvec * xvec + yvec * yvec)
            nonzero = norms > 0

            xvec[nonzero]/=norms[nonzero]
            yvec[nonzero]/=norms[nonzero]

            affinities[i_points] = np.concatenate([[xvec],[yvec]])

    # Stack all per-keypoint fields along the channel axis.
    affinities = np.concatenate(affinities,0)
    return affinities
return affinities | [
"numpy.stack",
"copy.deepcopy",
"PIL.Image.new",
"math.sqrt",
"colorsys.hsv_to_rgb",
"numpy.zeros",
"math.acos",
"numpy.finfo",
"numpy.linalg.norm",
"numpy.array",
"numpy.exp",
"numpy.rollaxis",
"PIL.ImageDraw.Draw",
"numpy.concatenate",
"numpy.sqrt"
] | [((964, 992), 'copy.deepcopy', 'copy.deepcopy', (['points_belief'], {}), '(points_belief)\n', (977, 992), False, 'import copy\n'), ((2256, 2288), 'numpy.rollaxis', 'np.rollaxis', (['belief_concat', '(2)', '(0)'], {}), '(belief_concat, 2, 0)\n', (2267, 2288), True, 'import numpy as np\n'), ((2350, 2374), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'ord': '(1)'}), '(v, ord=1)\n', (2364, 2374), True, 'import numpy as np\n'), ((2471, 2498), 'math.sqrt', 'sqrt', (['(v[0] ** 2 + v[1] ** 2)'], {}), '(v[0] ** 2 + v[1] ** 2)\n', (2475, 2498), False, 'from math import sqrt\n'), ((2676, 2686), 'math.acos', 'acos', (['cosx'], {}), '(cosx)\n', (2680, 2686), False, 'from math import acos\n'), ((3443, 3471), 'numpy.zeros', 'np.zeros', (['(2, height, width)'], {}), '((2, height, width))\n', (3451, 3471), True, 'import numpy as np\n'), ((3536, 3578), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '"""black"""'], {}), "('RGB', (width, height), 'black')\n", (3545, 3578), False, 'from PIL import Image\n'), ((3654, 3681), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['imgAffinity'], {}), '(imgAffinity)\n', (3668, 3681), False, 'from PIL import ImageDraw\n'), ((4000, 4070), 'numpy.concatenate', 'np.concatenate', (['[[array * angle_vector[0]], [array * angle_vector[1]]]'], {}), '([[array * angle_vector[0]], [array * angle_vector[1]]])\n', (4014, 4070), True, 'import numpy as np\n'), ((5243, 5271), 'copy.deepcopy', 'copy.deepcopy', (['points_belief'], {}), '(points_belief)\n', (5256, 5271), False, 'import copy\n'), ((5295, 5321), 'copy.deepcopy', 'copy.deepcopy', (['centroids2d'], {}), '(centroids2d)\n', (5308, 5321), False, 'import copy\n'), ((7167, 7196), 'numpy.concatenate', 'np.concatenate', (['affinities', '(0)'], {}), '(affinities, 0)\n', (7181, 7196), True, 'import numpy as np\n'), ((1393, 1411), 'numpy.zeros', 'np.zeros', (['img_size'], {}), '(img_size)\n', (1401, 1411), True, 'import numpy as np\n'), ((1426, 1444), 'numpy.zeros', 'np.zeros', (['img_size'], 
{}), '(img_size)\n', (1434, 1444), True, 'import numpy as np\n'), ((3907, 3923), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (3915, 3923), True, 'import numpy as np\n'), ((3926, 3941), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (3934, 3941), True, 'import numpy as np\n'), ((4396, 4424), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img_affinity'], {}), '(img_affinity)\n', (4410, 4424), False, 'from PIL import ImageDraw\n'), ((2404, 2421), 'numpy.finfo', 'np.finfo', (['v.dtype'], {}), '(v.dtype)\n', (2412, 2421), True, 'import numpy as np\n'), ((3853, 3874), 'numpy.array', 'np.array', (['imgAffinity'], {}), '(imgAffinity)\n', (3861, 3874), True, 'import numpy as np\n'), ((6917, 6951), 'numpy.sqrt', 'np.sqrt', (['(xvec * xvec + yvec * yvec)'], {}), '(xvec * xvec + yvec * yvec)\n', (6924, 6951), True, 'import numpy as np\n'), ((7117, 7149), 'numpy.concatenate', 'np.concatenate', (['[[xvec], [yvec]]'], {}), '([[xvec], [yvec]])\n', (7131, 7149), True, 'import numpy as np\n'), ((1862, 1901), 'numpy.stack', 'np.stack', (['[array, array, array]'], {'axis': '(0)'}), '([array, array, array], axis=0)\n', (1870, 1901), True, 'import numpy as np\n'), ((2137, 2149), 'numpy.array', 'np.array', (['bi'], {}), '(bi)\n', (2145, 2149), True, 'import numpy as np\n'), ((4339, 4377), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['(angle / 360)', '(1)', '(1)'], {}), '(angle / 360, 1, 1)\n', (4358, 4377), False, 'import colorsys\n'), ((1787, 1852), 'numpy.exp', 'np.exp', (['(-(((i - p[0]) ** 2 + (j - p[1]) ** 2) / (2 * sigma ** 2)))'], {}), '(-(((i - p[0]) ** 2 + (j - p[1]) ** 2) / (2 * sigma ** 2)))\n', (1793, 1852), True, 'import numpy as np\n'), ((6512, 6543), 'numpy.array', 'np.array', (['pointsImage[i_points]'], {}), '(pointsImage[i_points])\n', (6520, 6543), True, 'import numpy as np\n'), ((6581, 6597), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (6589, 6597), True, 'import numpy as np\n')] |
'''
Author: <NAME> <<EMAIL>>
If you use this code, please cite the following paper:
<NAME>, and <NAME>. Unsupervised Depth Completion with Calibrated Backprojection Layers.
https://arxiv.org/pdf/2108.10531.pdf
@inproceedings{wong2021unsupervised,
title={Unsupervised Depth Completion with Calibrated Backprojection Layers},
author={<NAME> and <NAME>},
booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
pages={12747--12756},
year={2021}
}
'''
import warnings
warnings.filterwarnings("ignore")
import os, sys, glob, cv2, argparse
import multiprocessing as mp
import numpy as np
sys.path.insert(0, 'src')
import data_utils
from sklearn.cluster import MiniBatchKMeans

# Default number of sparse depth points sampled per frame (k-means cluster count)
N_CLUSTER = 1500
# Original NYUv2 frame resolution
O_HEIGHT = 480
O_WIDTH = 640
# Output resolution after center cropping away the white borders
N_HEIGHT = 416
N_WIDTH = 576
# Minimum number of valid sparse points required to keep a frame
MIN_POINTS = 1100
# Number of frames spanned by the temporal window (t-w ... t ... t+w)
TEMPORAL_WINDOW = 21
# Seed for MiniBatchKMeans so sparse point selection is reproducible
RANDOM_SEED = 1

# Command line arguments; defaults mirror the constants above
parser = argparse.ArgumentParser()
parser.add_argument('--sparse_depth_distro_type', type=str, default='corner')
parser.add_argument('--n_points', type=int, default=N_CLUSTER)
parser.add_argument('--min_points', type=int, default=MIN_POINTS)
parser.add_argument('--temporal_window', type=int, default=TEMPORAL_WINDOW)
parser.add_argument('--n_height', type=int, default=N_HEIGHT)
parser.add_argument('--n_width', type=int, default=N_WIDTH)

args = parser.parse_args()

# Input dataset root, output dataset root, and official test split files
NYU_ROOT_DIRPATH = \
    os.path.join('data', 'nyu_v2')
NYU_OUTPUT_DIRPATH = \
    os.path.join('data', 'nyu_v2_kbnet')
NYU_TEST_IMAGE_SPLIT_FILEPATH = \
    os.path.join('setup', 'nyu_v2_test_image.txt')
NYU_TEST_DEPTH_SPLIT_FILEPATH = \
    os.path.join('setup', 'nyu_v2_test_depth.txt')

# Directories that will hold the generated train/val/test path list files
TRAIN_REF_DIRPATH = os.path.join('training', 'nyu_v2')
VAL_REF_DIRPATH = os.path.join('validation', 'nyu_v2')
TEST_REF_DIRPATH = os.path.join('testing', 'nyu_v2')

# Output path list files; filenames encode the sparse depth sampling scheme
TRAIN_IMAGE_OUTPUT_FILEPATH = \
    os.path.join(TRAIN_REF_DIRPATH, 'nyu_v2_train_image_{}.txt'.format(args.sparse_depth_distro_type))
TRAIN_SPARSE_DEPTH_OUTPUT_FILEPATH = \
    os.path.join(TRAIN_REF_DIRPATH, 'nyu_v2_train_sparse_depth_{}.txt'.format(args.sparse_depth_distro_type))
TRAIN_VALIDITY_MAP_OUTPUT_FILEPATH = \
    os.path.join(TRAIN_REF_DIRPATH, 'nyu_v2_train_validity_map_{}.txt'.format(args.sparse_depth_distro_type))
TRAIN_GROUND_TRUTH_OUTPUT_FILEPATH = \
    os.path.join(TRAIN_REF_DIRPATH, 'nyu_v2_train_ground_truth_{}.txt'.format(args.sparse_depth_distro_type))
TRAIN_INTRINSICS_OUTPUT_FILEPATH = \
    os.path.join(TRAIN_REF_DIRPATH, 'nyu_v2_train_intrinsics_{}.txt'.format(args.sparse_depth_distro_type))
VAL_IMAGE_OUTPUT_FILEPATH = \
    os.path.join(VAL_REF_DIRPATH, 'nyu_v2_val_image_{}.txt'.format(args.sparse_depth_distro_type))
VAL_SPARSE_DEPTH_OUTPUT_FILEPATH = \
    os.path.join(VAL_REF_DIRPATH, 'nyu_v2_val_sparse_depth_{}.txt'.format(args.sparse_depth_distro_type))
VAL_VALIDITY_MAP_OUTPUT_FILEPATH = \
    os.path.join(VAL_REF_DIRPATH, 'nyu_v2_val_validity_map_{}.txt'.format(args.sparse_depth_distro_type))
VAL_GROUND_TRUTH_OUTPUT_FILEPATH = \
    os.path.join(VAL_REF_DIRPATH, 'nyu_v2_val_ground_truth_{}.txt'.format(args.sparse_depth_distro_type))
VAL_INTRINSICS_OUTPUT_FILEPATH = \
    os.path.join(VAL_REF_DIRPATH, 'nyu_v2_val_intrinsics_{}.txt'.format(args.sparse_depth_distro_type))
TEST_IMAGE_OUTPUT_FILEPATH = \
    os.path.join(TEST_REF_DIRPATH, 'nyu_v2_test_image_{}.txt'.format(args.sparse_depth_distro_type))
TEST_SPARSE_DEPTH_OUTPUT_FILEPATH = \
    os.path.join(TEST_REF_DIRPATH, 'nyu_v2_test_sparse_depth_{}.txt'.format(args.sparse_depth_distro_type))
TEST_VALIDITY_MAP_OUTPUT_FILEPATH = \
    os.path.join(TEST_REF_DIRPATH, 'nyu_v2_test_validity_map_{}.txt'.format(args.sparse_depth_distro_type))
TEST_GROUND_TRUTH_OUTPUT_FILEPATH = \
    os.path.join(TEST_REF_DIRPATH, 'nyu_v2_test_ground_truth_{}.txt'.format(args.sparse_depth_distro_type))
TEST_INTRINSICS_OUTPUT_FILEPATH = \
    os.path.join(TEST_REF_DIRPATH, 'nyu_v2_test_intrinsics_{}.txt'.format(args.sparse_depth_distro_type))
def process_frame(inputs):
    '''
    Processes a single frame: samples a sparse depth map from the dense
    ground truth, validates it, and writes the concatenated image triplet,
    sparse depth, validity map, and ground truth into the output dataset.

    Arg(s):
        inputs : tuple
            image path at time t=0,
            image path at time t=-w (one temporal half-window back),
            image path at time t=+w (one temporal half-window forward),
            ground truth (dense depth) path at time t=0
    Returns:
        str : output concatenated image path at time t=0 ('error' on failure)
        str : output sparse depth path at time t=0 ('error' on failure)
        str : output validity map path at time t=0 ('error' on failure)
        str : output ground truth path at time t=0 ('error' on failure)
    '''

    image0_path, image1_path, image2_path, ground_truth_path = inputs

    # Load image (grayscale, for corner detection) to generate the validity map
    image0 = cv2.imread(image0_path)
    image0 = np.float32(cv2.cvtColor(image0, cv2.COLOR_BGR2GRAY))

    # Load dense depth
    ground_truth = data_utils.load_depth(ground_truth_path)

    # Image and depth must agree and match the expected raw resolution
    assert image0.shape[0] == ground_truth.shape[0] and image0.shape[1] == ground_truth.shape[1]
    assert image0.shape[0] == O_HEIGHT and image0.shape[1] == O_WIDTH

    # Crop away white borders with a symmetric center crop
    if args.n_height != O_HEIGHT or args.n_width != O_WIDTH:
        d_height = O_HEIGHT - args.n_height
        d_width = O_WIDTH - args.n_width

        y_start = d_height // 2
        x_start = d_width // 2
        y_end = y_start + args.n_height
        x_end = x_start + args.n_width

        image0 = image0[y_start:y_end, x_start:x_end]
        ground_truth = ground_truth[y_start:y_end, x_start:x_end]

    if args.sparse_depth_distro_type == 'corner':
        # Over-sample candidate corners, then cluster down to n_points
        N_INIT_CORNER = 30000

        # Run Harris corner detector
        corners = cv2.cornerHarris(image0, blockSize=5, ksize=3, k=0.04)

        # Remove the corners that are located on invalid depth locations
        corners = corners * np.where(ground_truth > 0.0, 1.0, 0.0)

        # Vectorize corner map to 1D vector and select N_INIT_CORNER corner locations
        # NOTE(review): argsort is ascending, so this keeps the LOWEST Harris
        # responses; confirm whether the strongest corners (argsort of -corners)
        # were intended.
        corners = corners.ravel()
        corner_locations = np.argsort(corners)[0:N_INIT_CORNER]

        # Recover 2D (row, col) indices from the flat indices
        corner_locations = np.unravel_index(
            corner_locations,
            (image0.shape[0], image0.shape[1]))

        # Convert to (y, x) convention: one (row, col) pair per candidate
        corner_locations = \
            np.transpose(np.array([corner_locations[0], corner_locations[1]]))

        # Cluster them into n_points (number of output points)
        kmeans = MiniBatchKMeans(
            n_clusters=args.n_points,
            max_iter=2,
            n_init=1,
            init_size=None,
            random_state=RANDOM_SEED,
            reassignment_ratio=1e-11)
        kmeans.fit(corner_locations)

        # Use k-Means means (cluster centers) as the sampled point locations
        selected_indices = kmeans.cluster_centers_.astype(np.uint16)

    elif args.sparse_depth_distro_type == 'uniform':
        # All (row, col) pixel coordinates of the cropped frame
        indices = \
            np.array([[h, w] for h in range(args.n_height) for w in range(args.n_width)])

        # Randomly select n_points number of points
        selected_indices = \
            np.random.permutation(range(args.n_height * args.n_width))[0:args.n_points]
        selected_indices = indices[selected_indices]

    else:
        raise ValueError('Unsupported sparse depth distribution type: {}'.format(
            args.sparse_depth_distro_type))

    # Convert the indices into a validity map (1 at sampled locations)
    validity_map = np.zeros_like(image0).astype(np.int16)
    validity_map[selected_indices[:, 0], selected_indices[:, 1]] = 1.0

    # Keep only sampled points that also have valid (positive) ground truth depth
    validity_map = np.where(validity_map * ground_truth > 0.0, 1.0, 0.0)

    # Get sparse depth based on validity map
    sparse_depth = validity_map * ground_truth

    # Shape check
    error_flag = False

    if np.squeeze(sparse_depth).shape != (args.n_height, args.n_width):
        error_flag = True
        print('FAILED: np.squeeze(sparse_depth).shape != ({}, {})'.format(args.n_height, args.n_width))

    # Validity map check: must contain exactly the values {0, 1}
    if not np.array_equal(np.unique(validity_map), np.array([0, 1])):
        error_flag = True
        print('FAILED: not np.array_equal(np.unique(validity_map), np.array([0, 1]))')

    if validity_map.sum() < args.min_points:
        error_flag = True
        print('FAILED: validity_map.sum() < MIN_POINTS')

    # Depth value check: depth must stay within the plausible sensor range
    if np.min(ground_truth) < 0.0 or np.max(ground_truth) > 256.0:
        error_flag = True
        print('FAILED: np.min(ground_truth) < 0.0 or np.max(ground_truth) > 256.0')

    if np.sum(np.where(validity_map > 0.0, 1.0, 0.0)) < args.min_points:
        error_flag = True
        print('FAILED: np.sum(np.where(validity_map > 0.0, 1.0, 0.0)) < MIN_POINTS', np.sum(np.where(validity_map > 0.0, 1.0, 0.0)))

    if np.sum(np.where(ground_truth > 0.0, 1.0, 0.0)) < args.min_points:
        error_flag = True
        print('FAILED: np.sum(np.where(ground_truth > 0.0, 1.0, 0.0)) < MIN_POINTS')

    # NaN check
    if np.any(np.isnan(sparse_depth)):
        error_flag = True
        print('FAILED: np.any(np.isnan(sparse_depth))')

    if not error_flag:
        # Read the color image triplet and concatenate side by side
        image0 = cv2.imread(image0_path)
        image1 = cv2.imread(image1_path)
        image2 = cv2.imread(image2_path)

        # Apply the same center crop as used for depth above
        if args.n_height != O_HEIGHT or args.n_width != O_WIDTH:
            image0 = image0[y_start:y_end, x_start:x_end, :]
            image1 = image1[y_start:y_end, x_start:x_end, :]
            image2 = image2[y_start:y_end, x_start:x_end, :]

        imagec = np.concatenate([image1, image0, image2], axis=1)

        # Derive output paths by swapping dataset roots and the 'depth' path token.
        # Example: nyu/training/depths/raw_data/bedroom_0001/r-1294886360.208451-2996770081.png
        image_output_path = image0_path \
            .replace(NYU_ROOT_DIRPATH, NYU_OUTPUT_DIRPATH)
        sparse_depth_output_path = ground_truth_path \
            .replace(NYU_ROOT_DIRPATH, NYU_OUTPUT_DIRPATH) \
            .replace('depth', 'sparse_depth')
        validity_map_output_path = ground_truth_path \
            .replace(NYU_ROOT_DIRPATH, NYU_OUTPUT_DIRPATH) \
            .replace('depth', 'validity_map')
        ground_truth_output_path = ground_truth_path \
            .replace(NYU_ROOT_DIRPATH, NYU_OUTPUT_DIRPATH) \
            .replace('depth', 'ground_truth')

        image_output_dirpath = os.path.dirname(image_output_path)
        sparse_depth_output_dirpath = os.path.dirname(sparse_depth_output_path)
        validity_map_output_dirpath = os.path.dirname(validity_map_output_path)
        ground_truth_output_dirpath = os.path.dirname(ground_truth_output_path)

        # Create output directories
        output_dirpaths = [
            image_output_dirpath,
            sparse_depth_output_dirpath,
            validity_map_output_dirpath,
            ground_truth_output_dirpath,
        ]

        for dirpath in output_dirpaths:
            if not os.path.exists(dirpath):
                os.makedirs(dirpath, exist_ok=True)

        # Write to file
        cv2.imwrite(image_output_path, imagec)
        data_utils.save_depth(sparse_depth, sparse_depth_output_path)
        data_utils.save_validity_map(validity_map, validity_map_output_path)
        data_utils.save_depth(ground_truth, ground_truth_output_path)
    else:
        # Signal failure to the caller via sentinel paths
        print('Found error in {}'.format(ground_truth_path))
        image_output_path = 'error'
        sparse_depth_output_path = 'error'
        validity_map_output_path = 'error'
        ground_truth_output_path = 'error'

    return (image_output_path,
            sparse_depth_output_path,
            validity_map_output_path,
            ground_truth_output_path)
def filter_sequence(seq):
    '''
    Decides whether a sequence path belongs to the retained training subset.

    Arg(s):
        seq : str
            sequence directory path
    Returns:
        bool : True if the path contains one of the retained scene suffixes
    '''

    # Keep only the first five sequences of each scene (e.g. '*_0000/' ... '*_0004/')
    kept_suffixes = ('_0000/', '_0001/', '_0002/', '_0003/', '_0004/')

    return any(suffix in seq for suffix in kept_suffixes)

def filter_paths(paths):
    '''
    Filters a list of paths down to the retained training sequences.

    Arg(s):
        paths : list[str]
            sequence directory paths
    Returns:
        list[str] : paths accepted by filter_sequence, in original order
    '''

    return [path for path in paths if filter_sequence(path)]
# Create output directories first
dirpaths = [
    NYU_OUTPUT_DIRPATH,
    TRAIN_REF_DIRPATH,
    VAL_REF_DIRPATH,
    TEST_REF_DIRPATH
]

for dirpath in dirpaths:
    if not os.path.exists(dirpath):
        os.makedirs(dirpath, exist_ok=True)

'''
Setup intrinsics (values are copied from camera_params.m)
'''
# RGB camera focal lengths in pixels
fx_rgb = 518.85790117450188
fy_rgb = 519.46961112127485
# RGB camera principal point in pixels
cx_rgb = 325.58244941119034
cy_rgb = 253.73616633400465

intrinsic_matrix = np.array([
    [fx_rgb, 0., cx_rgb],
    [0., fy_rgb, cy_rgb],
    [0., 0., 1. ]], dtype=np.float32)

# Center cropping shifts the principal point by the crop offset
if args.n_height != O_HEIGHT or args.n_width != O_WIDTH:
    d_height = O_HEIGHT - args.n_height
    d_width = O_WIDTH - args.n_width

    y_start = d_height // 2
    x_start = d_width // 2

    intrinsic_matrix = intrinsic_matrix + [[0.0, 0.0, -x_start],
                                           [0.0, 0.0, -y_start],
                                           [0.0, 0.0, 0.0     ]]

# Single intrinsics file shared by every sample in the dataset
intrinsics_output_path = os.path.join(NYU_OUTPUT_DIRPATH, 'intrinsics.npy')
np.save(intrinsics_output_path, intrinsic_matrix)
'''
Process training paths
'''
train_image_output_paths = []
train_sparse_depth_output_paths = []
train_validity_map_output_paths = []
train_ground_truth_output_paths = []
train_intrinsics_output_paths = [intrinsics_output_path]

# Each scene directory under raw_data holds one image/depth sequence
train_image_sequences = sorted(glob.glob(
    os.path.join(NYU_ROOT_DIRPATH, 'training', 'images', 'raw_data', '*/')))
train_depth_sequences = sorted(glob.glob(
    os.path.join(NYU_ROOT_DIRPATH, 'training', 'depths', 'raw_data', '*/')))

# Use only a subset for training
train_image_sequences = filter_paths(train_image_sequences)
train_depth_sequences = filter_paths(train_depth_sequences)

# Half-width of the temporal window: frames t-w and t+w flank frame t
w = int(args.temporal_window // 2)

for image_sequence, depth_sequence in zip(train_image_sequences, train_depth_sequences):

    # Fetch image and dense depth from sequence directory
    image_paths = \
        sorted(glob.glob(os.path.join(image_sequence, '*.png')))
    ground_truth_paths = \
        sorted(glob.glob(os.path.join(depth_sequence, '*.png')))

    n_sample = len(image_paths)

    # Image and depth frames must pair up one-to-one; zip would silently truncate
    assert len(image_paths) == len(ground_truth_paths)

    # Sanity check: each image/depth pair must come from the same scene directory.
    # (The previous check compared image_path against itself and was vacuously true.)
    for image_path, ground_truth_path in zip(image_paths, ground_truth_paths):
        assert image_path.split(os.sep)[-2] == ground_truth_path.split(os.sep)[-2]

    # Build (t, t-w, t+w, ground truth) tuples; frames without a full window are skipped
    pool_input = [
        (image_paths[idx], image_paths[idx-w], image_paths[idx+w], ground_truth_paths[idx])
        for idx in range(w, n_sample - w)
    ]

    print('Processing {} samples in: {}'.format(len(pool_input), image_sequence))

    with mp.Pool() as pool:
        pool_results = pool.map(process_frame, pool_input)

        for result in pool_results:
            image_output_path, \
                sparse_depth_output_path, \
                validity_map_output_path, \
                ground_truth_output_path = result

            error_encountered = \
                image_output_path == 'error' or \
                sparse_depth_output_path == 'error' or \
                validity_map_output_path == 'error' or \
                ground_truth_output_path == 'error'

            # Frames that failed validation inside process_frame are dropped
            if error_encountered:
                continue

            # Collect filepaths
            train_image_output_paths.append(image_output_path)
            train_sparse_depth_output_paths.append(sparse_depth_output_path)
            train_validity_map_output_paths.append(validity_map_output_path)
            train_ground_truth_output_paths.append(ground_truth_output_path)

# Intrinsics are shared by every sample; repeat the single path once per output sample
train_intrinsics_output_paths = train_intrinsics_output_paths * len(train_image_output_paths)

print('Storing {} training image file paths into: {}'.format(
    len(train_image_output_paths), TRAIN_IMAGE_OUTPUT_FILEPATH))
data_utils.write_paths(TRAIN_IMAGE_OUTPUT_FILEPATH, train_image_output_paths)

print('Storing {} training sparse depth file paths into: {}'.format(
    len(train_sparse_depth_output_paths), TRAIN_SPARSE_DEPTH_OUTPUT_FILEPATH))
data_utils.write_paths(TRAIN_SPARSE_DEPTH_OUTPUT_FILEPATH, train_sparse_depth_output_paths)

print('Storing {} training validity_map file paths into: {}'.format(
    len(train_validity_map_output_paths), TRAIN_VALIDITY_MAP_OUTPUT_FILEPATH))
data_utils.write_paths(TRAIN_VALIDITY_MAP_OUTPUT_FILEPATH, train_validity_map_output_paths)

print('Storing {} training ground truth file paths into: {}'.format(
    len(train_ground_truth_output_paths), TRAIN_GROUND_TRUTH_OUTPUT_FILEPATH))
data_utils.write_paths(TRAIN_GROUND_TRUTH_OUTPUT_FILEPATH, train_ground_truth_output_paths)

print('Storing {} training intrinsics file paths into: {}'.format(
    len(train_intrinsics_output_paths), TRAIN_INTRINSICS_OUTPUT_FILEPATH))
data_utils.write_paths(TRAIN_INTRINSICS_OUTPUT_FILEPATH, train_intrinsics_output_paths)
'''
Process validation and testing paths
'''
test_image_split_paths = data_utils.read_paths(NYU_TEST_IMAGE_SPLIT_FILEPATH)

val_image_output_paths = []
val_sparse_depth_output_paths = []
val_validity_map_output_paths = []
val_ground_truth_output_paths = []
val_intrinsics_output_paths = [intrinsics_output_path]

test_image_output_paths = []
test_sparse_depth_output_paths = []
test_validity_map_output_paths = []
test_ground_truth_output_paths = []
test_intrinsics_output_paths = [intrinsics_output_path]

test_image_paths = sorted(glob.glob(
    os.path.join(NYU_ROOT_DIRPATH, 'testing', 'images', '*.png')))
test_ground_truth_paths = sorted(glob.glob(
    os.path.join(NYU_ROOT_DIRPATH, 'testing', 'depths', '*.png')))

n_sample = len(test_image_paths)

# Image and depth frames must pair up one-to-one; zip would silently truncate
assert len(test_image_paths) == len(test_ground_truth_paths)

# Sanity check: paired image/depth files should share the same filename.
# (The previous check compared image_path against itself and was vacuously true.)
for image_path, ground_truth_path in zip(test_image_paths, test_ground_truth_paths):
    assert os.path.basename(image_path) == os.path.basename(ground_truth_path)

# No temporal window at evaluation time: frame t stands in for t-w and t+w
pool_input = [
    (test_image_paths[idx], test_image_paths[idx], test_image_paths[idx], test_ground_truth_paths[idx])
    for idx in range(n_sample)
]

print('Processing {} samples for validation and testing'.format(n_sample))

with mp.Pool() as pool:
    pool_results = pool.map(process_frame, pool_input)

    for result in pool_results:
        image_output_path, \
            sparse_depth_output_path, \
            validity_map_output_path, \
            ground_truth_output_path = result

        error_encountered = \
            image_output_path == 'error' or \
            sparse_depth_output_path == 'error' or \
            validity_map_output_path == 'error' or \
            ground_truth_output_path == 'error'

        # Frames that failed validation inside process_frame are dropped
        if error_encountered:
            continue

        # A frame belongs to the test split iff it appears in the official split file
        test_split = any(
            test_image_path in image_output_path
            for test_image_path in test_image_split_paths)

        if test_split:
            # Collect test filepaths
            test_image_output_paths.append(image_output_path)
            test_sparse_depth_output_paths.append(sparse_depth_output_path)
            test_validity_map_output_paths.append(validity_map_output_path)
            test_ground_truth_output_paths.append(ground_truth_output_path)
        else:
            # Collect validation filepaths
            val_image_output_paths.append(image_output_path)
            val_sparse_depth_output_paths.append(sparse_depth_output_path)
            val_validity_map_output_paths.append(validity_map_output_path)
            val_ground_truth_output_paths.append(ground_truth_output_path)

# Intrinsics are shared by every sample; repeat the single path once per output sample
val_intrinsics_output_paths = val_intrinsics_output_paths * len(val_image_output_paths)
test_intrinsics_output_paths = test_intrinsics_output_paths * len(test_image_output_paths)

'''
Write validation output paths
'''
print('Storing {} validation image file paths into: {}'.format(
    len(val_image_output_paths), VAL_IMAGE_OUTPUT_FILEPATH))
data_utils.write_paths(VAL_IMAGE_OUTPUT_FILEPATH, val_image_output_paths)

print('Storing {} validation sparse depth file paths into: {}'.format(
    len(val_sparse_depth_output_paths), VAL_SPARSE_DEPTH_OUTPUT_FILEPATH))
data_utils.write_paths(VAL_SPARSE_DEPTH_OUTPUT_FILEPATH, val_sparse_depth_output_paths)

print('Storing {} validation validity_map file paths into: {}'.format(
    len(val_validity_map_output_paths), VAL_VALIDITY_MAP_OUTPUT_FILEPATH))
data_utils.write_paths(VAL_VALIDITY_MAP_OUTPUT_FILEPATH, val_validity_map_output_paths)

print('Storing {} validation dense depth file paths into: {}'.format(
    len(val_ground_truth_output_paths), VAL_GROUND_TRUTH_OUTPUT_FILEPATH))
data_utils.write_paths(VAL_GROUND_TRUTH_OUTPUT_FILEPATH, val_ground_truth_output_paths)

print('Storing {} validation intrinsics file paths into: {}'.format(
    len(val_intrinsics_output_paths), VAL_INTRINSICS_OUTPUT_FILEPATH))
data_utils.write_paths(VAL_INTRINSICS_OUTPUT_FILEPATH, val_intrinsics_output_paths)

'''
Write testing output paths
'''
print('Storing {} testing image file paths into: {}'.format(
    len(test_image_output_paths), TEST_IMAGE_OUTPUT_FILEPATH))
data_utils.write_paths(TEST_IMAGE_OUTPUT_FILEPATH, test_image_output_paths)

print('Storing {} testing sparse depth file paths into: {}'.format(
    len(test_sparse_depth_output_paths), TEST_SPARSE_DEPTH_OUTPUT_FILEPATH))
data_utils.write_paths(TEST_SPARSE_DEPTH_OUTPUT_FILEPATH, test_sparse_depth_output_paths)

print('Storing {} testing validity_map file paths into: {}'.format(
    len(test_validity_map_output_paths), TEST_VALIDITY_MAP_OUTPUT_FILEPATH))
data_utils.write_paths(TEST_VALIDITY_MAP_OUTPUT_FILEPATH, test_validity_map_output_paths)

print('Storing {} testing dense depth file paths into: {}'.format(
    len(test_ground_truth_output_paths), TEST_GROUND_TRUTH_OUTPUT_FILEPATH))
data_utils.write_paths(TEST_GROUND_TRUTH_OUTPUT_FILEPATH, test_ground_truth_output_paths)

print('Storing {} testing intrinsics file paths into: {}'.format(
    len(test_intrinsics_output_paths), TEST_INTRINSICS_OUTPUT_FILEPATH))
data_utils.write_paths(TEST_INTRINSICS_OUTPUT_FILEPATH, test_intrinsics_output_paths)
| [
"sklearn.cluster.MiniBatchKMeans",
"argparse.ArgumentParser",
"numpy.isnan",
"numpy.argsort",
"data_utils.write_paths",
"os.path.join",
"numpy.unique",
"numpy.zeros_like",
"cv2.cvtColor",
"cv2.imwrite",
"os.path.dirname",
"os.path.exists",
"numpy.max",
"data_utils.save_validity_map",
"cv... | [((506, 539), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (529, 539), False, 'import warnings\n'), ((625, 650), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""src"""'], {}), "(0, 'src')\n", (640, 650), False, 'import os, sys, glob, cv2, argparse\n'), ((855, 880), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (878, 880), False, 'import os, sys, glob, cv2, argparse\n'), ((1414, 1444), 'os.path.join', 'os.path.join', (['"""data"""', '"""nyu_v2"""'], {}), "('data', 'nyu_v2')\n", (1426, 1444), False, 'import os, sys, glob, cv2, argparse\n'), ((1472, 1508), 'os.path.join', 'os.path.join', (['"""data"""', '"""nyu_v2_kbnet"""'], {}), "('data', 'nyu_v2_kbnet')\n", (1484, 1508), False, 'import os, sys, glob, cv2, argparse\n'), ((1548, 1594), 'os.path.join', 'os.path.join', (['"""setup"""', '"""nyu_v2_test_image.txt"""'], {}), "('setup', 'nyu_v2_test_image.txt')\n", (1560, 1594), False, 'import os, sys, glob, cv2, argparse\n'), ((1633, 1679), 'os.path.join', 'os.path.join', (['"""setup"""', '"""nyu_v2_test_depth.txt"""'], {}), "('setup', 'nyu_v2_test_depth.txt')\n", (1645, 1679), False, 'import os, sys, glob, cv2, argparse\n'), ((1701, 1735), 'os.path.join', 'os.path.join', (['"""training"""', '"""nyu_v2"""'], {}), "('training', 'nyu_v2')\n", (1713, 1735), False, 'import os, sys, glob, cv2, argparse\n'), ((1754, 1790), 'os.path.join', 'os.path.join', (['"""validation"""', '"""nyu_v2"""'], {}), "('validation', 'nyu_v2')\n", (1766, 1790), False, 'import os, sys, glob, cv2, argparse\n'), ((1810, 1843), 'os.path.join', 'os.path.join', (['"""testing"""', '"""nyu_v2"""'], {}), "('testing', 'nyu_v2')\n", (1822, 1843), False, 'import os, sys, glob, cv2, argparse\n'), ((12397, 12492), 'numpy.array', 'np.array', (['[[fx_rgb, 0.0, cx_rgb], [0.0, fy_rgb, cy_rgb], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[fx_rgb, 0.0, cx_rgb], [0.0, fy_rgb, cy_rgb], [0.0, 0.0, 1.0]],\n dtype=np.float32)\n', 
(12405, 12492), True, 'import numpy as np\n'), ((12938, 12988), 'os.path.join', 'os.path.join', (['NYU_OUTPUT_DIRPATH', '"""intrinsics.npy"""'], {}), "(NYU_OUTPUT_DIRPATH, 'intrinsics.npy')\n", (12950, 12988), False, 'import os, sys, glob, cv2, argparse\n'), ((12989, 13038), 'numpy.save', 'np.save', (['intrinsics_output_path', 'intrinsic_matrix'], {}), '(intrinsics_output_path, intrinsic_matrix)\n', (12996, 13038), True, 'import numpy as np\n'), ((15655, 15732), 'data_utils.write_paths', 'data_utils.write_paths', (['TRAIN_IMAGE_OUTPUT_FILEPATH', 'train_image_output_paths'], {}), '(TRAIN_IMAGE_OUTPUT_FILEPATH, train_image_output_paths)\n', (15677, 15732), False, 'import data_utils\n'), ((15882, 15977), 'data_utils.write_paths', 'data_utils.write_paths', (['TRAIN_SPARSE_DEPTH_OUTPUT_FILEPATH', 'train_sparse_depth_output_paths'], {}), '(TRAIN_SPARSE_DEPTH_OUTPUT_FILEPATH,\n train_sparse_depth_output_paths)\n', (15904, 15977), False, 'import data_utils\n'), ((16123, 16218), 'data_utils.write_paths', 'data_utils.write_paths', (['TRAIN_VALIDITY_MAP_OUTPUT_FILEPATH', 'train_validity_map_output_paths'], {}), '(TRAIN_VALIDITY_MAP_OUTPUT_FILEPATH,\n train_validity_map_output_paths)\n', (16145, 16218), False, 'import data_utils\n'), ((16364, 16459), 'data_utils.write_paths', 'data_utils.write_paths', (['TRAIN_GROUND_TRUTH_OUTPUT_FILEPATH', 'train_ground_truth_output_paths'], {}), '(TRAIN_GROUND_TRUTH_OUTPUT_FILEPATH,\n train_ground_truth_output_paths)\n', (16386, 16459), False, 'import data_utils\n'), ((16599, 16690), 'data_utils.write_paths', 'data_utils.write_paths', (['TRAIN_INTRINSICS_OUTPUT_FILEPATH', 'train_intrinsics_output_paths'], {}), '(TRAIN_INTRINSICS_OUTPUT_FILEPATH,\n train_intrinsics_output_paths)\n', (16621, 16690), False, 'import data_utils\n'), ((16759, 16811), 'data_utils.read_paths', 'data_utils.read_paths', (['NYU_TEST_IMAGE_SPLIT_FILEPATH'], {}), '(NYU_TEST_IMAGE_SPLIT_FILEPATH)\n', (16780, 16811), False, 'import data_utils\n'), ((19627, 19700), 
'data_utils.write_paths', 'data_utils.write_paths', (['VAL_IMAGE_OUTPUT_FILEPATH', 'val_image_output_paths'], {}), '(VAL_IMAGE_OUTPUT_FILEPATH, val_image_output_paths)\n', (19649, 19700), False, 'import data_utils\n'), ((19848, 19939), 'data_utils.write_paths', 'data_utils.write_paths', (['VAL_SPARSE_DEPTH_OUTPUT_FILEPATH', 'val_sparse_depth_output_paths'], {}), '(VAL_SPARSE_DEPTH_OUTPUT_FILEPATH,\n val_sparse_depth_output_paths)\n', (19870, 19939), False, 'import data_utils\n'), ((20083, 20174), 'data_utils.write_paths', 'data_utils.write_paths', (['VAL_VALIDITY_MAP_OUTPUT_FILEPATH', 'val_validity_map_output_paths'], {}), '(VAL_VALIDITY_MAP_OUTPUT_FILEPATH,\n val_validity_map_output_paths)\n', (20105, 20174), False, 'import data_utils\n'), ((20317, 20408), 'data_utils.write_paths', 'data_utils.write_paths', (['VAL_GROUND_TRUTH_OUTPUT_FILEPATH', 'val_ground_truth_output_paths'], {}), '(VAL_GROUND_TRUTH_OUTPUT_FILEPATH,\n val_ground_truth_output_paths)\n', (20339, 20408), False, 'import data_utils\n'), ((20546, 20633), 'data_utils.write_paths', 'data_utils.write_paths', (['VAL_INTRINSICS_OUTPUT_FILEPATH', 'val_intrinsics_output_paths'], {}), '(VAL_INTRINSICS_OUTPUT_FILEPATH,\n val_intrinsics_output_paths)\n', (20568, 20633), False, 'import data_utils\n'), ((20791, 20866), 'data_utils.write_paths', 'data_utils.write_paths', (['TEST_IMAGE_OUTPUT_FILEPATH', 'test_image_output_paths'], {}), '(TEST_IMAGE_OUTPUT_FILEPATH, test_image_output_paths)\n', (20813, 20866), False, 'import data_utils\n'), ((21013, 21106), 'data_utils.write_paths', 'data_utils.write_paths', (['TEST_SPARSE_DEPTH_OUTPUT_FILEPATH', 'test_sparse_depth_output_paths'], {}), '(TEST_SPARSE_DEPTH_OUTPUT_FILEPATH,\n test_sparse_depth_output_paths)\n', (21035, 21106), False, 'import data_utils\n'), ((21249, 21342), 'data_utils.write_paths', 'data_utils.write_paths', (['TEST_VALIDITY_MAP_OUTPUT_FILEPATH', 'test_validity_map_output_paths'], {}), '(TEST_VALIDITY_MAP_OUTPUT_FILEPATH,\n 
test_validity_map_output_paths)\n', (21271, 21342), False, 'import data_utils\n'), ((21484, 21577), 'data_utils.write_paths', 'data_utils.write_paths', (['TEST_GROUND_TRUTH_OUTPUT_FILEPATH', 'test_ground_truth_output_paths'], {}), '(TEST_GROUND_TRUTH_OUTPUT_FILEPATH,\n test_ground_truth_output_paths)\n', (21506, 21577), False, 'import data_utils\n'), ((21714, 21803), 'data_utils.write_paths', 'data_utils.write_paths', (['TEST_INTRINSICS_OUTPUT_FILEPATH', 'test_intrinsics_output_paths'], {}), '(TEST_INTRINSICS_OUTPUT_FILEPATH,\n test_intrinsics_output_paths)\n', (21736, 21803), False, 'import data_utils\n'), ((4699, 4722), 'cv2.imread', 'cv2.imread', (['image0_path'], {}), '(image0_path)\n', (4709, 4722), False, 'import os, sys, glob, cv2, argparse\n'), ((4832, 4872), 'data_utils.load_depth', 'data_utils.load_depth', (['ground_truth_path'], {}), '(ground_truth_path)\n', (4853, 4872), False, 'import data_utils\n'), ((7556, 7609), 'numpy.where', 'np.where', (['(validity_map * ground_truth > 0.0)', '(1.0)', '(0.0)'], {}), '(validity_map * ground_truth > 0.0, 1.0, 0.0)\n', (7564, 7609), True, 'import numpy as np\n'), ((17874, 17883), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (17881, 17883), True, 'import multiprocessing as mp\n'), ((4747, 4787), 'cv2.cvtColor', 'cv2.cvtColor', (['image0', 'cv2.COLOR_BGR2GRAY'], {}), '(image0, cv2.COLOR_BGR2GRAY)\n', (4759, 4787), False, 'import os, sys, glob, cv2, argparse\n'), ((5619, 5673), 'cv2.cornerHarris', 'cv2.cornerHarris', (['image0'], {'blockSize': '(5)', 'ksize': '(3)', 'k': '(0.04)'}), '(image0, blockSize=5, ksize=3, k=0.04)\n', (5635, 5673), False, 'import os, sys, glob, cv2, argparse\n'), ((6084, 6154), 'numpy.unravel_index', 'np.unravel_index', (['corner_locations', '(image0.shape[0], image0.shape[1])'], {}), '(corner_locations, (image0.shape[0], image0.shape[1]))\n', (6100, 6154), True, 'import numpy as np\n'), ((6409, 6545), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', ([], {'n_clusters': 
'args.n_points', 'max_iter': '(2)', 'n_init': '(1)', 'init_size': 'None', 'random_state': 'RANDOM_SEED', 'reassignment_ratio': '(1e-11)'}), '(n_clusters=args.n_points, max_iter=2, n_init=1, init_size=\n None, random_state=RANDOM_SEED, reassignment_ratio=1e-11)\n', (6424, 6545), False, 'from sklearn.cluster import MiniBatchKMeans\n'), ((8937, 8959), 'numpy.isnan', 'np.isnan', (['sparse_depth'], {}), '(sparse_depth)\n', (8945, 8959), True, 'import numpy as np\n'), ((9133, 9156), 'cv2.imread', 'cv2.imread', (['image0_path'], {}), '(image0_path)\n', (9143, 9156), False, 'import os, sys, glob, cv2, argparse\n'), ((9174, 9197), 'cv2.imread', 'cv2.imread', (['image1_path'], {}), '(image1_path)\n', (9184, 9197), False, 'import os, sys, glob, cv2, argparse\n'), ((9215, 9238), 'cv2.imread', 'cv2.imread', (['image2_path'], {}), '(image2_path)\n', (9225, 9238), False, 'import os, sys, glob, cv2, argparse\n'), ((9506, 9554), 'numpy.concatenate', 'np.concatenate', (['[image1, image0, image2]'], {'axis': '(1)'}), '([image1, image0, image2], axis=1)\n', (9520, 9554), True, 'import numpy as np\n'), ((10271, 10305), 'os.path.dirname', 'os.path.dirname', (['image_output_path'], {}), '(image_output_path)\n', (10286, 10305), False, 'import os, sys, glob, cv2, argparse\n'), ((10344, 10385), 'os.path.dirname', 'os.path.dirname', (['sparse_depth_output_path'], {}), '(sparse_depth_output_path)\n', (10359, 10385), False, 'import os, sys, glob, cv2, argparse\n'), ((10424, 10465), 'os.path.dirname', 'os.path.dirname', (['validity_map_output_path'], {}), '(validity_map_output_path)\n', (10439, 10465), False, 'import os, sys, glob, cv2, argparse\n'), ((10504, 10545), 'os.path.dirname', 'os.path.dirname', (['ground_truth_output_path'], {}), '(ground_truth_output_path)\n', (10519, 10545), False, 'import os, sys, glob, cv2, argparse\n'), ((10948, 10986), 'cv2.imwrite', 'cv2.imwrite', (['image_output_path', 'imagec'], {}), '(image_output_path, imagec)\n', (10959, 10986), False, 'import os, sys, 
glob, cv2, argparse\n'), ((10995, 11056), 'data_utils.save_depth', 'data_utils.save_depth', (['sparse_depth', 'sparse_depth_output_path'], {}), '(sparse_depth, sparse_depth_output_path)\n', (11016, 11056), False, 'import data_utils\n'), ((11065, 11133), 'data_utils.save_validity_map', 'data_utils.save_validity_map', (['validity_map', 'validity_map_output_path'], {}), '(validity_map, validity_map_output_path)\n', (11093, 11133), False, 'import data_utils\n'), ((11142, 11203), 'data_utils.save_depth', 'data_utils.save_depth', (['ground_truth', 'ground_truth_output_path'], {}), '(ground_truth, ground_truth_output_path)\n', (11163, 11203), False, 'import data_utils\n'), ((12129, 12152), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (12143, 12152), False, 'import os, sys, glob, cv2, argparse\n'), ((12162, 12197), 'os.makedirs', 'os.makedirs', (['dirpath'], {'exist_ok': '(True)'}), '(dirpath, exist_ok=True)\n', (12173, 12197), False, 'import os, sys, glob, cv2, argparse\n'), ((13317, 13387), 'os.path.join', 'os.path.join', (['NYU_ROOT_DIRPATH', '"""training"""', '"""images"""', '"""raw_data"""', '"""*/"""'], {}), "(NYU_ROOT_DIRPATH, 'training', 'images', 'raw_data', '*/')\n", (13329, 13387), False, 'import os, sys, glob, cv2, argparse\n'), ((13436, 13506), 'os.path.join', 'os.path.join', (['NYU_ROOT_DIRPATH', '"""training"""', '"""depths"""', '"""raw_data"""', '"""*/"""'], {}), "(NYU_ROOT_DIRPATH, 'training', 'depths', 'raw_data', '*/')\n", (13448, 13506), False, 'import os, sys, glob, cv2, argparse\n'), ((14508, 14517), 'multiprocessing.Pool', 'mp.Pool', ([], {}), '()\n', (14515, 14517), True, 'import multiprocessing as mp\n'), ((17237, 17297), 'os.path.join', 'os.path.join', (['NYU_ROOT_DIRPATH', '"""testing"""', '"""images"""', '"""*.png"""'], {}), "(NYU_ROOT_DIRPATH, 'testing', 'images', '*.png')\n", (17249, 17297), False, 'import os, sys, glob, cv2, argparse\n'), ((17348, 17408), 'os.path.join', 'os.path.join', (['NYU_ROOT_DIRPATH', 
'"""testing"""', '"""depths"""', '"""*.png"""'], {}), "(NYU_ROOT_DIRPATH, 'testing', 'depths', '*.png')\n", (17360, 17408), False, 'import os, sys, glob, cv2, argparse\n'), ((5776, 5814), 'numpy.where', 'np.where', (['(ground_truth > 0.0)', '(1.0)', '(0.0)'], {}), '(ground_truth > 0.0, 1.0, 0.0)\n', (5784, 5814), True, 'import numpy as np\n'), ((5963, 5982), 'numpy.argsort', 'np.argsort', (['corners'], {}), '(corners)\n', (5973, 5982), True, 'import numpy as np\n'), ((6274, 6326), 'numpy.array', 'np.array', (['[corner_locations[0], corner_locations[1]]'], {}), '([corner_locations[0], corner_locations[1]])\n', (6282, 6326), True, 'import numpy as np\n'), ((7349, 7370), 'numpy.zeros_like', 'np.zeros_like', (['image0'], {}), '(image0)\n', (7362, 7370), True, 'import numpy as np\n'), ((7753, 7777), 'numpy.squeeze', 'np.squeeze', (['sparse_depth'], {}), '(sparse_depth)\n', (7763, 7777), True, 'import numpy as np\n'), ((8000, 8023), 'numpy.unique', 'np.unique', (['validity_map'], {}), '(validity_map)\n', (8009, 8023), True, 'import numpy as np\n'), ((8025, 8041), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (8033, 8041), True, 'import numpy as np\n'), ((8318, 8338), 'numpy.min', 'np.min', (['ground_truth'], {}), '(ground_truth)\n', (8324, 8338), True, 'import numpy as np\n'), ((8348, 8368), 'numpy.max', 'np.max', (['ground_truth'], {}), '(ground_truth)\n', (8354, 8368), True, 'import numpy as np\n'), ((8503, 8541), 'numpy.where', 'np.where', (['(validity_map > 0.0)', '(1.0)', '(0.0)'], {}), '(validity_map > 0.0, 1.0, 0.0)\n', (8511, 8541), True, 'import numpy as np\n'), ((8736, 8774), 'numpy.where', 'np.where', (['(ground_truth > 0.0)', '(1.0)', '(0.0)'], {}), '(ground_truth > 0.0, 1.0, 0.0)\n', (8744, 8774), True, 'import numpy as np\n'), ((13893, 13930), 'os.path.join', 'os.path.join', (['image_sequence', '"""*.png"""'], {}), "(image_sequence, '*.png')\n", (13905, 13930), False, 'import os, sys, glob, cv2, argparse\n'), ((13985, 14022), 'os.path.join', 
'os.path.join', (['depth_sequence', '"""*.png"""'], {}), "(depth_sequence, '*.png')\n", (13997, 14022), False, 'import os, sys, glob, cv2, argparse\n'), ((8680, 8718), 'numpy.where', 'np.where', (['(validity_map > 0.0)', '(1.0)', '(0.0)'], {}), '(validity_map > 0.0, 1.0, 0.0)\n', (8688, 8718), True, 'import numpy as np\n'), ((10838, 10861), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (10852, 10861), False, 'import os, sys, glob, cv2, argparse\n'), ((10879, 10914), 'os.makedirs', 'os.makedirs', (['dirpath'], {'exist_ok': '(True)'}), '(dirpath, exist_ok=True)\n', (10890, 10914), False, 'import os, sys, glob, cv2, argparse\n')] |
from abc import ABC, abstractmethod
import torch
from . import metric_utils_lightning
import scipy
import numpy as np
import torchmetrics
class StyleGANMetric(torchmetrics.Metric, ABC):
    """Abstract base for StyleGAN feature-statistics metrics.

    Accumulates detector features for the real dataset and for the
    generator; subclasses implement the final score in ``compute``.
    """
    def __init__(self, detector_url: str, detector_kwargs: dict = None,
                 max_real=None, num_gen=None,):
        """
        Args:
            detector_url: location of the feature-detector network.
            detector_kwargs: extra keyword arguments passed to the detector.
            max_real: cap on the number of real items to accumulate.
            num_gen: cap on the number of generated items to accumulate.
        """
        super().__init__(compute_on_step=False)
        self.max_real = max_real
        self.num_gen = num_gen
        detector_spec = dict(detector_url=detector_url,
                             detector_kwargs=detector_kwargs)
        self.ds_feats = metric_utils_lightning.FeatStatsDataset(**detector_spec)
        self.gen_feats = metric_utils_lightning.FeatStatsGenerator(**detector_spec)

    @property
    @abstractmethod
    def name(self):
        """Human-readable name of the metric."""

    def prepare(self, opts):
        """Set up both feature accumulators from a shared options dict."""
        opts.update(num_items=self.max_real)
        self.ds_feats.prepare_dataset_features(**opts)
        opts.update(num_items=self.num_gen)
        self.gen_feats.prepare_generator_features(**opts)

    def reset(self):
        """Discard all accumulated feature statistics."""
        self.ds_feats.reset()
        self.gen_feats.reset()

    def update(self, images: torch.Tensor, z: torch.Tensor, c: torch.Tensor):
        """Feed one batch of real images and one batch of latents/labels."""
        if not self.ds_feats.is_full():
            self.ds_feats(images)
        if not self.gen_feats.is_full():
            self.gen_feats(z, c)

    def is_full(self):
        """True once both accumulators hold all the items they need."""
        return self.ds_feats.is_full() and self.gen_feats.is_full()
class FID(StyleGANMetric):
    """Frechet Inception Distance between real and generated images.

    Features are extracted with the Inception network at ``detector_url``;
    the score is the Frechet distance between Gaussian fits
    (mean/covariance) of the real and generated feature sets.
    """
    detector_url = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt'
    detector_kwargs = dict(return_features=True)

    def __init__(self, max_real=None, num_gen=None):
        """
        Args:
            max_real: cap on the number of real items to accumulate.
            num_gen: cap on the number of generated items to accumulate.
        """
        super().__init__(detector_url=self.detector_url,
                         detector_kwargs=self.detector_kwargs,
                         max_real=max_real,
                         num_gen=num_gen)

    def prepare(self, opts):
        # FID needs mean/covariance statistics rather than raw features.
        opts.update(capture_mean_cov=True)
        super().prepare(opts)

    def compute(self):
        """Return the FID between accumulated real and generated features."""
        mu_real, sigma_real = self.ds_feats.compute().get_mean_cov()
        mu_gen, sigma_gen = self.gen_feats.compute().get_mean_cov()
        m = torch.square(mu_gen - mu_real).sum()
        m = m.cpu().numpy()
        # Matrix square root of the covariance product; disp=False returns
        # (sqrtm, error_estimate) instead of printing warnings.
        s, _ = scipy.linalg.sqrtm(np.dot(sigma_gen, sigma_real), disp=False) # pylint: disable=no-member
        fid = np.real(m + np.trace(sigma_gen + sigma_real - s * 2))
        return float(fid)

    @property
    def name(self):
        # Bug fix: the base class declares `name` as an abstract *property*;
        # overriding it as a plain method made `self.name` a bound method
        # rather than the string 'FID'.
        return 'FID'
| [
"numpy.dot",
"numpy.trace",
"torch.square"
] | [((2400, 2429), 'numpy.dot', 'np.dot', (['sigma_gen', 'sigma_real'], {}), '(sigma_gen, sigma_real)\n', (2406, 2429), True, 'import numpy as np\n'), ((2301, 2331), 'torch.square', 'torch.square', (['(mu_gen - mu_real)'], {}), '(mu_gen - mu_real)\n', (2313, 2331), False, 'import torch\n'), ((2498, 2538), 'numpy.trace', 'np.trace', (['(sigma_gen + sigma_real - s * 2)'], {}), '(sigma_gen + sigma_real - s * 2)\n', (2506, 2538), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
"""
Cascade hypothesis class to generate photons expected from a cascade.
"""
from __future__ import absolute_import, division, print_function
__all__ = ["EM_CASCADE_PHOTONS_PER_GEV", "CascadeModel", "CascadeHypo"]
__author__ = "<NAME>, <NAME>"
__license__ = """Copyright 2017-2018 <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from collections import Iterable, OrderedDict
import math
from numbers import Number
from os.path import abspath, dirname
import sys
import enum
import numpy as np
from scipy.stats import gamma, pareto
if __name__ == "__main__" and __package__ is None:
RETRO_DIR = dirname(dirname(abspath(__file__)))
if RETRO_DIR not in sys.path:
sys.path.append(RETRO_DIR)
from retro.const import (
EMPTY_SOURCES, SPEED_OF_LIGHT_M_PER_NS, SRC_OMNI, SRC_CKV_BETA1, SrcHandling,
dummy_pegleg_gens
)
from retro.hypo_future import Hypo
from retro.retro_types import SRC_T
from retro.utils.misc import check_kwarg_keys, validate_and_convert_enum
EM_CASCADE_PHOTONS_PER_GEV = 12818.970 #12805.3383311
"""Cascade photons per energy, in units of 1/GeV (see
``retro/i3info/track_and_cascade_photon_parameterizations.py``)"""
# TODO: a "pegleg"-like cascade which can add sources to an existing set to increase the
# energy of the cascade while maintaining more accurate topolgy with higher energy (as
# opposed to the scaling cascade, which has a fixed topology at one energy and simply
# scales luminosity of those sources to increase energy)
class CascadeModel(enum.IntEnum):
    """Enumeration of cascade light-emission models understood by
    :class:`CascadeHypo`."""
    spherical = 0
    """spherical point-radiator"""
    one_dim_v1 = 1
    """parameterization inspired by but not strictly following arXiv:1210.5140v2; set
    num_sources to 1 to produce a point-like Chrenkov cascade"""
class CascadeHypo(Hypo):
    """
    Kernel to produce light sources expected from a cascade hypothesis.
    Parameters
    ----------
    model : CascadeModel enum or convertible thereto
        see :class:`CascadeModel` for valid values
    param_mapping : dict-like (mapping)
        see docs for :class:`retro.hypo.Hypo` for details
    external_sph_pairs : tuple of 0 or more 2-tuples of strings, optional
        see docs for :class:`retro.hypo.Hypo` for details
    num_sources : int, optional
        specify an integer >= 1 to fix the number of sources, or specify None or an
        integer <= 0 to dynamically adjust the number of sources based on the energy of
        the cascade. See `retro/notebooks/energy_dependent_cascade_num_samples.ipynb`
        for the logic behind how `num_sources` is computed in this case
    scaling_proto_energy : None or scalar > 0
        specify None to disable or specify scalar > 0 to treat the cascade as
        "scaling," i.e., a prototypical set of light sources are generated for the
        energy specified and modifying the energy from that merely scales the luminosity
        of each of those sources as opposed to generating an entirely new set of light
        sources; in this case, the topology of the cascade will not be as accurate but
        the speed of computing likelihoods increases
    model_kwargs : mapping
        Additional keyword arguments required by chosen cascade `model`; if parameters
        in addition to those required by the model are passed, a ValueError will be
        raised
        * `spherical`, `one_dim_v1` take no additional parameters
    """
    def __init__(
        self,
        model,
        param_mapping,
        external_sph_pairs=None,
        num_sources=None,
        scaling_proto_energy=None,
        model_kwargs=None,
    ):
        # -- Validation and translation of args -- #
        # `model`
        model = validate_and_convert_enum(val=model, enum_type=CascadeModel)
        # `num_sources`
        if not (
            num_sources is None
            or (
                isinstance(num_sources, Number)
                and int(num_sources) == num_sources
            )
        ):
            raise TypeError("`num_sources` must be an integer or None")
        if num_sources is None or num_sources <= 0:
            # -1 is the internal sentinel for "auto" (energy-dependent) mode
            num_sources = -1
        else:
            num_sources = int(num_sources)
        if model == CascadeModel.spherical:
            if num_sources < 1:
                num_sources = 1
            elif num_sources != 1:
                raise ValueError(
                    "spherical cascade only valid with `num_sources=1` or auto (< 0);"
                    " got `num_sources` = {}".format(num_sources)
                )
            internal_param_names = ("x", "y", "z", "time", "energy")
        else: # model == CascadeModel.one_dim_v1
            internal_param_names = ("x", "y", "z", "time", "energy", "azimuth", "zenith")
        if not (
            scaling_proto_energy is None
            or isinstance(scaling_proto_energy, Number)
        ):
            raise TypeError("`scaling_proto_energy` must be None or a scalar")
        # `scaling_proto_energy`
        if isinstance(scaling_proto_energy, Number) and scaling_proto_energy <= 0:
            raise ValueError("If scalar, `scaling_proto_energy` must be > 0")
        is_scaling = scaling_proto_energy is not None
        if is_scaling:
            # For a scaling cascade the energy is derived (scalefactor *
            # scaling_proto_energy), so it is not a free internal parameter
            internal_param_names = tuple(
                p for p in internal_param_names if p != "energy"
            )
        # `model_kwargs`
        if model_kwargs is None:
            model_kwargs = {}
        if model in (CascadeModel.spherical, CascadeModel.one_dim_v1):
            required_keys = ()
        else:
            raise NotImplementedError(
                "{} cascade model not implemented".format(model.name)
            )
        check_kwarg_keys(
            required_keys=required_keys,
            provided_kwargs=model_kwargs,
            meta_name="`model_kwargs`",
            message_pfx="{} cascade model:".format(model.name), # pylint: disable=no-member
        )
        # -- Initialize base class -- #
        super(CascadeHypo, self).__init__(
            param_mapping=param_mapping,
            internal_param_names=internal_param_names,
            external_sph_pairs=external_sph_pairs,
        )
        # A scaling cascade exposes exactly one scalefactor (the overall
        # luminosity multiplier); a non-scaling cascade exposes none
        self.max_num_scalefactors = 1 if is_scaling else 0
        # -- Store attrs unique to a cascade -- #
        self.model = model
        self.num_sources = num_sources
        self.scaling_proto_energy = scaling_proto_energy
        self.is_scaling = is_scaling
        # -- Record configuration items unique to a cascade hypo -- #
        self.config["model"] = model.name # pylint: disable=no-member
        self.config["num_sources"] = num_sources
        self.config["scaling_proto_energy"] = scaling_proto_energy
        # -- Create the self._get_sources attribute/callable -- #
        self._create_get_sources_func()
    def _create_get_sources_func(self):
        """Create the function that generates photon sources for a hypothesis.
        The created function is attached to this class as the private attribute
        `self._get_sources` and is intended to be called externally from
        `self.get_sources` (which does the appropriate translations from
        external names/values to internal names/values as used by _get_sources).
        """
        is_scaling = self.is_scaling
        if is_scaling:
            src_handling = SrcHandling.scaling
        else:
            src_handling = SrcHandling.nonscaling
        scaling_proto_energy = self.scaling_proto_energy
        if self.model == CascadeModel.spherical:
            def __get_sources(time, x, y, z, energy):
                """Point-like spherically-radiating cascade.
                Parameters
                ----------
                time, x, y, z, energy : scalars
                Returns
                -------
                sources
                sources_handling
                num_pegleg_generators
                pegleg_generators
                """
                if energy == 0:
                    # Zero energy emits no light; return the canonical empty set
                    return (EMPTY_SOURCES,), (SrcHandling.none,), 0, dummy_pegleg_gens
                sources = np.empty(shape=(1,), dtype=SRC_T)
                sources[0]["kind"] = SRC_OMNI
                sources[0]["time"] = time
                sources[0]["x"] = x
                sources[0]["y"] = y
                sources[0]["z"] = z
                sources[0]["photons"] = EM_CASCADE_PHOTONS_PER_GEV * energy
                return (sources,), (SrcHandling.none,), 0, dummy_pegleg_gens
            if is_scaling:
                # Wrap the kernel with the prototype energy baked in; callers
                # then vary only the scalefactor
                def ___get_sources(time, x, y, z): # pylint: disable=missing-docstring
                    return __get_sources(
                        time=time,
                        x=x,
                        y=y,
                        z=z,
                        energy=scaling_proto_energy,
                    )
                ___get_sources.__doc__ = (
                    __get_sources.__doc__.replace(", energy", "")
                )
                _get_sources = ___get_sources
            else:
                _get_sources = __get_sources
        elif self.model == CascadeModel.one_dim_v1:
            # TODO: use quasi-random (low discrepancy) numbers instead of pseudo-random
            # (e.g., Sobol sequence)
            # Create samples from angular zenith distribution.
            max_num_sources = int(1e5)
            if self.num_sources > max_num_sources:
                raise ValueError(
                    "Can only produce up to {} sources".format(max_num_sources)
                )
            # Parameterizations from arXiv:1210.5140v2
            zen_dist = pareto(b=1.91833423, loc=-22.82924369, scale=22.82924369)
            random_state = np.random.RandomState(0)
            precomputed_zen_samples = np.deg2rad(
                np.clip(
                    zen_dist.rvs(size=max_num_sources, random_state=random_state),
                    a_min=0,
                    a_max=180,
                )
            )
            # Create samples from angular azimuth distribution
            random_state = np.random.RandomState(2)
            precomputed_az_samples = random_state.uniform(
                low=0,
                high=2*np.pi,
                size=max_num_sources,
            )
            param_alpha = 2.01849
            param_beta = 1.45469
            min_cascade_energy = np.ceil(10**(-param_alpha / param_beta) * 100) / 100
            rad_len = 0.3975
            param_b = 0.63207
            rad_len_over_b = rad_len / param_b
            # numba closure doesn't have access to attributes of `self`, so extract
            # attributes we need as "regular" variables
            num_sources = self.num_sources
            def compute_actual_num_sources(num_sources, energy=None):
                """Compute actual number of sources to use given a specification
                `num_sources` for number of sources to use; take "limits" (minimum
                energy) into account and dynamically compute `actual_num_sources` if
                `num_sources` is < 0.
                Parameters
                ----------
                num_sources : int
                    if < 0, compute num_sources based on `energy`
                energy : scalar > 0, required only if `num_sources` < 0
                Returns
                -------
                actual_num_sources : int > 0
                """
                if num_sources < 0:
                    # Note that num_sources must be 1 for energy <= min_cascade_energy
                    # (param_a goes <= 0 at this value and below, causing an exception from
                    # gamma distribution)
                    if energy <= min_cascade_energy:
                        actual_num_sources = 1
                    else:
                        # See `retro/notebooks/energy_dependent_cascade_num_samples.ipynb`
                        actual_num_sources = int(np.round(
                            np.clip(
                                math.exp(0.77 * math.log(energy) + 2.3),
                                a_min=1,
                                a_max=None,
                            )
                        ))
                else:
                    actual_num_sources = num_sources
                return actual_num_sources
            def compute_longitudinal_samples(num_sources, energy):
                """Create longitudinal distribution of cascade's light sources.
                See arXiv:1210.5140v2
                Parameters
                ----------
                num_sources : int
                energy : scalar > 0
                Returns
                -------
                longitudinal_samples : length-`n_samples` ndarray of dtype float64
                """
                param_a = (
                    param_alpha
                    + (
                        param_beta
                        * math.log10(max(min_cascade_energy, energy))
                    )
                )
                # ~70% of exec time:
                longitudinal_dist = gamma(param_a, scale=rad_len_over_b)
                # ~10% of execution time:
                longitudinal_samples = longitudinal_dist.rvs(size=num_sources, random_state=1)
                return longitudinal_samples
            if is_scaling:
                # Generate the prototype topology once, up front
                actual_num_sources = compute_actual_num_sources(
                    num_sources=num_sources,
                    energy=scaling_proto_energy,
                )
                precomputed_long_samples = compute_longitudinal_samples(
                    num_sources=actual_num_sources,
                    energy=scaling_proto_energy,
                )
            else:
                # Define things just so they exist, even though we won't use them
                # (needed for Numba-compiled closures)
                actual_num_sources = num_sources
                precomputed_long_samples = np.empty(0)
            # TODO: speed up (~800 µs to generate sources at 10 GeV)...
            # * 70% of the time is spent instantiating the gamma dist
            # * 10% of the time is executing the `rvs` method of the gamma dist
            def __get_sources(time, x, y, z, energy, azimuth, zenith):
                """Cascade with both longitudinal and angular distributions (but no
                distribution off-axis). All emitters are located on the shower axis.
                Use as a hypo_kernel with the DiscreteHypo class.
                Note that the number of samples is proportional to the energy of the
                cascade.
                Parameters
                ----------
                time, x, y, z, energy, azimuth, zenith
                Returns
                -------
                sources
                source_handling
                num_pegleg_generators
                pegleg_generators
                """
                if energy == 0:
                    return (EMPTY_SOURCES,), (SrcHandling.none,), 0, dummy_pegleg_gens
                if is_scaling:
                    n_sources = actual_num_sources
                else:
                    n_sources = compute_actual_num_sources(
                        num_sources=num_sources,
                        energy=energy,
                    )
                # NOTE(review): zenith/azimuth are flipped to the opposing
                # direction here -- presumably converting "direction particle
                # comes from" into direction of travel; confirm against the
                # retro angle conventions
                opposite_zenith = np.pi - zenith
                opposite_azimuth = azimuth + np.pi
                sin_zen = math.sin(opposite_zenith)
                cos_zen = math.cos(opposite_zenith)
                sin_az = math.sin(opposite_azimuth)
                cos_az = math.cos(opposite_azimuth)
                dir_x = sin_zen * cos_az
                dir_y = sin_zen * sin_az
                dir_z = cos_zen
                if n_sources == 1:
                    # Single-source shortcut: one Cherenkov emitter at the vertex
                    sources = np.empty(shape=(1,), dtype=SRC_T)
                    sources[0]["kind"] = SRC_CKV_BETA1
                    sources[0]["time"] = time
                    sources[0]["x"] = x
                    sources[0]["y"] = y
                    sources[0]["z"] = z
                    sources[0]["photons"] = EM_CASCADE_PHOTONS_PER_GEV * energy
                    sources[0]["dir_costheta"] = cos_zen
                    sources[0]["dir_sintheta"] = sin_zen
                    sources[0]["dir_phi"] = opposite_azimuth
                    sources[0]["dir_cosphi"] = cos_az
                    sources[0]["dir_sinphi"] = sin_az
                    return (sources,), (src_handling,), 0, dummy_pegleg_gens
                # Create rotation matrix
                rot_mat = np.array(
                    [[cos_az * cos_zen, -sin_az, cos_az * sin_zen],
                     [sin_az * cos_zen, cos_zen, sin_az * sin_zen],
                     [-sin_zen, 0, cos_zen]]
                )
                if is_scaling:
                    longitudinal_samples = precomputed_long_samples
                else:
                    longitudinal_samples = compute_longitudinal_samples(
                        num_sources=n_sources,
                        energy=energy,
                    )
                # Grab precomputed samples from angular zenith distribution
                zen_samples = precomputed_zen_samples[:n_sources]
                # Grab precomputed samples from angular azimuth distribution
                az_samples = precomputed_az_samples[:n_sources]
                # Create angular vectors distribution
                sin_zen = np.sin(zen_samples)
                x_ang_dist = sin_zen * np.cos(az_samples)
                y_ang_dist = sin_zen * np.sin(az_samples)
                z_ang_dist = np.cos(zen_samples)
                ang_dist = np.concatenate(
                    (
                        x_ang_dist[np.newaxis, :],
                        y_ang_dist[np.newaxis, :],
                        z_ang_dist[np.newaxis, :]
                    ),
                    axis=0,
                )
                # Rotate the sampled direction vectors onto the shower axis
                final_ang_dist = np.dot(rot_mat, ang_dist)
                final_phi_dist = np.arctan2(final_ang_dist[1], final_ang_dist[0])
                final_theta_dist = np.arccos(final_ang_dist[2])
                # Define photons per sample
                photons_per_sample = EM_CASCADE_PHOTONS_PER_GEV * energy / n_sources
                # Create photon array
                sources = np.empty(shape=n_sources, dtype=SRC_T)
                sources["kind"] = SRC_CKV_BETA1
                sources["time"] = time + longitudinal_samples / SPEED_OF_LIGHT_M_PER_NS
                sources["x"] = x + longitudinal_samples * dir_x
                sources["y"] = y + longitudinal_samples * dir_y
                sources["z"] = z + longitudinal_samples * dir_z
                sources["photons"] = photons_per_sample
                sources["dir_costheta"] = final_ang_dist[2]
                sources["dir_sintheta"] = np.sin(final_theta_dist)
                sources["dir_phi"] = final_phi_dist
                sources["dir_cosphi"] = np.cos(final_phi_dist)
                sources["dir_sinphi"] = np.sin(final_phi_dist)
                return (sources,), (src_handling,), 0, dummy_pegleg_gens
            if is_scaling:
                def ___get_sources(time, x, y, z, azimuth, zenith): # pylint: disable=missing-docstring
                    return __get_sources(
                        time=time,
                        x=x,
                        y=y,
                        z=z,
                        energy=scaling_proto_energy,
                        azimuth=azimuth,
                        zenith=zenith,
                    )
                ___get_sources.__doc__ = (
                    __get_sources.__doc__.replace(", energy", "")
                )
                _get_sources = ___get_sources
            else:
                _get_sources = __get_sources
        else:
            raise NotImplementedError(
                "{} cascade model is not implemented".format(self.model.name) # pylint: disable=no-member
            )
        self._get_sources = _get_sources
    def get_energy(self, pegleg_indices=None, scalefactors=None):
        """Get cascade energy.
        Parameters
        ----------
        pegleg_indices : must be None
        scalefactors : scalar or iterable of one scalar; required if is_scaling
        Returns
        -------
        energy
            Energy of cascade in GeV
        """
        assert pegleg_indices is None
        if isinstance(scalefactors, Iterable):
            # A cascade defines at most one scalefactor; use the first element
            scalefactors = tuple(scalefactors)[0]
        if self.is_scaling:
            assert scalefactors is not None
            return self.scaling_proto_energy * scalefactors
        return self.internal_params["energy"]
    def get_derived_params(self, pegleg_indices=None, scalefactors=None):
        """Retrieve any derived params from component hypotheses.
        Parameters
        ----------
        pegleg_indices : optional
        scalefactors : optional
        Returns
        -------
        derived_params : OrderedDict
        """
        derived_params = OrderedDict()
        # If scaling, energy is derived from scaling_proto_energy & scalefactor
        if self.is_scaling:
            derived_params["energy"] = self.get_energy(
                pegleg_indices=pegleg_indices,
                scalefactors=scalefactors,
            )
        return derived_params
def test_CascadeHypo():
    """Unit tests for CascadeHypo class"""
    def _drop_angles(mapping):
        # Keep only entries that are not azimuth/zenith keys
        return {
            key: val for key, val in mapping.items()
            if "zen" not in key and "az" not in key
        }

    mapping_all = dict(
        x="x", y="y", z="z", time="time", cascade_energy="energy",
        cascade_azimuth="azimuth", cascade_zenith="zenith",
    )
    mapping_scaling = {
        key: val for key, val in mapping_all.items() if val != "energy"
    }
    mapping_sph = _drop_angles(mapping_all)
    mapping_sph_scaling = _drop_angles(mapping_scaling)

    def mapping_callable(
        x, y, z, time, cascade_energy, cascade_azimuth, cascade_zenith, **kwargs
    ):  # pylint: disable=missing-docstring, unused-argument
        return dict(x=x, y=y, z=z, time=time, energy=cascade_energy,
                    azimuth=cascade_azimuth, zenith=cascade_zenith)

    def mapping_callable_scaling(
        x, y, z, time, cascade_azimuth, cascade_zenith, **kwargs
    ):  # pylint: disable=missing-docstring, unused-argument
        return dict(x=x, y=y, z=z, time=time, azimuth=cascade_azimuth,
                    zenith=cascade_zenith)

    external = dict(x=0, y=0, z=0, time=0, cascade_energy=50,
                    cascade_azimuth=np.pi/2, cascade_zenith=np.pi/4)
    external_sph = _drop_angles(external)

    # dict for param mapping, enum model, dynamic num sources, not scaling
    hypo = CascadeHypo(
        param_mapping=mapping_all,
        model=CascadeModel.one_dim_v1,
        num_sources=-1,
        scaling_proto_energy=None,
    )
    _, _, _, _ = hypo.get_sources(**external)

    # callable for param mapping, str model, fixed num sources, not scaling
    hypo = CascadeHypo(
        param_mapping=mapping_callable,
        model="one_dim_v1",
        num_sources=100,
        scaling_proto_energy=None,
    )
    _, _, _, _ = hypo.get_sources(**external)

    # callable for param mapping, str model, fixed num sources, scaling
    hypo = CascadeHypo(
        param_mapping=mapping_callable_scaling,
        model="one_dim_v1",
        num_sources=100,
        scaling_proto_energy=100,
    )
    out = hypo.get_sources(**external)
    _, _, _, _ = out
    print(out[0][0][:10])
    print(out[1][0])

    # dict for param mapping, int model, auto num sources, scaling
    hypo = CascadeHypo(
        param_mapping=mapping_sph_scaling,
        model=int(CascadeModel.spherical),
        num_sources=-1,
        scaling_proto_energy=100,
    )
    _, _, _, _ = hypo.get_sources(**external_sph)

    # dict for param mapping, enum model, fixed num sources, not scaling
    hypo = CascadeHypo(
        param_mapping=mapping_sph,
        model=CascadeModel.spherical,
        num_sources=1,
    )
    _, _, _, _ = hypo.get_sources(**external_sph)

    print("<< PASS : test_CascadeHypo >>")
if __name__ == "__main__":
test_CascadeHypo()
| [
"numpy.arctan2",
"numpy.empty",
"numpy.sin",
"retro.utils.misc.validate_and_convert_enum",
"scipy.stats.pareto",
"sys.path.append",
"os.path.abspath",
"numpy.random.RandomState",
"scipy.stats.gamma",
"math.cos",
"math.log",
"numpy.arccos",
"numpy.ceil",
"math.sin",
"numpy.cos",
"numpy.... | [((1246, 1272), 'sys.path.append', 'sys.path.append', (['RETRO_DIR'], {}), '(RETRO_DIR)\n', (1261, 1272), False, 'import sys\n'), ((4281, 4341), 'retro.utils.misc.validate_and_convert_enum', 'validate_and_convert_enum', ([], {'val': 'model', 'enum_type': 'CascadeModel'}), '(val=model, enum_type=CascadeModel)\n', (4306, 4341), False, 'from retro.utils.misc import check_kwarg_keys, validate_and_convert_enum\n'), ((21605, 21618), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21616, 21618), False, 'from collections import Iterable, OrderedDict\n'), ((1184, 1201), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (1191, 1201), False, 'from os.path import abspath, dirname\n'), ((8669, 8702), 'numpy.empty', 'np.empty', ([], {'shape': '(1,)', 'dtype': 'SRC_T'}), '(shape=(1,), dtype=SRC_T)\n', (8677, 8702), True, 'import numpy as np\n'), ((10192, 10249), 'scipy.stats.pareto', 'pareto', ([], {'b': '(1.91833423)', 'loc': '(-22.82924369)', 'scale': '(22.82924369)'}), '(b=1.91833423, loc=-22.82924369, scale=22.82924369)\n', (10198, 10249), False, 'from scipy.stats import gamma, pareto\n'), ((10277, 10301), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (10298, 10301), True, 'import numpy as np\n'), ((10643, 10667), 'numpy.random.RandomState', 'np.random.RandomState', (['(2)'], {}), '(2)\n', (10664, 10667), True, 'import numpy as np\n'), ((10934, 10982), 'numpy.ceil', 'np.ceil', (['(10 ** (-param_alpha / param_beta) * 100)'], {}), '(10 ** (-param_alpha / param_beta) * 100)\n', (10941, 10982), True, 'import numpy as np\n'), ((13648, 13684), 'scipy.stats.gamma', 'gamma', (['param_a'], {'scale': 'rad_len_over_b'}), '(param_a, scale=rad_len_over_b)\n', (13653, 13684), False, 'from scipy.stats import gamma, pareto\n'), ((14510, 14521), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (14518, 14521), True, 'import numpy as np\n'), ((15993, 16018), 'math.sin', 'math.sin', (['opposite_zenith'], {}), 
'(opposite_zenith)\n', (16001, 16018), False, 'import math\n'), ((16045, 16070), 'math.cos', 'math.cos', (['opposite_zenith'], {}), '(opposite_zenith)\n', (16053, 16070), False, 'import math\n'), ((16096, 16122), 'math.sin', 'math.sin', (['opposite_azimuth'], {}), '(opposite_azimuth)\n', (16104, 16122), False, 'import math\n'), ((16148, 16174), 'math.cos', 'math.cos', (['opposite_azimuth'], {}), '(opposite_azimuth)\n', (16156, 16174), False, 'import math\n'), ((17121, 17253), 'numpy.array', 'np.array', (['[[cos_az * cos_zen, -sin_az, cos_az * sin_zen], [sin_az * cos_zen, cos_zen,\n sin_az * sin_zen], [-sin_zen, 0, cos_zen]]'], {}), '([[cos_az * cos_zen, -sin_az, cos_az * sin_zen], [sin_az * cos_zen,\n cos_zen, sin_az * sin_zen], [-sin_zen, 0, cos_zen]])\n', (17129, 17253), True, 'import numpy as np\n'), ((17999, 18018), 'numpy.sin', 'np.sin', (['zen_samples'], {}), '(zen_samples)\n', (18005, 18018), True, 'import numpy as np\n'), ((18164, 18183), 'numpy.cos', 'np.cos', (['zen_samples'], {}), '(zen_samples)\n', (18170, 18183), True, 'import numpy as np\n'), ((18211, 18320), 'numpy.concatenate', 'np.concatenate', (['(x_ang_dist[np.newaxis, :], y_ang_dist[np.newaxis, :], z_ang_dist[np.\n newaxis, :])'], {'axis': '(0)'}), '((x_ang_dist[np.newaxis, :], y_ang_dist[np.newaxis, :],\n z_ang_dist[np.newaxis, :]), axis=0)\n', (18225, 18320), True, 'import numpy as np\n'), ((18504, 18529), 'numpy.dot', 'np.dot', (['rot_mat', 'ang_dist'], {}), '(rot_mat, ang_dist)\n', (18510, 18529), True, 'import numpy as np\n'), ((18563, 18611), 'numpy.arctan2', 'np.arctan2', (['final_ang_dist[1]', 'final_ang_dist[0]'], {}), '(final_ang_dist[1], final_ang_dist[0])\n', (18573, 18611), True, 'import numpy as np\n'), ((18647, 18675), 'numpy.arccos', 'np.arccos', (['final_ang_dist[2]'], {}), '(final_ang_dist[2])\n', (18656, 18675), True, 'import numpy as np\n'), ((18871, 18909), 'numpy.empty', 'np.empty', ([], {'shape': 'n_sources', 'dtype': 'SRC_T'}), '(shape=n_sources, dtype=SRC_T)\n', (18879, 
18909), True, 'import numpy as np\n'), ((19400, 19424), 'numpy.sin', 'np.sin', (['final_theta_dist'], {}), '(final_theta_dist)\n', (19406, 19424), True, 'import numpy as np\n'), ((19518, 19540), 'numpy.cos', 'np.cos', (['final_phi_dist'], {}), '(final_phi_dist)\n', (19524, 19540), True, 'import numpy as np\n'), ((19581, 19603), 'numpy.sin', 'np.sin', (['final_phi_dist'], {}), '(final_phi_dist)\n', (19587, 19603), True, 'import numpy as np\n'), ((16355, 16388), 'numpy.empty', 'np.empty', ([], {'shape': '(1,)', 'dtype': 'SRC_T'}), '(shape=(1,), dtype=SRC_T)\n', (16363, 16388), True, 'import numpy as np\n'), ((18058, 18076), 'numpy.cos', 'np.cos', (['az_samples'], {}), '(az_samples)\n', (18064, 18076), True, 'import numpy as np\n'), ((18116, 18134), 'numpy.sin', 'np.sin', (['az_samples'], {}), '(az_samples)\n', (18122, 18134), True, 'import numpy as np\n'), ((12597, 12613), 'math.log', 'math.log', (['energy'], {}), '(energy)\n', (12605, 12613), False, 'import math\n')] |
import numpy as np
import pytest
from ..utils import compute_spectral_radius, create_rng, chunk_data
from ..utils import standardize_traindata, scale_data, unscale_data
def test_compute_spectral_radius():
    """Checks for compute_spectral_radius."""
    # A non-square matrix must be rejected
    rect = np.random.RandomState(17).rand(5, 3)
    with pytest.raises(AssertionError):
        compute_spectral_radius(rect)
    # An all-zero matrix has spectral radius zero
    assert compute_spectral_radius(np.zeros((5, 5))) == 0.0
    # X = [[9, -1, 2], [-2, 8, 4], [1, 1, 8]] has spectral radius 10
    known = np.array([[9, -1, 2], [-2, 8, 4], [1, 1, 8]])
    assert compute_spectral_radius(known) == 10.0
def test_create_rng():
    """create_rng must yield equivalent generators whether given None, an
    integer seed, or an existing RandomState."""
    expected = np.random.RandomState(17).bytes(1)
    for seed in (None, 17, np.random.RandomState(17)):
        assert create_rng(seed).bytes(1) == expected
def test_chunk_data_1d():
    """chunk_data on a 1-d series, with and without an even split."""
    series = np.array([0, 1, 2, 3, 4, 5, 6, 7])
    # Chunk length divides the series evenly
    inputs, targets = chunk_data(series, 2, 4)
    np.testing.assert_array_equal(inputs, np.array([[[0, 1]], [[4, 5]]]))
    np.testing.assert_array_equal(targets, np.array([[[2, 3]], [[6, 7]]]))
    # Chunk length does not divide the series evenly
    inputs, targets = chunk_data(series, 2, 3)
    np.testing.assert_array_equal(inputs, np.array([[[0, 1]], [[3, 4]]]))
    np.testing.assert_array_equal(targets, np.array([[[2, 3]], [[5, 6]]]))
def test_chunk_data_2d():
    """chunk_data on a 2-d series, including predicted-column selection."""
    series = np.array([[i, -i] for i in range(8)])
    # Evenly-divisible chunking, default columns
    inputs, targets = chunk_data(series, 2, 4)
    np.testing.assert_array_equal(
        inputs, np.array([[[0, 1], [0, -1]], [[4, 5], [-4, -5]]]))
    np.testing.assert_array_equal(
        targets, np.array([[[2, 3], [-2, -3]], [[6, 7], [-6, -7]]]))
    # Uneven chunking with both columns requested explicitly
    inputs, targets = chunk_data(series, 2, 3, predict_cols=[0, 1])
    np.testing.assert_array_equal(
        inputs, np.array([[[0, 1], [0, -1]], [[3, 4], [-3, -4]]]))
    np.testing.assert_array_equal(
        targets, np.array([[[2, 3], [-2, -3]], [[5, 6], [-5, -6]]]))
    # Predicting only column 0: inputs keep both columns, targets drop one
    inputs, targets = chunk_data(series, 2, 4, predict_cols=[0])
    np.testing.assert_array_equal(
        inputs, np.array([[[0, 1], [0, -1]], [[4, 5], [-4, -5]]]))
    np.testing.assert_array_equal(
        targets, np.array([[[2, 3]], [[6, 7]]]))
def test_standardize_traindata():
    """standardize_traindata on 1-d and 2-d inputs."""
    # 1-d input
    sdata, mu, sigma = standardize_traindata(np.array([-1., 3.]))
    np.testing.assert_allclose(sdata, np.array([-1, 1]))
    np.testing.assert_allclose(mu, np.array([1.]))
    np.testing.assert_allclose(sigma, np.array([2.]))
    # 2-d input, per-column statistics
    sdata, mu_arr, sigma_arr = standardize_traindata(
        np.array([[-1., -4.], [3., 4.]]))
    np.testing.assert_allclose(sdata, np.array([[-1., -1.], [1., 1.]]))
    np.testing.assert_allclose(mu_arr, np.array([1., 0.]))
    np.testing.assert_allclose(sigma_arr, np.array([2., 4.]))
def test_scale_data():
    """scale_data should apply (x - mu) / sigma elementwise.

    Covers a 1-d input with length-1 mu/sigma arrays and a 2-d input with
    per-column mu/sigma arrays.
    """
    # Scale 1D data
    data = np.array([1., 2.])
    mu = np.array([1.])
    sigma = np.array([2.])
    sdata = scale_data(data, mu, sigma)
    correct_sdata = np.array([0., 0.5])
    np.testing.assert_allclose(sdata, correct_sdata)
    # Scale 2D data
    data = np.array([[1., -1.], [2., -2.]])
    mu_arr = np.array([1., 1.])
    sigma_arr = np.array([2., 2.])
    sdata = scale_data(data, mu_arr, sigma_arr)
    # Fix: removed leftover debug `print(sdata)` that cluttered test output
    correct_sdata = np.array([[0., -1.], [0.5, -1.5]])
    np.testing.assert_allclose(sdata, correct_sdata)
def test_unscale_data():
    """unscale_data should invert standardize_traindata."""
    # Round trip on 1-d and 2-d data
    for original in (np.array([1., 2., 3.]),
                     np.array([[1., -1.], [2., -2.]])):
        scaled, mu, sigma = standardize_traindata(original)
        np.testing.assert_allclose(original, unscale_data(scaled, mu, sigma))
    # Round trip restricted to a subset of columns
    wide = np.array([[1., -2., 4.], [-1., -8., 2.]])
    scaled, mu, sigma = standardize_traindata(wide)
    recovered = unscale_data(scaled[:, 1:], mu, sigma, predict_cols=[1, 2])
    np.testing.assert_allclose(wide[:, 1:], recovered)
| [
"numpy.testing.assert_array_equal",
"numpy.zeros",
"numpy.random.RandomState",
"pytest.raises",
"numpy.array",
"numpy.testing.assert_allclose"
] | [((269, 294), 'numpy.random.RandomState', 'np.random.RandomState', (['(17)'], {}), '(17)\n', (290, 294), True, 'import numpy as np\n'), ((466, 482), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (474, 482), True, 'import numpy as np\n'), ((655, 700), 'numpy.array', 'np.array', (['[[9, -1, 2], [-2, 8, 4], [1, 1, 8]]'], {}), '([[9, -1, 2], [-2, 8, 4], [1, 1, 8]])\n', (663, 700), True, 'import numpy as np\n'), ((891, 916), 'numpy.random.RandomState', 'np.random.RandomState', (['(17)'], {}), '(17)\n', (912, 916), True, 'import numpy as np\n'), ((1415, 1449), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7])\n', (1423, 1449), True, 'import numpy as np\n'), ((1550, 1580), 'numpy.array', 'np.array', (['[[[0, 1]], [[4, 5]]]'], {}), '([[[0, 1]], [[4, 5]]])\n', (1558, 1580), True, 'import numpy as np\n'), ((1593, 1623), 'numpy.array', 'np.array', (['[[[2, 3]], [[6, 7]]]'], {}), '([[[2, 3]], [[6, 7]]])\n', (1601, 1623), True, 'import numpy as np\n'), ((1628, 1673), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkU1', 'ansU1'], {}), '(chunkU1, ansU1)\n', (1657, 1673), True, 'import numpy as np\n'), ((1678, 1723), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkY1', 'ansY1'], {}), '(chunkY1, ansY1)\n', (1707, 1723), True, 'import numpy as np\n'), ((1832, 1862), 'numpy.array', 'np.array', (['[[[0, 1]], [[3, 4]]]'], {}), '([[[0, 1]], [[3, 4]]])\n', (1840, 1862), True, 'import numpy as np\n'), ((1875, 1905), 'numpy.array', 'np.array', (['[[[2, 3]], [[5, 6]]]'], {}), '([[[2, 3]], [[5, 6]]])\n', (1883, 1905), True, 'import numpy as np\n'), ((1910, 1955), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkU2', 'ansU2'], {}), '(chunkU2, ansU2)\n', (1939, 1955), True, 'import numpy as np\n'), ((1960, 2005), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkY2', 'ansY2'], {}), '(chunkY2, ansY2)\n', (1989, 2005), True, 
'import numpy as np\n'), ((2079, 2164), 'numpy.array', 'np.array', (['[[0, 0], [1, -1], [2, -2], [3, -3], [4, -4], [5, -5], [6, -6], [7, -7]]'], {}), '([[0, 0], [1, -1], [2, -2], [3, -3], [4, -4], [5, -5], [6, -6], [7,\n -7]])\n', (2087, 2164), True, 'import numpy as np\n'), ((2450, 2499), 'numpy.array', 'np.array', (['[[[0, 1], [0, -1]], [[4, 5], [-4, -5]]]'], {}), '([[[0, 1], [0, -1]], [[4, 5], [-4, -5]]])\n', (2458, 2499), True, 'import numpy as np\n'), ((2512, 2562), 'numpy.array', 'np.array', (['[[[2, 3], [-2, -3]], [[6, 7], [-6, -7]]]'], {}), '([[[2, 3], [-2, -3]], [[6, 7], [-6, -7]]])\n', (2520, 2562), True, 'import numpy as np\n'), ((2567, 2612), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkU1', 'ansU1'], {}), '(chunkU1, ansU1)\n', (2596, 2612), True, 'import numpy as np\n'), ((2617, 2662), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkY1', 'ansY1'], {}), '(chunkY1, ansY1)\n', (2646, 2662), True, 'import numpy as np\n'), ((2826, 2875), 'numpy.array', 'np.array', (['[[[0, 1], [0, -1]], [[3, 4], [-3, -4]]]'], {}), '([[[0, 1], [0, -1]], [[3, 4], [-3, -4]]])\n', (2834, 2875), True, 'import numpy as np\n'), ((2888, 2938), 'numpy.array', 'np.array', (['[[[2, 3], [-2, -3]], [[5, 6], [-5, -6]]]'], {}), '([[[2, 3], [-2, -3]], [[5, 6], [-5, -6]]])\n', (2896, 2938), True, 'import numpy as np\n'), ((2943, 2988), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkU2', 'ansU2'], {}), '(chunkU2, ansU2)\n', (2972, 2988), True, 'import numpy as np\n'), ((2993, 3038), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkY2', 'ansY2'], {}), '(chunkY2, ansY2)\n', (3022, 3038), True, 'import numpy as np\n'), ((3206, 3255), 'numpy.array', 'np.array', (['[[[0, 1], [0, -1]], [[4, 5], [-4, -5]]]'], {}), '([[[0, 1], [0, -1]], [[4, 5], [-4, -5]]])\n', (3214, 3255), True, 'import numpy as np\n'), ((3268, 3298), 'numpy.array', 'np.array', (['[[[2, 3]], [[6, 7]]]'], {}), 
'([[[2, 3]], [[6, 7]]])\n', (3276, 3298), True, 'import numpy as np\n'), ((3303, 3348), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkU3', 'ansU3'], {}), '(chunkU3, ansU3)\n', (3332, 3348), True, 'import numpy as np\n'), ((3353, 3398), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['chunkY3', 'ansY3'], {}), '(chunkY3, ansY3)\n', (3382, 3398), True, 'import numpy as np\n'), ((3486, 3507), 'numpy.array', 'np.array', (['[-1.0, 3.0]'], {}), '([-1.0, 3.0])\n', (3494, 3507), True, 'import numpy as np\n'), ((3577, 3594), 'numpy.array', 'np.array', (['[-1, 1]'], {}), '([-1, 1])\n', (3585, 3594), True, 'import numpy as np\n'), ((3599, 3647), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdata', 'correct_sdata'], {}), '(sdata, correct_sdata)\n', (3625, 3647), True, 'import numpy as np\n'), ((3805, 3841), 'numpy.array', 'np.array', (['[[-1.0, -4.0], [3.0, 4.0]]'], {}), '([[-1.0, -4.0], [3.0, 4.0]])\n', (3813, 3841), True, 'import numpy as np\n'), ((3917, 3953), 'numpy.array', 'np.array', (['[[-1.0, -1.0], [1.0, 1.0]]'], {}), '([[-1.0, -1.0], [1.0, 1.0]])\n', (3925, 3953), True, 'import numpy as np\n'), ((3954, 4002), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdata', 'correct_sdata'], {}), '(sdata, correct_sdata)\n', (3980, 4002), True, 'import numpy as np\n'), ((4180, 4200), 'numpy.array', 'np.array', (['[1.0, 2.0]'], {}), '([1.0, 2.0])\n', (4188, 4200), True, 'import numpy as np\n'), ((4208, 4223), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (4216, 4223), True, 'import numpy as np\n'), ((4235, 4250), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (4243, 4250), True, 'import numpy as np\n'), ((4310, 4330), 'numpy.array', 'np.array', (['[0.0, 0.5]'], {}), '([0.0, 0.5])\n', (4318, 4330), True, 'import numpy as np\n'), ((4334, 4382), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdata', 'correct_sdata'], {}), '(sdata, correct_sdata)\n', 
(4360, 4382), True, 'import numpy as np\n'), ((4415, 4451), 'numpy.array', 'np.array', (['[[1.0, -1.0], [2.0, -2.0]]'], {}), '([[1.0, -1.0], [2.0, -2.0]])\n', (4423, 4451), True, 'import numpy as np\n'), ((4461, 4481), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (4469, 4481), True, 'import numpy as np\n'), ((4496, 4516), 'numpy.array', 'np.array', (['[2.0, 2.0]'], {}), '([2.0, 2.0])\n', (4504, 4516), True, 'import numpy as np\n'), ((4600, 4636), 'numpy.array', 'np.array', (['[[0.0, -1.0], [0.5, -1.5]]'], {}), '([[0.0, -1.0], [0.5, -1.5]])\n', (4608, 4636), True, 'import numpy as np\n'), ((4639, 4687), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['sdata', 'correct_sdata'], {}), '(sdata, correct_sdata)\n', (4665, 4687), True, 'import numpy as np\n'), ((4759, 4784), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (4767, 4784), True, 'import numpy as np\n'), ((4882, 4923), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['data', 'unsdata'], {}), '(data, unsdata)\n', (4908, 4923), True, 'import numpy as np\n'), ((4969, 5005), 'numpy.array', 'np.array', (['[[1.0, -1.0], [2.0, -2.0]]'], {}), '([[1.0, -1.0], [2.0, -2.0]])\n', (4977, 5005), True, 'import numpy as np\n'), ((5102, 5143), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['data', 'unsdata'], {}), '(data, unsdata)\n', (5128, 5143), True, 'import numpy as np\n'), ((5208, 5255), 'numpy.array', 'np.array', (['[[1.0, -2.0, 4.0], [-1.0, -8.0, 2.0]]'], {}), '([[1.0, -2.0, 4.0], [-1.0, -8.0, 2.0]])\n', (5216, 5255), True, 'import numpy as np\n'), ((5378, 5426), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['data[:, 1:]', 'unsdata'], {}), '(data[:, 1:], unsdata)\n', (5404, 5426), True, 'import numpy as np\n'), ((327, 356), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (340, 356), False, 'import pytest\n'), ((1260, 1285), 'numpy.random.RandomState', 
'np.random.RandomState', (['(17)'], {}), '(17)\n', (1281, 1285), True, 'import numpy as np\n'), ((3683, 3698), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (3691, 3698), True, 'import numpy as np\n'), ((3737, 3752), 'numpy.array', 'np.array', (['[2.0]'], {}), '([2.0])\n', (3745, 3752), True, 'import numpy as np\n'), ((4042, 4062), 'numpy.array', 'np.array', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (4050, 4062), True, 'import numpy as np\n'), ((4104, 4124), 'numpy.array', 'np.array', (['[2.0, 4.0]'], {}), '([2.0, 4.0])\n', (4112, 4124), True, 'import numpy as np\n')] |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import numpy as np
import models
import tensorflow as tf
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS

# Number of trained experts per class used by all MoNN* models below.
# A non-trained dummy expert that always predicts 0 is appended on top of
# these in each model's gating softmax.
flags.DEFINE_integer(
    "MoNN_num_experts", 4,
    "The number of mixtures (excluding the dummy 'expert') used for MoNNs.")
#%% helper functions
def weight_variable(shape):
  """Create a weight variable drawn from a truncated normal.

  The standard deviation scales as 1/sqrt(2 * fan_in), where fan_in is
  shape[0].
  """
  stddev = 1.0 / np.sqrt(2 * shape[0])
  return tf.Variable(tf.truncated_normal(shape, stddev=stddev))
def bias_variable(shape):
  """Create a bias variable initialized to the constant 0.1 / shape[0]."""
  return tf.Variable(tf.constant(0.1 / shape[0], shape=shape))
def variable_summaries(var):
  """Attach mean/stddev/max/min/histogram summaries to `var`.

  All summaries are grouped under a 'summaries' name scope so they appear
  together in TensorBoard.
  """
  with tf.name_scope('summaries'):
    mu = tf.reduce_mean(var)
    tf.summary.scalar('mean', mu)
    with tf.name_scope('stddev'):
      sigma = tf.sqrt(tf.reduce_mean(tf.square(var - mu)))
    tf.summary.scalar('stddev', sigma)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
  """Fully-connected layer with TensorBoard instrumentation.

  Computes act(input_tensor @ W + b) under the `layer_name` name scope,
  records weight/bias/pre-activation/activation summaries, and returns the
  activations together with an L2 loss term on the weights (for the caller
  to scale and add to its training loss).
  """
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      W = weight_variable([input_dim, output_dim])
      variable_summaries(W)
      reg = tf.nn.l2_loss(W)
    with tf.name_scope('biases'):
      b = bias_variable([output_dim])
      variable_summaries(b)
    with tf.name_scope('Wx_plus_b'):
      pre_act = tf.matmul(input_tensor, W) + b
      tf.summary.histogram('pre_activations', pre_act)
    out = act(pre_act, name='activation')
    tf.summary.histogram('activations', out)
  return out, reg
#
# First part contains models we have used,
# later there are some models we have tried/experimented with
#
# MoNN3L
# MoNN2Lw
# MoNN3Lw
# MoNN4Ln
#
class MoNN3L(models.BaseModel):
  """Mixture of experts whose experts sit on a 3-layer ReLU tower."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Builds the mixture-of-NN-experts graph.

    Per class, a softmax gates over `num_mixtures` trained logistic experts
    plus one untrained dummy expert that always predicts 0. The experts are
    computed from a 3 x 4096 ReLU tower; the gates read the raw input.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    l2_reg = slim.l2_regularizer(l2_penalty)

    # Per-(class, expert) gating logits, computed straight from the input.
    gates = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="gates")

    # 3-layer ReLU tower feeding the experts.
    tower = model_input
    for scope in ("FC_HA1", "FC_HA2", "FC_HA3"):
      tower = slim.fully_connected(
          tower, 4096, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope=scope)

    experts = slim.fully_connected(
        tower,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="experts")

    # (Batch * #Labels) x (num_mixtures + 1); last column is the dummy expert.
    gating = tf.nn.softmax(tf.reshape(gates, [-1, num_mixtures + 1]))
    # (Batch * #Labels) x num_mixtures per-expert probabilities.
    expert_probs = tf.nn.sigmoid(tf.reshape(experts, [-1, num_mixtures]))
    flat_probs = tf.reduce_sum(gating[:, :num_mixtures] * expert_probs, 1)
    return {"predictions": tf.reshape(flat_probs, [-1, vocab_size])}
# a wide model hoping to memorize rare labels better
class MoNN2Lw(models.BaseModel):
  """Mixture of experts on a wide 2-layer tower (aimed at rare labels)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   **unused_params):
    """Builds a wide mixture-of-NN-experts graph.

    Per class, a softmax gates over `num_mixtures` trained logistic experts
    plus one untrained dummy expert that always predicts 0. The experts sit
    on a wide two-layer ReLU tower (2305*6 then 2305*3 units); the gates
    read the raw input.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    l2_reg = slim.l2_regularizer(l2_penalty)

    gates = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="MoN2w_gates")

    # Wide 2-layer ReLU tower.
    tower = model_input
    for units, scope in ((2305 * 6, 'MoN2w_H1'), (2305 * 3, 'MoN2w_H2')):
      tower = slim.fully_connected(
          tower, units, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope=scope)

    experts = slim.fully_connected(
        tower,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="MoN2_experts")

    # (Batch * #Labels) x (num_mixtures + 1); last column is the dummy expert.
    gating = tf.nn.softmax(tf.reshape(gates, [-1, num_mixtures + 1]))
    # (Batch * #Labels) x num_mixtures per-expert probabilities.
    expert_probs = tf.nn.sigmoid(tf.reshape(experts, [-1, num_mixtures]))
    flat_probs = tf.reduce_sum(gating[:, :num_mixtures] * expert_probs, 1)
    return {"predictions": tf.reshape(flat_probs, [-1, vocab_size])}
class MoNN3Lw(models.BaseModel):
  """Mixture of experts on a 3-layer bottleneck-shaped ReLU tower."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   **unused_params):
    """Builds the mixture-of-NN-experts graph.

    The expert tower is wide / narrow / wide (2305*8, 2305, 2305*4 units);
    the per-class gating softmax reads the raw input and includes a dummy
    all-zero expert in addition to the `num_mixtures` trained ones.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    l2_reg = slim.l2_regularizer(l2_penalty)

    gates = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="gates")

    # Wide -> narrow -> wide ReLU tower.
    tower = model_input
    for units, scope in ((2305 * 8, 'FC_HA1'),
                         (2305, 'FC_HA2'),
                         (2305 * 4, 'FC_HA3')):
      tower = slim.fully_connected(
          tower, units, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope=scope)

    experts = slim.fully_connected(
        tower,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="experts")

    # (Batch * #Labels) x (num_mixtures + 1); last column is the dummy expert.
    gating = tf.nn.softmax(tf.reshape(gates, [-1, num_mixtures + 1]))
    # (Batch * #Labels) x num_mixtures per-expert probabilities.
    expert_probs = tf.nn.sigmoid(tf.reshape(experts, [-1, num_mixtures]))
    flat_probs = tf.reduce_sum(gating[:, :num_mixtures] * expert_probs, 1)
    return {"predictions": tf.reshape(flat_probs, [-1, vocab_size])}
class MoNN4Ln(models.BaseModel):
  """Mixture of experts on a narrow 4-layer (4 x 2048) ReLU tower."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Builds the mixture-of-NN-experts graph.

    Per class, a softmax gates over `num_mixtures` trained logistic experts
    plus one untrained dummy expert that always predicts 0. The experts sit
    on four 2048-unit ReLU layers; the gates read the raw input.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    l2_reg = slim.l2_regularizer(l2_penalty)

    gates = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="gates")

    # 4-layer ReLU tower, 2048 units per layer.
    tower = model_input
    for scope in ("FC_HA1", "FC_HA2", "FC_HA3", "FC_HA4"):
      tower = slim.fully_connected(
          tower, 2048, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope=scope)

    experts = slim.fully_connected(
        tower,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="experts")

    # (Batch * #Labels) x (num_mixtures + 1); last column is the dummy expert.
    gating = tf.nn.softmax(tf.reshape(gates, [-1, num_mixtures + 1]))
    # (Batch * #Labels) x num_mixtures per-expert probabilities.
    expert_probs = tf.nn.sigmoid(tf.reshape(experts, [-1, num_mixtures]))
    flat_probs = tf.reduce_sum(gating[:, :num_mixtures] * expert_probs, 1)
    return {"predictions": tf.reshape(flat_probs, [-1, vocab_size])}
#
# Abandoned Experiments
#
#%%
class MyNNModel0(models.BaseModel):
  """One-hidden-layer feed-forward classifier with L2 regularization."""

  def create_model(self, model_input, vocab_size, l2_penalty=1e-4, **unused_params):
    """Creates a 2400-unit single-hidden-layer model.

    Args:
      model_input: 'batch' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      l2_penalty: L2 regularization strength for both dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size sigmoid outputs.
    """
    with tf.name_scope('MyNNModel0'):
      l2_reg = slim.l2_regularizer(l2_penalty)
      hidden = slim.fully_connected(
          model_input, 2400, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope='FC1')
      predictions = slim.fully_connected(
          hidden, vocab_size, activation_fn=tf.nn.sigmoid,
          weights_regularizer=l2_reg, scope='FC2')
    return {"predictions": predictions}
#%%
class MyNNModel1(models.BaseModel):
  """Three-hidden-layer ReLU network with L2 regularization."""

  def create_model(self, model_input, vocab_size, l2_penalty=1e-4,
                   is_train=True, **unused_params):
    """Creates a 3-hidden-layer feed-forward model (1152/2248/3096 units).

    Cleanup: the dead local `keep_prob` and the commented-out dropout line
    were removed; no dropout was ever applied, so behavior is unchanged.

    Args:
      model_input: 'batch' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      l2_penalty: L2 regularization strength for all dense layers.
      is_train: Unused; kept for interface compatibility with callers that
        pass a training flag.

    Returns:
      A dict with 'predictions': batch_size x vocab_size sigmoid outputs.
    """
    with tf.name_scope('MyNNModel1'):
      l2_reg = slim.l2_regularizer(l2_penalty)
      A1 = slim.fully_connected(
          model_input, 1152, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope='FC_H1')
      A2 = slim.fully_connected(
          A1, 2248, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope='FC_H2')
      A3 = slim.fully_connected(
          A2, 3096, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope='FC_H3')
      output = slim.fully_connected(
          A3, vocab_size, activation_fn=tf.nn.sigmoid,
          weights_regularizer=l2_reg, scope='FC_P')
    return {"predictions": output}
#%%
class MyNNModel2(models.BaseModel):
  """Two-hidden-layer NN built from nn_layer, with explicit L2 loss."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-4,
                   **unused_params):
    """Creates a two-hidden-layer (3600/3600) network via nn_layer.

    Generalization: the first layer's input width was hard-coded as
    1024+128; it is now read from `model_input`'s static shape, so the
    model works for any feature size (identical behavior for 1152-dim
    inputs).

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Unused; kept for interface compatibility.
      l2_penalty: Scale applied to the summed per-layer L2 losses.

    Returns:
      A dict with 'predictions' (batch_size x vocab_size sigmoid outputs)
      and 'regularization_loss' (scaled sum of the layers' weight L2).
    """
    # Infer the feature dimensionality instead of hard-coding it.
    input_dim = model_input.get_shape().as_list()[1]
    h1Units = 3600
    A1, reg1 = nn_layer(model_input, input_dim, h1Units, 'Hidden1', act=tf.nn.relu)
    h2Units = 3600
    A2, reg2 = nn_layer(A1, h1Units, h2Units, 'Hidden2', act=tf.nn.relu)
    output, reg3 = nn_layer(A2, h2Units, vocab_size, 'Pred', act=tf.nn.sigmoid)
    return {"predictions": output,
            "regularization_loss": l2_penalty * (reg1 + reg2 + reg3)}
#%%
def nn_layer2(input_tensor, input_dim, output_dim, var_scope, act=tf.nn.relu):
  """Fully-connected layer under a variable scope, without summaries.

  Returns the activations act(input_tensor @ W + b) together with an L2
  loss term on the weights (for the caller to scale into its loss).
  """
  with tf.variable_scope(var_scope):
    W = weight_variable([input_dim, output_dim])
    reg = tf.nn.l2_loss(W)
    b = bias_variable([output_dim])
    out = act(tf.matmul(input_tensor, W) + b, name='activation')
  return out, reg
class MyNNModel3(models.BaseModel):
  """A simple NN models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-4,
                   **unused_params):
    """Creates a three-hidden-layer Neural Network model.

    Hidden layers are 3600/2400/2400 units, built with nn_layer2 inside the
    'MyNNModel3' variable scope; the prediction layer uses nn_layer (which
    additionally emits TensorBoard summaries).

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Unused here; kept for interface compatibility.
      l2_penalty: Scale applied to the summed per-layer L2 losses.

    Returns:
      A dict with 'predictions' (batch_size x vocab_size sigmoid outputs)
      and 'regularization_loss' (scaled sum of the layers' weight L2).
    """
    #A1 = slim.fully_connected(
    # model_input, 800, activation_fn=tf.nn.sigmoid,
    # weights_regularizer=slim.l2_regularizer(l2_penalty),
    # scope='hidden1')
    # output = slim.fully_connected(
    # A1, vocab_size, activation_fn=tf.nn.sigmoid,
    # weights_regularizer=slim.l2_regularizer(l2_penalty))
    with tf.variable_scope('MyNNModel3'):
      h1Units = 3600
      # NOTE(review): the input width is hard-coded as 1024+128; this only
      # works for 1152-dim features — confirm against the input pipeline.
      A1,reg1 = nn_layer2(model_input, 1024+128, h1Units, 'Hidden1', act=tf.nn.relu)
      h2Units = 2400
      A2, reg2 = nn_layer2(A1, h1Units, h2Units, 'Hidden2', act=tf.nn.relu)
      h3Units = 2400
      A3, reg3 = nn_layer2(A2, h2Units, h3Units, 'Hidden3', act=tf.nn.relu)
      # NOTE(review): final layer uses nn_layer (with summaries) rather than
      # nn_layer2, and the scope name 'ProdictionLayer' is misspelled;
      # renaming it would break existing checkpoints, so it is left as-is.
      output, reg4 = nn_layer(A3, h3Units, vocab_size, 'ProdictionLayer', act=tf.nn.sigmoid)
    return {"predictions": output,
            "regularization_loss":l2_penalty*(reg1+reg2+reg3+reg4)}
#%%
class MoNN2L(models.BaseModel):
  """Mixture of experts on a 2-layer (2 x 4096) ReLU tower."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Builds the mixture-of-NN-experts graph.

    Per class, a softmax gates over `num_mixtures` trained logistic experts
    plus one untrained dummy expert that always predicts 0. The experts sit
    on two 4096-unit ReLU layers; the gates read the raw input.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    l2_reg = slim.l2_regularizer(l2_penalty)

    gates = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="gates")

    # 2-layer ReLU tower.
    tower = model_input
    for scope in ('FC_H1', 'FC_H2'):
      tower = slim.fully_connected(
          tower, 4096, activation_fn=tf.nn.relu,
          weights_regularizer=l2_reg, scope=scope)

    experts = slim.fully_connected(
        tower,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="experts")

    # (Batch * #Labels) x (num_mixtures + 1); last column is the dummy expert.
    gating = tf.nn.softmax(tf.reshape(gates, [-1, num_mixtures + 1]))
    # (Batch * #Labels) x num_mixtures per-expert probabilities.
    expert_probs = tf.nn.sigmoid(tf.reshape(experts, [-1, num_mixtures]))
    flat_probs = tf.reduce_sum(gating[:, :num_mixtures] * expert_probs, 1)
    return {"predictions": tf.reshape(flat_probs, [-1, vocab_size])}
class MoNN2L_L1(models.BaseModel):
  """MoNN2L variant with combined L1/L2 regularization on the second layer."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Builds the mixture-of-NN-experts graph.

    Fix: `slim.l1_l2_regularizer` was previously called with a single
    positional argument, which set scale_l1=l2_penalty but left scale_l2 at
    its default of 1.0 — an enormous L2 penalty on FC_H2's weights. Both
    scales are now set to `l2_penalty` explicitly.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: Number of trained experts (a dummy all-zero expert is
        added on top); defaults to FLAGS.MoNN_num_experts.
      l2_penalty: Regularization strength (used for both L1 and L2 terms).

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    h1Units = 4096
    A1 = slim.fully_connected(
        model_input, h1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1')
    h2Units = 4096
    A2 = slim.fully_connected(
        A1, h2Units, activation_fn=tf.nn.relu,
        # Both L1 and L2 scales set explicitly; see docstring for why.
        weights_regularizer=slim.l1_l2_regularizer(l2_penalty, l2_penalty),
        scope='FC_H2')
    expert_activations = slim.fully_connected(
        A2,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
from tensorflow import logging
class MoNN2Drop(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Mixture of NN experts on a 2-layer tower with hidden-layer dropout.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: Per-layer dropout keep probabilities; index 0 is
        intended for the input, indices 1 and 2 for the hidden layers.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN2Drop " + str(layers_keep_probs))
    # NOTE(review): `drop_out` is never fed to any layer below — both the
    # gates and FC_H1 read `model_input` directly, so input dropout is
    # effectively disabled. Confirm whether this is intentional.
    drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="var_dropout")
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    h1Units = 4096
    A1 = slim.fully_connected(
        model_input, h1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1')
    h2Units = 4096
    A1a = tf.nn.dropout(A1, layers_keep_probs[1])
    # NOTE: tf.nn.crelu concatenates relu(x) and relu(-x), so A2 is
    # 2*h2Units wide (unlike the plain-relu MoNN2L).
    A2 = slim.fully_connected(
        A1a, h2Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H2')
    A2a = tf.nn.dropout(A2, layers_keep_probs[2])
    expert_activations = slim.fully_connected(
        A2a,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
class MoNN2DropBNorm(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   is_training=True,
                   **unused_params):
    """Mixture of NN experts with batch norm and hidden-layer dropout.

    The input is batch-normalized before the first hidden layer; batch norm
    and dropout are also applied between the hidden layers.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: Per-layer dropout keep probabilities; index 0 is
        intended for the input, indices 1 and 2 for the hidden layers.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.
      is_training: Passed to slim.batch_norm to select train/inference mode.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN2Drop " + str(layers_keep_probs))
    # NOTE(review): `drop_out` is never fed to any layer below — the gates
    # and the batch-norm both read `model_input` directly, so input dropout
    # is effectively disabled. Confirm whether this is intentional.
    drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="input/dropout")
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    model_input_norm = slim.batch_norm(
        model_input,
        center=True,
        scale=True,
        is_training=is_training,
        scope='input/batch_norm')
    h1Units = 4096
    A1 = slim.fully_connected(
        model_input_norm, h1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1')
    h2Units = 4096
    A1a = tf.nn.dropout(A1, layers_keep_probs[1], name='layer1/dropout')
    A1b = slim.batch_norm(
        A1a,
        center=True,
        scale=True,
        is_training=is_training,
        scope='layer1/batch_norm')
    A2 = slim.fully_connected(
        A1b, h2Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H2')
    A2a = tf.nn.dropout(A2, layers_keep_probs[2], name='layer2/dropout')
    expert_activations = slim.fully_connected(
        A2a,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
class MoNN2DropBNorm1Crelu(models.BaseModel):
  """MoNN2DropBNorm variant using crelu activations in both hidden layers."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   is_training=True,
                   **unused_params):
    """Mixture of NN experts with batch norm, dropout and crelu layers.

    Fix: `model_input_norm` was computed but never used — FC_H1 read the
    raw `model_input`, unlike the sibling MoNN2DropBNorm which feeds the
    normalized input to its first layer. FC_H1 now consumes
    `model_input_norm`, matching the sibling and the class's intent.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: Per-layer dropout keep probabilities; index 0 is
        intended for the input, indices 1 and 2 for the hidden layers.
      num_mixtures: Number of trained experts; defaults to
        FLAGS.MoNN_num_experts.
      l2_penalty: L2 regularization strength applied to all dense layers.
      is_training: Passed to slim.batch_norm to select train/inference mode.

    Returns:
      A dict with 'predictions': batch_size x vocab_size probabilities.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN2Drop " + str(layers_keep_probs))
    # NOTE(review): `drop_out` is still unused (gates and batch norm read
    # `model_input` directly); kept as-is pending confirmation of intent.
    drop_out = tf.nn.dropout(model_input, layers_keep_probs[0],name="input/dropout")
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    model_input_norm = slim.batch_norm(
        model_input,
        center=True,
        scale=True,
        is_training=is_training,
        scope='input/batch_norm')
    h1Units = 4096
    # Feed the batch-normalized input (see docstring Fix note).
    A1 = slim.fully_connected(
        model_input_norm, h1Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1')
    h2Units = 4096
    A1a = tf.nn.dropout(A1, layers_keep_probs[1], name='layer1/dropout')
    A1b = slim.batch_norm(
        A1a,
        center=True,
        scale=True,
        is_training=is_training,
        scope='layer1/batch_norm')
    A2 = slim.fully_connected(
        A1b, h2Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H2')
    A2a = tf.nn.dropout(A2, layers_keep_probs[2], name='layer2/dropout')
    expert_activations = slim.fully_connected(
        A2a,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
class MoNN4L(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   **unused_params):
    """Creates a Mixture of (Logistic) Experts model.

    Four 4096-unit ReLU hidden layers feed the expert layer; a single
    linear gate mixes the experts per class. One of the mixture's experts
    is an untrained dummy that always predicts 0.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    # Hidden tower: four identical 4096-unit ReLU layers, FC_HA1..FC_HA4.
    hidden = model_input
    for layer_index in range(1, 5):
      hidden = slim.fully_connected(
          hidden, 4096, activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope='FC_HA%d' % layer_index)
    expert_activations = slim.fully_connected(
        hidden,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    # Per-(example, label): softmax gate over experts (+1 dummy column,
    # sliced away below) times per-expert sigmoid predictions.
    gating_distribution = tf.nn.softmax(
        tf.reshape(gate_activations, [-1, num_mixtures + 1]))
    expert_distribution = tf.nn.sigmoid(
        tf.reshape(expert_activations, [-1, num_mixtures]))
    probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    return {"predictions": tf.reshape(probabilities_by_class_and_batch,
                                       [-1, vocab_size])}
class MoNN4LDropG2L(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   is_training=True,
                   **unused_params):
    """Creates a Mixture of (Logistic) Experts model with a 2-layer gate.

    Four 4096-unit ReLU hidden layers with dropout between them feed the
    expert layer; the gate has one vocab_size-wide hidden layer. One of
    the mixture's experts is an untrained dummy that always predicts 0.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: dropout keep probabilities; index 0 applies to the
        input, indices 1..3 to the outputs of hidden layers 1..3.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.
      is_training: accepted for signature compatibility; unused here
        (this variant has no batch-norm layers).

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN4LDrop " + str(layers_keep_probs))
    drop_model_input = tf.nn.dropout(model_input, layers_keep_probs[0])
    # Two-layer gating network.
    X1 = slim.fully_connected(
        drop_model_input,
        vocab_size ,
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates_l1")
    gate_activations = slim.fully_connected(
        X1,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates_l2")
    a1Units = 4096
    A1 = slim.fully_connected(
        drop_model_input, a1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_HA1')
    a2Units = 4096
    A1d = tf.nn.dropout(A1, layers_keep_probs[1])
    A2 = slim.fully_connected(
        A1d, a2Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_HA2')
    # BUG FIX: the third-layer dropout was applied to A1 (the first layer's
    # output), silently discarding FC_HA2; apply it to A2 instead, matching
    # the sibling model MoNN4LDropG3L.
    A2d = tf.nn.dropout(A2, layers_keep_probs[2])
    A3 = slim.fully_connected(
        A2d, a2Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_HA3')
    A3d = tf.nn.dropout(A3, layers_keep_probs[3])
    A4 = slim.fully_connected(
        A3d, a2Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_HA4')
    expert_activations = slim.fully_connected(
        A4,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    # Per-(example, label) softmax over experts (+1 dummy expert column,
    # sliced away before the weighted sum).
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
class MoNN4LDropG3L(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   is_training=True,
                   **unused_params):
    """Creates a Mixture of (Logistic) Experts model with a 3-layer gate.

    Four 4096-unit ReLU hidden layers with dropout between them feed the
    expert layer; the gate has two vocab_size-wide hidden layers. One of
    the mixture's experts is an untrained dummy that always predicts 0.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: dropout keep probabilities; index 0 applies to the
        input, indices 1..3 to the outputs of hidden layers 1..3.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.
      is_training: accepted for signature compatibility; unused here.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN4LDrop " + str(layers_keep_probs))
    drop_model_input = tf.nn.dropout(model_input, layers_keep_probs[0])
    # Three-layer gating network: two vocab_size-wide linear layers feeding
    # the final per-class gate activations.
    gate_hidden = drop_model_input
    for gate_scope in ("gates_l1", "gates_l2"):
      gate_hidden = slim.fully_connected(
          gate_hidden,
          vocab_size,
          activation_fn=None,
          biases_initializer=None,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope=gate_scope)
    gate_activations = slim.fully_connected(
        gate_hidden,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates_activation")
    # Expert tower: four 4096-unit ReLU layers (FC_HA1..FC_HA4), with
    # dropout after each of the first three.
    hidden = drop_model_input
    for layer_index in range(1, 5):
      hidden = slim.fully_connected(
          hidden, 4096, activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope='FC_HA%d' % layer_index)
      if layer_index < 4:
        hidden = tf.nn.dropout(hidden, layers_keep_probs[layer_index])
    expert_activations = slim.fully_connected(
        hidden,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    # Per-(example, label): softmax gate over experts (+1 dummy column,
    # sliced away below) times per-expert sigmoid predictions.
    gating_distribution = tf.nn.softmax(
        tf.reshape(gate_activations, [-1, num_mixtures + 1]))
    expert_distribution = tf.nn.sigmoid(
        tf.reshape(expert_activations, [-1, num_mixtures]))
    probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    return {"predictions": tf.reshape(probabilities_by_class_and_batch,
                                       [-1, vocab_size])}
from tensorflow import logging
class MoNN2a128r1024G1L(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization).

  Two-tower variant: input columns [0, 128) feed a narrow hidden tower and
  columns [128, 1152) a wider one (scope names suggest audio/rgb features;
  confirm against the input pipeline). The tower outputs are concatenated
  with the normalized raw inputs before the expert layer.
  """

  def create_model(self,
                   model_input,
                   vocab_size,
                   layers_keep_probs,
                   num_mixtures=None,
                   l2_penalty=1e-6,
                   is_training=True,
                   **unused_params):
    """Creates the two-tower Mixture of (Logistic) Experts model.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features;
        assumes num_features >= 1152 (128 + 1024 split) -- TODO confirm.
      vocab_size: The number of classes in the dataset.
      layers_keep_probs: dropout keep probabilities; index 0 applies to the
        raw inputs, 1 and 2 inside the towers, 3 to the merged layer.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.
      is_training: whether the batch-norm layers run in training mode.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    logging.info("MoNN2Drop " + str(layers_keep_probs))
    logging.info(model_input.shape)
    # (The original built an unused whole-input dropout tensor named
    # "var_dropout"; that dead code has been removed.)
    # Split into the two feature groups; apply input dropout + batch norm
    # to each independently.
    inputA = model_input[:,0:128]
    inputB = model_input[:,128:1152]
    inputAd = tf.nn.dropout(inputA, layers_keep_probs[0])
    inputBd = tf.nn.dropout(inputB, layers_keep_probs[0])
    inputAdn = slim.batch_norm(
        inputAd,
        center=True,
        scale=True,
        is_training=is_training,
        scope='inputAd/batch_norm')
    inputBdn = slim.batch_norm(
        inputBd,
        center=True,
        scale=True,
        is_training=is_training,
        scope='inputBd/batch_norm')
    # Gate: one vocab_size-wide linear layer over both normalized inputs.
    X1 = slim.fully_connected(
        tf.concat([inputAdn,inputBdn],1),
        vocab_size ,
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates_l1")
    gate_activations = slim.fully_connected(
        X1,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    # Tower A (first 128 features): 512 relu -> norm + dropout -> 512 crelu.
    a_h1Units = 512
    A1 = slim.fully_connected(
        inputAdn, a_h1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1_audio')
    a_h2Units = 512
    A1n = slim.batch_norm(
        A1,
        center=True,
        scale=True,
        is_training=is_training,
        scope='A1/batch_norm')
    A1a = tf.nn.dropout(A1n, layers_keep_probs[1])
    logging.info("A1a")
    logging.info(A1a.shape)
    A2 = slim.fully_connected(
        A1a, a_h2Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H2_audio')
    A2n = slim.batch_norm(
        A2,
        center=True,
        scale=True,
        is_training=is_training,
        scope='A2/batch_norm')
    logging.info("A2")
    logging.info(A2.shape)
    # Tower B (remaining 1024 features): 2048 relu -> norm + dropout ->
    # 2048 crelu.
    b_h1Units = 2048
    B1 = slim.fully_connected(
        inputBdn, b_h1Units, activation_fn=tf.nn.relu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H1_rgb')
    B1n = slim.batch_norm(
        B1,
        center=True,
        scale=True,
        is_training=is_training,
        scope='B1/batch_norm')
    b_h2Units = 2048
    B1a = tf.nn.dropout(B1n, layers_keep_probs[1])
    logging.info("B1a")
    logging.info(B1a.shape)
    B2 = slim.fully_connected(
        B1a, b_h2Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H2_rgb')
    B2n = slim.batch_norm(
        B2,
        center=True,
        scale=True,
        is_training=is_training,
        scope='B2/batch_norm')
    # Merge: normalized raw inputs plus both (dropped-out) tower outputs.
    A2na = tf.nn.dropout(A2n, layers_keep_probs[2])
    B2na = tf.nn.dropout(B2n, layers_keep_probs[2])
    logging.info(A2.shape)
    logging.info(B2.shape)
    C3 = tf.concat([inputAdn, inputBdn, A2na, B2na], 1)
    h3Units = 4096
    C3a = slim.fully_connected(
        C3, h3Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H3_concat')
    C3ad = tf.nn.dropout(C3a, layers_keep_probs[3])
    h4Units = 4096
    # BUG FIX: FC_H4_concat previously consumed C3a (pre-dropout), leaving
    # C3ad dead; feed it the dropped-out activations as evidently intended.
    C4a = slim.fully_connected(
        C3ad, h4Units, activation_fn=tf.nn.crelu,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope='FC_H4_concat')
    expert_activations = slim.fully_connected(
        C4a,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    # Per-(example, label) softmax over experts (+1 dummy expert column,
    # sliced away before the weighted sum).
    gating_distribution = tf.nn.softmax(tf.reshape(
        gate_activations,
        [-1, num_mixtures + 1]))  # (Batch * #Labels) x (num_mixtures + 1)
    expert_distribution = tf.nn.sigmoid(tf.reshape(
        expert_activations,
        [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures
    final_probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(final_probabilities_by_class_and_batch,
                                     [-1, vocab_size])
    return {"predictions": final_probabilities}
class MoNN4Lw(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   **unused_params):
    """Four wide ReLU hidden layers feeding a mixture-of-experts output.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates")
    # Hidden tower FC_HA1..FC_HA4 with widths 8x, 1x, 4x, 2x of 2305 units.
    hidden = model_input
    layer_widths = (2305 * 8, 2305, 2305 * 4, 2305 * 2)
    for layer_index, width in enumerate(layer_widths, start=1):
      hidden = slim.fully_connected(
          hidden, width, activation_fn=tf.nn.relu,
          weights_regularizer=slim.l2_regularizer(l2_penalty),
          scope='FC_HA%d' % layer_index)
    expert_activations = slim.fully_connected(
        hidden,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="experts")
    # Per-(example, label): softmax gate over experts (+1 dummy column,
    # sliced away below) times per-expert sigmoid predictions.
    gating_distribution = tf.nn.softmax(
        tf.reshape(gate_activations, [-1, num_mixtures + 1]))
    expert_distribution = tf.nn.sigmoid(
        tf.reshape(expert_activations, [-1, num_mixtures]))
    probabilities_by_class_and_batch = tf.reduce_sum(
        gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    return {"predictions": tf.reshape(probabilities_by_class_and_batch,
                                       [-1, vocab_size])}
class MoNN1Lvw(models.BaseModel):
  """A softmax over a mixture of logistic models (with L2 regularization)."""

  def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   **unused_params):
    """One very wide (2305 * 64 unit) ReLU hidden layer MoE model.

    Args:
      model_input: 'batch_size' x 'num_features' matrix of input features.
      vocab_size: The number of classes in the dataset.
      num_mixtures: The number of mixtures (excluding the dummy 'expert').
      l2_penalty: How much to penalize the squared magnitudes of parameter
        values.

    Returns:
      A dictionary with a tensor containing the probability predictions of
      the model in the 'predictions' key, shaped batch_size x vocab_size.
    """
    num_mixtures = num_mixtures or FLAGS.MoNN_num_experts
    # All fully connected layers share the same L2 weight regularizer.
    l2_reg = slim.l2_regularizer(l2_penalty)
    gate_activations = slim.fully_connected(
        model_input,
        vocab_size * (num_mixtures + 1),
        activation_fn=None,
        biases_initializer=None,
        weights_regularizer=l2_reg,
        scope="gates")
    hidden = slim.fully_connected(
        model_input, 2305 * 64, activation_fn=tf.nn.relu,
        weights_regularizer=l2_reg,
        scope='FC_HA1')
    expert_activations = slim.fully_connected(
        hidden,
        vocab_size * num_mixtures,
        activation_fn=None,
        weights_regularizer=l2_reg,
        scope="experts")
    # Per-(example, label): softmax gate over experts (+1 dummy column,
    # sliced away below) times per-expert sigmoid predictions.
    gating = tf.nn.softmax(
        tf.reshape(gate_activations, [-1, num_mixtures + 1]))
    experts = tf.nn.sigmoid(
        tf.reshape(expert_activations, [-1, num_mixtures]))
    flat_probabilities = tf.reduce_sum(
        gating[:, :num_mixtures] * experts, 1)
    return {"predictions": tf.reshape(flat_probabilities, [-1, vocab_size])}
| [
"tensorflow.reduce_sum",
"tensorflow.logging.info",
"tensorflow.reshape",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.reduce_max",
"tensorflow.contrib.slim.batch_norm",
"tensorflow.contrib.slim.l2_regularizer",
"tensorflow.variable_scope",
"tensorflow.concat",
"tensorflow.summary.his... | [((806, 926), 'tensorflow.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""MoNN_num_experts"""', '(4)', '"""The number of mixtures (excluding the dummy \'expert\') used for MoNNs."""'], {}), '(\'MoNN_num_experts\', 4,\n "The number of mixtures (excluding the dummy \'expert\') used for MoNNs.")\n', (826, 926), False, 'from tensorflow import flags\n'), ((1133, 1153), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1144, 1153), True, 'import tensorflow as tf\n'), ((1261, 1301), 'tensorflow.constant', 'tf.constant', (['(0.1 / shape[0])'], {'shape': 'shape'}), '(0.1 / shape[0], shape=shape)\n', (1272, 1301), True, 'import tensorflow as tf\n'), ((1311, 1331), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (1322, 1331), True, 'import tensorflow as tf\n'), ((1452, 1478), 'tensorflow.name_scope', 'tf.name_scope', (['"""summaries"""'], {}), "('summaries')\n", (1465, 1478), True, 'import tensorflow as tf\n'), ((1495, 1514), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['var'], {}), '(var)\n', (1509, 1514), True, 'import tensorflow as tf\n'), ((1523, 1554), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mean"""', 'mean'], {}), "('mean', mean)\n", (1540, 1554), True, 'import tensorflow as tf\n'), ((1564, 1587), 'tensorflow.name_scope', 'tf.name_scope', (['"""stddev"""'], {}), "('stddev')\n", (1577, 1587), True, 'import tensorflow as tf\n'), ((1661, 1696), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""stddev"""', 'stddev'], {}), "('stddev', stddev)\n", (1678, 1696), True, 'import tensorflow as tf\n'), ((1811, 1849), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""histogram"""', 'var'], {}), "('histogram', var)\n", (1831, 1849), True, 'import tensorflow as tf\n'), ((2278, 2303), 'tensorflow.name_scope', 'tf.name_scope', (['layer_name'], {}), '(layer_name)\n', (2291, 2303), True, 'import tensorflow as tf\n'), ((5742, 5819), 'tensorflow.reduce_sum', 'tf.reduce_sum', 
(['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (5755, 5819), True, 'import tensorflow as tf\n'), ((5855, 5923), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (5865, 5923), True, 'import tensorflow as tf\n'), ((8469, 8546), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (8482, 8546), True, 'import tensorflow as tf\n'), ((8582, 8650), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (8592, 8650), True, 'import tensorflow as tf\n'), ((10479, 10556), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (10492, 10556), True, 'import tensorflow as tf\n'), ((10592, 10660), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (10602, 10660), True, 'import tensorflow as tf\n'), ((13725, 13802), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (13738, 13802), True, 'import tensorflow as tf\n'), ((13846, 13914), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (13856, 13914), True, 'import tensorflow as tf\n'), ((18390, 18418), 'tensorflow.variable_scope', 'tf.variable_scope', 
(['var_scope'], {}), '(var_scope)\n', (18407, 18418), True, 'import tensorflow as tf\n'), ((18499, 18521), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['weights'], {}), '(weights)\n', (18512, 18521), True, 'import tensorflow as tf\n'), ((22914, 22991), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (22927, 22991), True, 'import tensorflow as tf\n'), ((23027, 23095), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (23037, 23095), True, 'import tensorflow as tf\n'), ((24744, 24821), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (24757, 24821), True, 'import tensorflow as tf\n'), ((24857, 24925), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (24867, 24925), True, 'import tensorflow as tf\n'), ((25523, 25591), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {'name': '"""var_dropout"""'}), "(model_input, layers_keep_probs[0], name='var_dropout')\n", (25536, 25591), True, 'import tensorflow as tf\n'), ((26075, 26114), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[1]'], {}), '(A1, layers_keep_probs[1])\n', (26088, 26114), True, 'import tensorflow as tf\n'), ((26289, 26328), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A2', 'layers_keep_probs[2]'], {}), '(A2, layers_keep_probs[2])\n', (26302, 26328), True, 'import tensorflow as tf\n'), ((26884, 26961), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), 
'(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (26897, 26961), True, 'import tensorflow as tf\n'), ((26997, 27065), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (27007, 27065), True, 'import tensorflow as tf\n'), ((27675, 27745), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {'name': '"""input/dropout"""'}), "(model_input, layers_keep_probs[0], name='input/dropout')\n", (27688, 27745), True, 'import tensorflow as tf\n'), ((28022, 28131), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['model_input'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""input/batch_norm"""'}), "(model_input, center=True, scale=True, is_training=\n is_training, scope='input/batch_norm')\n", (28037, 28131), True, 'import tensorflow.contrib.slim as slim\n'), ((28395, 28457), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[1]'], {'name': '"""layer1/dropout"""'}), "(A1, layers_keep_probs[1], name='layer1/dropout')\n", (28408, 28457), True, 'import tensorflow as tf\n'), ((28468, 28569), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['A1a'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""layer1/batch_norm"""'}), "(A1a, center=True, scale=True, is_training=is_training,\n scope='layer1/batch_norm')\n", (28483, 28569), True, 'import tensorflow.contrib.slim as slim\n'), ((28792, 28854), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A2', 'layers_keep_probs[2]'], {'name': '"""layer2/dropout"""'}), "(A2, layers_keep_probs[2], name='layer2/dropout')\n", (28805, 28854), True, 'import tensorflow as tf\n'), ((29414, 29491), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 
1)\n', (29427, 29491), True, 'import tensorflow as tf\n'), ((29527, 29595), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (29537, 29595), True, 'import tensorflow as tf\n'), ((30210, 30280), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {'name': '"""input/dropout"""'}), "(model_input, layers_keep_probs[0], name='input/dropout')\n", (30223, 30280), True, 'import tensorflow as tf\n'), ((30557, 30666), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['model_input'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""input/batch_norm"""'}), "(model_input, center=True, scale=True, is_training=\n is_training, scope='input/batch_norm')\n", (30572, 30666), True, 'import tensorflow.contrib.slim as slim\n'), ((30926, 30988), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[1]'], {'name': '"""layer1/dropout"""'}), "(A1, layers_keep_probs[1], name='layer1/dropout')\n", (30939, 30988), True, 'import tensorflow as tf\n'), ((30999, 31100), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['A1a'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""layer1/batch_norm"""'}), "(A1a, center=True, scale=True, is_training=is_training,\n scope='layer1/batch_norm')\n", (31014, 31100), True, 'import tensorflow.contrib.slim as slim\n'), ((31323, 31385), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A2', 'layers_keep_probs[2]'], {'name': '"""layer2/dropout"""'}), "(A2, layers_keep_probs[2], name='layer2/dropout')\n", (31336, 31385), True, 'import tensorflow as tf\n'), ((31941, 32018), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (31954, 32018), True, 'import tensorflow as tf\n'), ((32054, 
32122), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (32064, 32122), True, 'import tensorflow as tf\n'), ((35188, 35265), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (35201, 35265), True, 'import tensorflow as tf\n'), ((35309, 35377), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (35319, 35377), True, 'import tensorflow as tf\n'), ((36924, 36972), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {}), '(model_input, layers_keep_probs[0])\n', (36937, 36972), True, 'import tensorflow as tf\n'), ((37818, 37857), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[1]'], {}), '(A1, layers_keep_probs[1])\n', (37831, 37857), True, 'import tensorflow as tf\n'), ((38076, 38115), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[2]'], {}), '(A1, layers_keep_probs[2])\n', (38089, 38115), True, 'import tensorflow as tf\n'), ((38334, 38373), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A3', 'layers_keep_probs[3]'], {}), '(A3, layers_keep_probs[3])\n', (38347, 38373), True, 'import tensorflow as tf\n'), ((39160, 39237), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (39173, 39237), True, 'import tensorflow as tf\n'), ((39281, 39349), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (39291, 39349), True, 'import tensorflow as tf\n'), ((40896, 40944), 
'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {}), '(model_input, layers_keep_probs[0])\n', (40909, 40944), True, 'import tensorflow as tf\n'), ((42038, 42077), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1', 'layers_keep_probs[1]'], {}), '(A1, layers_keep_probs[1])\n', (42051, 42077), True, 'import tensorflow as tf\n'), ((42296, 42335), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A2', 'layers_keep_probs[2]'], {}), '(A2, layers_keep_probs[2])\n', (42309, 42335), True, 'import tensorflow as tf\n'), ((42554, 42593), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A3', 'layers_keep_probs[3]'], {}), '(A3, layers_keep_probs[3])\n', (42567, 42593), True, 'import tensorflow as tf\n'), ((43380, 43457), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (43393, 43457), True, 'import tensorflow as tf\n'), ((43501, 43569), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (43511, 43569), True, 'import tensorflow as tf\n'), ((44222, 44290), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['model_input', 'layers_keep_probs[0]'], {'name': '"""var_dropout"""'}), "(model_input, layers_keep_probs[0], name='var_dropout')\n", (44235, 44290), True, 'import tensorflow as tf\n'), ((44295, 44326), 'tensorflow.logging.info', 'logging.info', (['model_input.shape'], {}), '(model_input.shape)\n', (44307, 44326), False, 'from tensorflow import logging\n'), ((44413, 44456), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputA', 'layers_keep_probs[0]'], {}), '(inputA, layers_keep_probs[0])\n', (44426, 44456), True, 'import tensorflow as tf\n'), ((44471, 44514), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['inputB', 'layers_keep_probs[0]'], {}), '(inputB, layers_keep_probs[0])\n', (44484, 44514), True, 
'import tensorflow as tf\n'), ((44530, 44636), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['inputAd'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""inputAd/batch_norm"""'}), "(inputAd, center=True, scale=True, is_training=is_training,\n scope='inputAd/batch_norm')\n", (44545, 44636), True, 'import tensorflow.contrib.slim as slim\n'), ((44690, 44796), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['inputBd'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""inputBd/batch_norm"""'}), "(inputBd, center=True, scale=True, is_training=is_training,\n scope='inputBd/batch_norm')\n", (44705, 44796), True, 'import tensorflow.contrib.slim as slim\n'), ((45574, 45671), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['A1'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""A1/batch_norm"""'}), "(A1, center=True, scale=True, is_training=is_training, scope\n ='A1/batch_norm')\n", (45589, 45671), True, 'import tensorflow.contrib.slim as slim\n'), ((45718, 45758), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A1n', 'layers_keep_probs[1]'], {}), '(A1n, layers_keep_probs[1])\n', (45731, 45758), True, 'import tensorflow as tf\n'), ((45763, 45782), 'tensorflow.logging.info', 'logging.info', (['"""A1a"""'], {}), "('A1a')\n", (45775, 45782), False, 'from tensorflow import logging\n'), ((45787, 45810), 'tensorflow.logging.info', 'logging.info', (['A1a.shape'], {}), '(A1a.shape)\n', (45799, 45810), False, 'from tensorflow import logging\n'), ((45993, 46090), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['A2'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""A2/batch_norm"""'}), "(A2, center=True, scale=True, is_training=is_training, scope\n ='A2/batch_norm')\n", (46008, 46090), True, 'import tensorflow.contrib.slim as slim\n'), ((46132, 46150), 'tensorflow.logging.info', 'logging.info', (['"""A2"""'], 
{}), "('A2')\n", (46144, 46150), False, 'from tensorflow import logging\n'), ((46155, 46177), 'tensorflow.logging.info', 'logging.info', (['A2.shape'], {}), '(A2.shape)\n', (46167, 46177), False, 'from tensorflow import logging\n'), ((46385, 46482), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['B1'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""B1/batch_norm"""'}), "(B1, center=True, scale=True, is_training=is_training, scope\n ='B1/batch_norm')\n", (46400, 46482), True, 'import tensorflow.contrib.slim as slim\n'), ((46550, 46590), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['B1n', 'layers_keep_probs[1]'], {}), '(B1n, layers_keep_probs[1])\n', (46563, 46590), True, 'import tensorflow as tf\n'), ((46595, 46614), 'tensorflow.logging.info', 'logging.info', (['"""B1a"""'], {}), "('B1a')\n", (46607, 46614), False, 'from tensorflow import logging\n'), ((46619, 46642), 'tensorflow.logging.info', 'logging.info', (['B1a.shape'], {}), '(B1a.shape)\n', (46631, 46642), False, 'from tensorflow import logging\n'), ((46824, 46921), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['B2'], {'center': '(True)', 'scale': '(True)', 'is_training': 'is_training', 'scope': '"""B2/batch_norm"""'}), "(B2, center=True, scale=True, is_training=is_training, scope\n ='B2/batch_norm')\n", (46839, 46921), True, 'import tensorflow.contrib.slim as slim\n'), ((46970, 47010), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['A2n', 'layers_keep_probs[2]'], {}), '(A2n, layers_keep_probs[2])\n', (46983, 47010), True, 'import tensorflow as tf\n'), ((47022, 47062), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['B2n', 'layers_keep_probs[2]'], {}), '(B2n, layers_keep_probs[2])\n', (47035, 47062), True, 'import tensorflow as tf\n'), ((47072, 47094), 'tensorflow.logging.info', 'logging.info', (['A2.shape'], {}), '(A2.shape)\n', (47084, 47094), False, 'from tensorflow import logging\n'), ((47099, 47121), 'tensorflow.logging.info', 'logging.info', 
(['B2.shape'], {}), '(B2.shape)\n', (47111, 47121), False, 'from tensorflow import logging\n'), ((47132, 47178), 'tensorflow.concat', 'tf.concat', (['[inputAdn, inputBdn, A2na, B2na]', '(1)'], {}), '([inputAdn, inputBdn, A2na, B2na], 1)\n', (47141, 47178), True, 'import tensorflow as tf\n'), ((47382, 47422), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['C3a', 'layers_keep_probs[3]'], {}), '(C3a, layers_keep_probs[3])\n', (47395, 47422), True, 'import tensorflow as tf\n'), ((48170, 48247), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (48183, 48247), True, 'import tensorflow as tf\n'), ((48283, 48351), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (48293, 48351), True, 'import tensorflow as tf\n'), ((50365, 50442), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (50378, 50442), True, 'import tensorflow as tf\n'), ((50478, 50546), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (50488, 50546), True, 'import tensorflow as tf\n'), ((52006, 52083), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(gating_distribution[:, :num_mixtures] * expert_distribution)', '(1)'], {}), '(gating_distribution[:, :num_mixtures] * expert_distribution, 1)\n', (52019, 52083), True, 'import tensorflow as tf\n'), ((52119, 52187), 'tensorflow.reshape', 'tf.reshape', (['final_probabilities_by_class_and_batch', '[-1, vocab_size]'], {}), '(final_probabilities_by_class_and_batch, [-1, vocab_size])\n', (52129, 52187), True, 'import tensorflow as tf\n'), ((1730, 1748), 
'tensorflow.reduce_max', 'tf.reduce_max', (['var'], {}), '(var)\n', (1743, 1748), True, 'import tensorflow as tf\n'), ((1783, 1801), 'tensorflow.reduce_min', 'tf.reduce_min', (['var'], {}), '(var)\n', (1796, 1801), True, 'import tensorflow as tf\n'), ((2391, 2415), 'tensorflow.name_scope', 'tf.name_scope', (['"""weights"""'], {}), "('weights')\n", (2404, 2415), True, 'import tensorflow as tf\n'), ((2546, 2568), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['weights'], {}), '(weights)\n', (2559, 2568), True, 'import tensorflow as tf\n'), ((2582, 2605), 'tensorflow.name_scope', 'tf.name_scope', (['"""biases"""'], {}), "('biases')\n", (2595, 2605), True, 'import tensorflow as tf\n'), ((2708, 2734), 'tensorflow.name_scope', 'tf.name_scope', (['"""Wx_plus_b"""'], {}), "('Wx_plus_b')\n", (2721, 2734), True, 'import tensorflow as tf\n'), ((2816, 2868), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""pre_activations"""', 'preactivate'], {}), "('pre_activations', preactivate)\n", (2836, 2868), True, 'import tensorflow as tf\n'), ((2943, 2991), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""activations"""', 'activations'], {}), "('activations', activations)\n", (2963, 2991), True, 'import tensorflow as tf\n'), ((5438, 5490), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (5448, 5490), True, 'import tensorflow as tf\n'), ((5591, 5641), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (5601, 5641), True, 'import tensorflow as tf\n'), ((8165, 8217), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (8175, 8217), True, 'import tensorflow as tf\n'), ((8318, 8368), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, 
num_mixtures])\n', (8328, 8368), True, 'import tensorflow as tf\n'), ((10175, 10227), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (10185, 10227), True, 'import tensorflow as tf\n'), ((10328, 10378), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (10338, 10378), True, 'import tensorflow as tf\n'), ((13397, 13449), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (13407, 13449), True, 'import tensorflow as tf\n'), ((13562, 13612), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (13572, 13612), True, 'import tensorflow as tf\n'), ((14589, 14616), 'tensorflow.name_scope', 'tf.name_scope', (['"""MyNNModel0"""'], {}), "('MyNNModel0')\n", (14602, 14616), True, 'import tensorflow as tf\n'), ((15667, 15694), 'tensorflow.name_scope', 'tf.name_scope', (['"""MyNNModel1"""'], {}), "('MyNNModel1')\n", (15680, 15694), True, 'import tensorflow as tf\n'), ((18587, 18619), 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'weights'], {}), '(input_tensor, weights)\n', (18596, 18619), True, 'import tensorflow as tf\n'), ((19996, 20027), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""MyNNModel3"""'], {}), "('MyNNModel3')\n", (20013, 20027), True, 'import tensorflow as tf\n'), ((22610, 22662), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (22620, 22662), True, 'import tensorflow as tf\n'), ((22763, 22813), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (22773, 22813), True, 'import tensorflow as tf\n'), ((24440, 24492), 'tensorflow.reshape', 
'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (24450, 24492), True, 'import tensorflow as tf\n'), ((24593, 24643), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (24603, 24643), True, 'import tensorflow as tf\n'), ((26580, 26632), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (26590, 26632), True, 'import tensorflow as tf\n'), ((26733, 26783), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (26743, 26783), True, 'import tensorflow as tf\n'), ((29110, 29162), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (29120, 29162), True, 'import tensorflow as tf\n'), ((29263, 29313), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (29273, 29313), True, 'import tensorflow as tf\n'), ((31637, 31689), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (31647, 31689), True, 'import tensorflow as tf\n'), ((31790, 31840), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (31800, 31840), True, 'import tensorflow as tf\n'), ((34860, 34912), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (34870, 34912), True, 'import tensorflow as tf\n'), ((35025, 35075), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (35035, 35075), True, 'import tensorflow 
as tf\n'), ((38832, 38884), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (38842, 38884), True, 'import tensorflow as tf\n'), ((38997, 39047), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (39007, 39047), True, 'import tensorflow as tf\n'), ((43052, 43104), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (43062, 43104), True, 'import tensorflow as tf\n'), ((43217, 43267), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (43227, 43267), True, 'import tensorflow as tf\n'), ((44878, 44912), 'tensorflow.concat', 'tf.concat', (['[inputAdn, inputBdn]', '(1)'], {}), '([inputAdn, inputBdn], 1)\n', (44887, 44912), True, 'import tensorflow as tf\n'), ((47866, 47918), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (47876, 47918), True, 'import tensorflow as tf\n'), ((48019, 48069), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (48029, 48069), True, 'import tensorflow as tf\n'), ((50061, 50113), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (50071, 50113), True, 'import tensorflow as tf\n'), ((50214, 50264), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (50224, 50264), True, 'import tensorflow as tf\n'), ((51702, 51754), 'tensorflow.reshape', 'tf.reshape', (['gate_activations', '[-1, num_mixtures + 1]'], {}), '(gate_activations, [-1, num_mixtures + 1])\n', (51712, 51754), 
True, 'import tensorflow as tf\n'), ((51855, 51905), 'tensorflow.reshape', 'tf.reshape', (['expert_activations', '[-1, num_mixtures]'], {}), '(expert_activations, [-1, num_mixtures])\n', (51865, 51905), True, 'import tensorflow as tf\n'), ((1101, 1122), 'numpy.sqrt', 'np.sqrt', (['(2 * shape[0])'], {}), '(2 * shape[0])\n', (1108, 1122), True, 'import numpy as np\n'), ((1629, 1650), 'tensorflow.square', 'tf.square', (['(var - mean)'], {}), '(var - mean)\n', (1638, 1650), True, 'import tensorflow as tf\n'), ((2762, 2794), 'tensorflow.matmul', 'tf.matmul', (['input_tensor', 'weights'], {}), '(input_tensor, weights)\n', (2771, 2794), True, 'import tensorflow as tf\n'), ((4572, 4603), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (4591, 4603), True, 'import tensorflow.contrib.slim as slim\n'), ((4767, 4798), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (4786, 4798), True, 'import tensorflow.contrib.slim as slim\n'), ((4949, 4980), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (4968, 4980), True, 'import tensorflow.contrib.slim as slim\n'), ((5131, 5162), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (5150, 5162), True, 'import tensorflow.contrib.slim as slim\n'), ((5339, 5370), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (5358, 5370), True, 'import tensorflow.contrib.slim as slim\n'), ((7453, 7484), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (7472, 7484), True, 'import tensorflow.contrib.slim as slim\n'), ((7658, 7689), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (7677, 7689), True, 'import tensorflow.contrib.slim as slim\n'), ((7846, 7877), 
'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (7865, 7877), True, 'import tensorflow.contrib.slim as slim\n'), ((8061, 8092), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (8080, 8092), True, 'import tensorflow.contrib.slim as slim\n'), ((9305, 9336), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (9324, 9336), True, 'import tensorflow.contrib.slim as slim\n'), ((9502, 9533), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (9521, 9533), True, 'import tensorflow.contrib.slim as slim\n'), ((9684, 9715), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (9703, 9715), True, 'import tensorflow.contrib.slim as slim\n'), ((9868, 9899), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (9887, 9899), True, 'import tensorflow.contrib.slim as slim\n'), ((10076, 10107), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (10095, 10107), True, 'import tensorflow.contrib.slim as slim\n'), ((12242, 12273), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (12261, 12273), True, 'import tensorflow.contrib.slim as slim\n'), ((12453, 12484), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (12472, 12484), True, 'import tensorflow.contrib.slim as slim\n'), ((12655, 12686), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (12674, 12686), True, 'import tensorflow.contrib.slim as slim\n'), ((12857, 12888), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (12876, 12888), True, 
'import tensorflow.contrib.slim as slim\n'), ((13059, 13090), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (13078, 13090), True, 'import tensorflow.contrib.slim as slim\n'), ((13290, 13321), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (13309, 13321), True, 'import tensorflow.contrib.slim as slim\n'), ((21923, 21954), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (21942, 21954), True, 'import tensorflow.contrib.slim as slim\n'), ((22118, 22149), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (22137, 22149), True, 'import tensorflow.contrib.slim as slim\n'), ((22299, 22330), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (22318, 22330), True, 'import tensorflow.contrib.slim as slim\n'), ((22511, 22542), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (22530, 22542), True, 'import tensorflow.contrib.slim as slim\n'), ((23754, 23785), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (23773, 23785), True, 'import tensorflow.contrib.slim as slim\n'), ((23945, 23976), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (23964, 23976), True, 'import tensorflow.contrib.slim as slim\n'), ((24126, 24160), 'tensorflow.contrib.slim.l1_l2_regularizer', 'slim.l1_l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (24148, 24160), True, 'import tensorflow.contrib.slim as slim\n'), ((24341, 24372), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (24360, 24372), True, 'import tensorflow.contrib.slim as slim\n'), ((25792, 25823), 'tensorflow.contrib.slim.l2_regularizer', 
'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (25811, 25823), True, 'import tensorflow.contrib.slim as slim\n'), ((25988, 26019), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (26007, 26019), True, 'import tensorflow.contrib.slim as slim\n'), ((26223, 26254), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (26242, 26254), True, 'import tensorflow.contrib.slim as slim\n'), ((26481, 26512), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (26500, 26512), True, 'import tensorflow.contrib.slim as slim\n'), ((27942, 27973), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (27961, 27973), True, 'import tensorflow.contrib.slim as slim\n'), ((28309, 28340), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (28328, 28340), True, 'import tensorflow.contrib.slim as slim\n'), ((28726, 28757), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (28745, 28757), True, 'import tensorflow.contrib.slim as slim\n'), ((29011, 29042), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (29030, 29042), True, 'import tensorflow.contrib.slim as slim\n'), ((30477, 30508), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (30496, 30508), True, 'import tensorflow.contrib.slim as slim\n'), ((30840, 30871), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (30859, 30871), True, 'import tensorflow.contrib.slim as slim\n'), ((31257, 31288), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (31276, 31288), True, 'import 
tensorflow.contrib.slim as slim\n'), ((31538, 31569), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (31557, 31569), True, 'import tensorflow.contrib.slim as slim\n'), ((33705, 33736), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (33724, 33736), True, 'import tensorflow.contrib.slim as slim\n'), ((33916, 33947), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (33935, 33947), True, 'import tensorflow.contrib.slim as slim\n'), ((34118, 34149), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (34137, 34149), True, 'import tensorflow.contrib.slim as slim\n'), ((34320, 34351), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (34339, 34351), True, 'import tensorflow.contrib.slim as slim\n'), ((34522, 34553), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (34541, 34553), True, 'import tensorflow.contrib.slim as slim\n'), ((34753, 34784), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (34772, 34784), True, 'import tensorflow.contrib.slim as slim\n'), ((37224, 37255), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (37243, 37255), True, 'import tensorflow.contrib.slim as slim\n'), ((37499, 37530), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (37518, 37530), True, 'import tensorflow.contrib.slim as slim\n'), ((37718, 37749), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (37737, 37749), True, 'import tensorflow.contrib.slim as slim\n'), ((37977, 38008), 'tensorflow.contrib.slim.l2_regularizer', 
'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (37996, 38008), True, 'import tensorflow.contrib.slim as slim\n'), ((38235, 38266), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (38254, 38266), True, 'import tensorflow.contrib.slim as slim\n'), ((38493, 38524), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (38512, 38524), True, 'import tensorflow.contrib.slim as slim\n'), ((38725, 38756), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (38744, 38756), True, 'import tensorflow.contrib.slim as slim\n'), ((41196, 41227), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (41215, 41227), True, 'import tensorflow.contrib.slim as slim\n'), ((41436, 41467), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (41455, 41467), True, 'import tensorflow.contrib.slim as slim\n'), ((41711, 41742), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (41730, 41742), True, 'import tensorflow.contrib.slim as slim\n'), ((41938, 41969), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (41957, 41969), True, 'import tensorflow.contrib.slim as slim\n'), ((42197, 42228), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (42216, 42228), True, 'import tensorflow.contrib.slim as slim\n'), ((42455, 42486), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (42474, 42486), True, 'import tensorflow.contrib.slim as slim\n'), ((42713, 42744), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (42732, 42744), True, 'import 
tensorflow.contrib.slim as slim\n'), ((42945, 42976), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (42964, 42976), True, 'import tensorflow.contrib.slim as slim\n'), ((45038, 45069), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (45057, 45069), True, 'import tensorflow.contrib.slim as slim\n'), ((45289, 45320), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (45308, 45320), True, 'import tensorflow.contrib.slim as slim\n'), ((45480, 45511), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (45499, 45511), True, 'import tensorflow.contrib.slim as slim\n'), ((45921, 45952), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (45940, 45952), True, 'import tensorflow.contrib.slim as slim\n'), ((46314, 46345), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (46333, 46345), True, 'import tensorflow.contrib.slim as slim\n'), ((46753, 46784), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (46772, 46784), True, 'import tensorflow.contrib.slim as slim\n'), ((47307, 47338), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (47326, 47338), True, 'import tensorflow.contrib.slim as slim\n'), ((47552, 47583), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (47571, 47583), True, 'import tensorflow.contrib.slim as slim\n'), ((47767, 47798), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (47786, 47798), True, 'import tensorflow.contrib.slim as slim\n'), ((49009, 49040), 'tensorflow.contrib.slim.l2_regularizer', 
'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49028, 49040), True, 'import tensorflow.contrib.slim as slim\n'), ((49202, 49233), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49221, 49233), True, 'import tensorflow.contrib.slim as slim\n'), ((49384, 49415), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49403, 49415), True, 'import tensorflow.contrib.slim as slim\n'), ((49568, 49599), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49587, 49599), True, 'import tensorflow.contrib.slim as slim\n'), ((49753, 49784), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49772, 49784), True, 'import tensorflow.contrib.slim as slim\n'), ((49962, 49993), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (49981, 49993), True, 'import tensorflow.contrib.slim as slim\n'), ((51201, 51232), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (51220, 51232), True, 'import tensorflow.contrib.slim as slim\n'), ((51395, 51426), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (51414, 51426), True, 'import tensorflow.contrib.slim as slim\n'), ((51603, 51634), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (51622, 51634), True, 'import tensorflow.contrib.slim as slim\n'), ((14776, 14807), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (14795, 14807), True, 'import tensorflow.contrib.slim as slim\n'), ((14974, 15005), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (14993, 15005), True, 'import 
tensorflow.contrib.slim as slim\n'), ((15925, 15956), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (15944, 15956), True, 'import tensorflow.contrib.slim as slim\n'), ((16115, 16146), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (16134, 16146), True, 'import tensorflow.contrib.slim as slim\n'), ((16305, 16336), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (16324, 16336), True, 'import tensorflow.contrib.slim as slim\n'), ((16548, 16579), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['l2_penalty'], {}), '(l2_penalty)\n', (16567, 16579), True, 'import tensorflow.contrib.slim as slim\n')] |
import os
import sys
import numpy as np
import h5py
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Download the ModelNet40 HDF5 dataset for point cloud classification.
# NOTE: this runs at import time and has side effects — it creates ./data
# next to this file and shells out to wget/unzip/mv/rm if the dataset is
# not already present.
DATA_DIR = os.path.join(BASE_DIR, 'data')
if not os.path.exists(DATA_DIR):
    os.mkdir(DATA_DIR)
if not os.path.exists(os.path.join(DATA_DIR, 'modelnet40_ply_hdf5_2048')):
    www = 'https://shapenet.cs.stanford.edu/media/modelnet40_ply_hdf5_2048.zip'
    zipfile = os.path.basename(www)
    # NOTE(review): os.system with a hard-coded URL is safe here, but these
    # commands would be a shell-injection risk if `www` ever became
    # user-supplied; subprocess.run with a list would be more robust.
    os.system('wget %s; unzip %s' % (www, zipfile))
    os.system('mv %s %s' % (zipfile[:-4], DATA_DIR))
    os.system('rm %s' % (zipfile))
def shuffle_data(data, labels):
    """ Shuffle data and labels in unison.
        Input:
          data: B,N,... numpy array
          labels: B,... numpy array
        Return:
          shuffled data, shuffled labels, and the permutation that was applied
    """
    # np.random.permutation(n) is exactly arange(n) + shuffle, so the RNG
    # state advances identically to the explicit two-step form.
    perm = np.random.permutation(len(labels))
    return data[perm, ...], labels[perm], perm
def rotate_point_cloud(batch_data):
    """ Randomly rotate each point cloud about the up (y) axis to augment
        the dataset.
        Input:
          BxNx3 array, original batch of point clouds
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    rotated = np.zeros(batch_data.shape, dtype=np.float32)
    for idx in range(batch_data.shape[0]):
        # One uniform draw per cloud, matching the original RNG consumption.
        angle = np.random.uniform() * 2 * np.pi
        c, s = np.cos(angle), np.sin(angle)
        # Rotation about the y axis: y coordinates are left unchanged.
        rot = np.array([[c, 0, s],
                        [0, 1, 0],
                        [-s, 0, c]])
        rotated[idx, ...] = batch_data[idx, ...].reshape((-1, 3)).dot(rot)
    return rotated
def rotate_point_cloud_by_angle(batch_data, rotation_angle):
    """ Rotate every point cloud in the batch about the up (y) axis by a
        fixed angle.
        Input:
          batch_data: BxNx3 array, original batch of point clouds
          rotation_angle: rotation angle in radians
        Return:
          BxNx3 array, rotated batch of point clouds
    """
    # The matrix is the same for every cloud, so build it once up front.
    c = np.cos(rotation_angle)
    s = np.sin(rotation_angle)
    rot = np.array([[c, 0, s],
                    [0, 1, 0],
                    [-s, 0, c]])
    out = np.zeros(batch_data.shape, dtype=np.float32)
    for k in range(batch_data.shape[0]):
        out[k, ...] = batch_data[k, ...].reshape((-1, 3)).dot(rot)
    return out
def jitter_point_cloud(batch_data, sigma=0.01, clip=0.05):
    """ Randomly jitter points; jittering is independent per point.
        Input:
          batch_data: BxNx3 array, original batch of point clouds
          sigma: standard deviation of the Gaussian noise
          clip: absolute bound applied to each noise component (must be > 0)
        Return:
          BxNx3 array, jittered batch of point clouds (input is not modified)
    """
    assert(clip > 0)
    # Same single randn draw of the full batch shape as the original,
    # so the RNG trace is identical.
    noise = np.clip(sigma * np.random.randn(*batch_data.shape), -clip, clip)
    return batch_data + noise
def getDataFiles(list_filename):
    """ Read a list file and return its lines with trailing whitespace
        (including the newline) removed.
        Input:
          list_filename: path to a text file with one entry per line
        Return:
          list of stripped line strings
    """
    # 'with' guarantees the file handle is closed; the original called
    # open() inside a comprehension and leaked the handle.
    with open(list_filename) as f:
        return [line.rstrip() for line in f]
def load_h5(h5_filename):
    """ Load point cloud data and labels from an HDF5 file.
        Input:
          h5_filename: path to an HDF5 file with 'data' and 'label' datasets
        Return:
          (data, label) tuple of numpy arrays
    """
    # Context manager closes the file (the original leaked the handle), and
    # an explicit 'r' mode prevents accidental writes — older h5py defaulted
    # to append mode when no mode was given.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
    return (data, label)
def loadDataFile(filename):
    """Return (data, label) arrays loaded from the HDF5 file at *filename*.

    Thin alias for load_h5, kept for API compatibility.
    """
    return load_h5(filename)
def load_h5_data_label_seg(h5_filename):
    """ Load point cloud data, labels, and per-point segmentation ids from
        an HDF5 file.
        Input:
          h5_filename: path to an HDF5 file with 'data', 'label' and 'pid'
                       datasets
        Return:
          (data, label, seg) tuple of numpy arrays
    """
    # Context manager closes the file (the original leaked the handle), and
    # an explicit 'r' mode prevents accidental writes — older h5py defaulted
    # to append mode when no mode was given.
    with h5py.File(h5_filename, 'r') as f:
        data = f['data'][:]
        label = f['label'][:]
        seg = f['pid'][:]
    return (data, label, seg)
def loadDataFile_with_seg(filename):
    """Return (data, label, seg) arrays loaded from the HDF5 file at *filename*.

    Thin alias for load_h5_data_label_seg, kept for API compatibility.
    """
    return load_h5_data_label_seg(filename)
def cropout_point_cloud(batch_data, max_trans_dist, random_trans_dist=True, close=True):
    """ Translate each point cloud by a (possibly random) distance in a
        random 3D direction, then handle points that end up outside the
        unit sphere.

        Input:
          batch_data: BxNx3 array of point clouds — presumably normalized to
                      the unit sphere (TODO confirm against caller)
          max_trans_dist: maximum translation distance
          random_trans_dist: if True, each cloud's distance is uniform in
                             [0, max_trans_dist); otherwise exactly
                             max_trans_dist
          close: if True, points outside the unit sphere are projected back
                 onto its surface; if False, such points are deleted and
                 replaced by duplicating randomly chosen surviving points
        Return:
          BxNx3 array of translated point clouds
    """
    batch_size = batch_data.shape[0]
    num_points = batch_data.shape[1]
    # Per-cloud translation magnitude.
    if random_trans_dist:
        trans_dist = np.random.rand(batch_size)*max_trans_dist
    else:
        trans_dist = np.ones(batch_size)*max_trans_dist
    translation = np.zeros((batch_size, 3))
    translation[:, 2] = trans_dist # translation distance initialized onto the z axis
    # Rotate each z-aligned translation vector into a random direction
    # (rotation about x, then about y).
    for k in range(batch_size):
        angle_x = np.random.uniform()*2*np.pi
        angle_y = np.random.uniform()*2*np.pi
        cos_x = np.cos(angle_x)
        sin_x = np.sin(angle_x)
        cos_y = np.cos(angle_y)
        sin_y = np.sin(angle_y)
        rotation_x = np.array([[1, 0, 0],
                            [0, cos_x, -sin_x],
                            [0, sin_x, cos_x]])
        rotation_y = np.array([[cos_y, 0, sin_y],
                            [0, 1, 0],
                            [-sin_y, 0, cos_y]])
        translation[k, :] = np.dot(np.dot(translation[k,:],rotation_x),rotation_y)
    # Apply the translation to every point of each cloud.
    batch_data_t = batch_data + np.expand_dims(translation,1)
    # Euclidean distance of every translated point from the origin (BxN).
    batch_dist = np.sqrt(np.sum(np.square(batch_data_t), 2))
    if(close):
        # Project out-of-sphere points radially back onto the unit sphere.
        out_idx = np.where(batch_dist>1)
        batch_data_t[out_idx[0],out_idx[1],:] = \
            batch_data_t[out_idx[0],out_idx[1],:]/np.expand_dims(batch_dist[out_idx],1)
    else:
        for k in range(batch_size):
            # Drop points outside the boundary for this cloud.
            out_idx = np.where(batch_dist[k,:]>1)
            out_num = len(out_idx[0])
            # NOTE(review): `mask` is computed but never used afterwards.
            mask = np.ones(num_points, dtype=bool)
            mask[out_idx] = False
            pcd_data = np.delete(batch_data_t[k,:,:], out_idx, axis=0)
            # Replace the deleted points by duplicating surviving points so
            # the cloud keeps exactly num_points points.
            # NOTE(review): if every point fell outside (out_num == num_points),
            # np.random.choice on an empty range would raise — verify inputs.
            replace_idx = np.random.choice(np.arange(num_points-out_num), out_num)
            replace_points = pcd_data[replace_idx,:]
            pcd_data = np.concatenate((pcd_data, replace_points), axis=0)
            batch_data_t[k,:,:] = pcd_data
    return batch_data_t
def bubble_cropout(batch_data, max_bubble_radius, random_bubble_radius=True, close=True):
    """ Remove a spherical "bubble" of points around one randomly chosen
        point of each cloud.

        Input:
          batch_data: BxNx3 array of point clouds
          max_bubble_radius: radius of the bubble around each center
          random_bubble_radius: NOTE(review): accepted but never used — the
                                bubble radius is always max_bubble_radius
          close: if True, points inside the bubble are pushed radially out
                 onto the bubble surface; if False, they are zeroed out
                 (collapsed onto the bubble center after re-translation)
        Return:
          BxNx3 array of modified point clouds
    """
    batch_size = batch_data.shape[0]
    num_points = batch_data.shape[1]
    # Pick one point from each cloud as the bubble center; the +0.001
    # offset keeps the center point itself at a nonzero distance.
    bubble_centers_idx = np.random.choice(np.arange(num_points), batch_size) # shape [B]
    bubble_centers = batch_data[np.arange(batch_size), bubble_centers_idx, :] + 0.001 # shape [B,3]
    bubble_centers = np.expand_dims(bubble_centers, 1) # shape [B,1,3]
    # Work in coordinates relative to each bubble center.
    batch_data_t = batch_data - bubble_centers
    # Distance of every point from its cloud's bubble center (BxN).
    batch_dist = np.sqrt(np.sum(np.square(batch_data_t), 2))
    if(close):
        # Push in-bubble points radially outward onto the bubble surface.
        out_idx = np.where(batch_dist<max_bubble_radius)
        batch_data_t[out_idx[0],out_idx[1],:] = \
            batch_data_t[out_idx[0],out_idx[1],:]/np.expand_dims(batch_dist[out_idx],1) * max_bubble_radius
    else:
        # (An earlier delete-and-duplicate variant, kept for reference:)
        # for k in range(batch_size):
        #     #mask out points outside the boundary
        #     out_idx = np.where(batch_dist[k,:]<max_bubble_radius)
        #     out_num = len(out_idx[0])
        #     mask = np.ones(num_points, dtype=bool)
        #     mask[out_idx] = False
        #     pcd_data = np.delete(batch_data_t[k,:,:], out_idx, axis=0)
        #     #replace the deleted points with existing points
        #     replace_idx = np.random.choice(np.arange(num_points-out_num), out_num)
        #     replace_points = pcd_data[replace_idx,:]
        #     pcd_data = np.concatenate((pcd_data, replace_points), axis=0)
        #     batch_data_t[k,:,:] = pcd_data
        # Zero in-bubble points in center-relative coordinates: after the
        # translation back below they coincide with the bubble center.
        out_idx = np.where(batch_dist<max_bubble_radius)
        batch_data_t[out_idx[0],out_idx[1],:] = 0
    # Translate back to the original coordinate frame.
    batch_data = batch_data_t + bubble_centers
return batch_data | [
"os.mkdir",
"numpy.ones",
"numpy.sin",
"numpy.arange",
"os.path.join",
"sys.path.append",
"os.path.abspath",
"numpy.random.randn",
"os.path.exists",
"numpy.random.shuffle",
"h5py.File",
"os.path.basename",
"numpy.square",
"os.system",
"numpy.cos",
"numpy.dot",
"numpy.delete",
"nump... | [((106, 131), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (121, 131), False, 'import sys\n'), ((194, 224), 'os.path.join', 'os.path.join', (['BASE_DIR', '"""data"""'], {}), "(BASE_DIR, 'data')\n", (206, 224), False, 'import os\n'), ((79, 104), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (94, 104), False, 'import os\n'), ((232, 256), 'os.path.exists', 'os.path.exists', (['DATA_DIR'], {}), '(DATA_DIR)\n', (246, 256), False, 'import os\n'), ((262, 280), 'os.mkdir', 'os.mkdir', (['DATA_DIR'], {}), '(DATA_DIR)\n', (270, 280), False, 'import os\n'), ((450, 471), 'os.path.basename', 'os.path.basename', (['www'], {}), '(www)\n', (466, 471), False, 'import os\n'), ((476, 523), 'os.system', 'os.system', (["('wget %s; unzip %s' % (www, zipfile))"], {}), "('wget %s; unzip %s' % (www, zipfile))\n", (485, 523), False, 'import os\n'), ((528, 576), 'os.system', 'os.system', (["('mv %s %s' % (zipfile[:-4], DATA_DIR))"], {}), "('mv %s %s' % (zipfile[:-4], DATA_DIR))\n", (537, 576), False, 'import os\n'), ((581, 609), 'os.system', 'os.system', (["('rm %s' % zipfile)"], {}), "('rm %s' % zipfile)\n", (590, 609), False, 'import os\n'), ((877, 899), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (894, 899), True, 'import numpy as np\n'), ((1267, 1311), 'numpy.zeros', 'np.zeros', (['batch_data.shape'], {'dtype': 'np.float32'}), '(batch_data.shape, dtype=np.float32)\n', (1275, 1311), True, 'import numpy as np\n'), ((2095, 2139), 'numpy.zeros', 'np.zeros', (['batch_data.shape'], {'dtype': 'np.float32'}), '(batch_data.shape, dtype=np.float32)\n', (2103, 2139), True, 'import numpy as np\n'), ((3204, 3226), 'h5py.File', 'h5py.File', (['h5_filename'], {}), '(h5_filename)\n', (3213, 3226), False, 'import h5py\n'), ((3410, 3432), 'h5py.File', 'h5py.File', (['h5_filename'], {}), '(h5_filename)\n', (3419, 3432), False, 'import h5py\n'), ((3955, 3980), 'numpy.zeros', 'np.zeros', (['(batch_size, 3)'], {}), 
'((batch_size, 3))\n', (3963, 3980), True, 'import numpy as np\n'), ((6185, 6218), 'numpy.expand_dims', 'np.expand_dims', (['bubble_centers', '(1)'], {}), '(bubble_centers, 1)\n', (6199, 6218), True, 'import numpy as np\n'), ((303, 353), 'os.path.join', 'os.path.join', (['DATA_DIR', '"""modelnet40_ply_hdf5_2048"""'], {}), "(DATA_DIR, 'modelnet40_ply_hdf5_2048')\n", (315, 353), False, 'import os\n'), ((1427, 1449), 'numpy.cos', 'np.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (1433, 1449), True, 'import numpy as np\n'), ((1467, 1489), 'numpy.sin', 'np.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (1473, 1489), True, 'import numpy as np\n'), ((1516, 1580), 'numpy.array', 'np.array', (['[[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]'], {}), '([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]])\n', (1524, 1580), True, 'import numpy as np\n'), ((2256, 2278), 'numpy.cos', 'np.cos', (['rotation_angle'], {}), '(rotation_angle)\n', (2262, 2278), True, 'import numpy as np\n'), ((2296, 2318), 'numpy.sin', 'np.sin', (['rotation_angle'], {}), '(rotation_angle)\n', (2302, 2318), True, 'import numpy as np\n'), ((2345, 2409), 'numpy.array', 'np.array', (['[[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]'], {}), '([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]])\n', (2353, 2409), True, 'import numpy as np\n'), ((4260, 4275), 'numpy.cos', 'np.cos', (['angle_x'], {}), '(angle_x)\n', (4266, 4275), True, 'import numpy as np\n'), ((4292, 4307), 'numpy.sin', 'np.sin', (['angle_x'], {}), '(angle_x)\n', (4298, 4307), True, 'import numpy as np\n'), ((4324, 4339), 'numpy.cos', 'np.cos', (['angle_y'], {}), '(angle_y)\n', (4330, 4339), True, 'import numpy as np\n'), ((4356, 4371), 'numpy.sin', 'np.sin', (['angle_y'], {}), '(angle_y)\n', (4362, 4371), True, 'import numpy as np\n'), ((4393, 4453), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, cos_x, -sin_x], [0, sin_x, cos_x]]'], {}), '([[1, 0, 0], [0, cos_x, -sin_x], [0, sin_x, cos_x]])\n', (4401, 
4453), True, 'import numpy as np\n'), ((4531, 4591), 'numpy.array', 'np.array', (['[[cos_y, 0, sin_y], [0, 1, 0], [-sin_y, 0, cos_y]]'], {}), '([[cos_y, 0, sin_y], [0, 1, 0], [-sin_y, 0, cos_y]])\n', (4539, 4591), True, 'import numpy as np\n'), ((4788, 4818), 'numpy.expand_dims', 'np.expand_dims', (['translation', '(1)'], {}), '(translation, 1)\n', (4802, 4818), True, 'import numpy as np\n'), ((4914, 4938), 'numpy.where', 'np.where', (['(batch_dist > 1)'], {}), '(batch_dist > 1)\n', (4922, 4938), True, 'import numpy as np\n'), ((6026, 6047), 'numpy.arange', 'np.arange', (['num_points'], {}), '(num_points)\n', (6035, 6047), True, 'import numpy as np\n'), ((6396, 6436), 'numpy.where', 'np.where', (['(batch_dist < max_bubble_radius)'], {}), '(batch_dist < max_bubble_radius)\n', (6404, 6436), True, 'import numpy as np\n'), ((7305, 7345), 'numpy.where', 'np.where', (['(batch_dist < max_bubble_radius)'], {}), '(batch_dist < max_bubble_radius)\n', (7313, 7345), True, 'import numpy as np\n'), ((2978, 3002), 'numpy.random.randn', 'np.random.randn', (['B', 'N', 'C'], {}), '(B, N, C)\n', (2993, 3002), True, 'import numpy as np\n'), ((3829, 3855), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (3843, 3855), True, 'import numpy as np\n'), ((3902, 3921), 'numpy.ones', 'np.ones', (['batch_size'], {}), '(batch_size)\n', (3909, 3921), True, 'import numpy as np\n'), ((4684, 4721), 'numpy.dot', 'np.dot', (['translation[k, :]', 'rotation_x'], {}), '(translation[k, :], rotation_x)\n', (4690, 4721), True, 'import numpy as np\n'), ((4850, 4873), 'numpy.square', 'np.square', (['batch_data_t'], {}), '(batch_data_t)\n', (4859, 4873), True, 'import numpy as np\n'), ((5037, 5075), 'numpy.expand_dims', 'np.expand_dims', (['batch_dist[out_idx]', '(1)'], {}), '(batch_dist[out_idx], 1)\n', (5051, 5075), True, 'import numpy as np\n'), ((5193, 5223), 'numpy.where', 'np.where', (['(batch_dist[k, :] > 1)'], {}), '(batch_dist[k, :] > 1)\n', (5201, 5223), True, 'import 
numpy as np\n'), ((5278, 5309), 'numpy.ones', 'np.ones', (['num_points'], {'dtype': 'bool'}), '(num_points, dtype=bool)\n', (5285, 5309), True, 'import numpy as np\n'), ((5367, 5416), 'numpy.delete', 'np.delete', (['batch_data_t[k, :, :]', 'out_idx'], {'axis': '(0)'}), '(batch_data_t[k, :, :], out_idx, axis=0)\n', (5376, 5416), True, 'import numpy as np\n'), ((5635, 5685), 'numpy.concatenate', 'np.concatenate', (['(pcd_data, replace_points)'], {'axis': '(0)'}), '((pcd_data, replace_points), axis=0)\n', (5649, 5685), True, 'import numpy as np\n'), ((6332, 6355), 'numpy.square', 'np.square', (['batch_data_t'], {}), '(batch_data_t)\n', (6341, 6355), True, 'import numpy as np\n'), ((1378, 1397), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (1395, 1397), True, 'import numpy as np\n'), ((4170, 4189), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4187, 4189), True, 'import numpy as np\n'), ((4216, 4235), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4233, 4235), True, 'import numpy as np\n'), ((5519, 5550), 'numpy.arange', 'np.arange', (['(num_points - out_num)'], {}), '(num_points - out_num)\n', (5528, 5550), True, 'import numpy as np\n'), ((6102, 6123), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (6111, 6123), True, 'import numpy as np\n'), ((6535, 6573), 'numpy.expand_dims', 'np.expand_dims', (['batch_dist[out_idx]', '(1)'], {}), '(batch_dist[out_idx], 1)\n', (6549, 6573), True, 'import numpy as np\n')] |
import numpy
import matplotlib.pylab as plt
no_of_simulations = 1000
milestone_probabilities = [25, 50, 75, 90, 99]
milestone_current = 0
def birthday_paradox(no_of_people, simulations):
global milestone_probabilities, milestone_current
same_birthday_two_people = 0
#For simplicity, we assume that there are 365 days in all years.
for sim in range(simulations):
birthdays = numpy.random.choice(365, no_of_people, replace=True)
unique_birthdays = set(birthdays)
if len(unique_birthdays) < no_of_people:
same_birthday_two_people += 1
success_fraction = same_birthday_two_people/simulations
if milestone_current < len(milestone_probabilities) and success_fraction*100 > milestone_probabilities[milestone_current]:
print("P(Two people sharing birthday in a room with " + str(no_of_people) + " people) = " + str(success_fraction))
milestone_current += 1
return success_fraction
def main():
day = []
success = []
for i in range(1, 366): #Executing for all possible cases where can have unique birthdays, i.e. from 1 person to a maximum of 365 people in a room
day.append(i)
success.append(birthday_paradox(i, no_of_simulations))
outfile = open("results.csv", "w")
for i in range(365):
outfile.write(str(day[i]) + "," + str(success[i]) + "\n")
plt.plot(day, success)
plt.show()
main()
| [
"matplotlib.pylab.plot",
"numpy.random.choice",
"matplotlib.pylab.show"
] | [((1381, 1403), 'matplotlib.pylab.plot', 'plt.plot', (['day', 'success'], {}), '(day, success)\n', (1389, 1403), True, 'import matplotlib.pylab as plt\n'), ((1408, 1418), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (1416, 1418), True, 'import matplotlib.pylab as plt\n'), ((403, 455), 'numpy.random.choice', 'numpy.random.choice', (['(365)', 'no_of_people'], {'replace': '(True)'}), '(365, no_of_people, replace=True)\n', (422, 455), False, 'import numpy\n')] |
""" Module routines for pre-processing data for recommender training
"""
import argparse
from typing import Sequence, Optional
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelBinarizer
from scipy import sparse
from aizynthfinder.training.utils import (
Config,
split_and_save_data,
reactants_to_fingerprint,
split_reaction_smiles,
)
def _get_config(optional_args: Optional[Sequence[str]] = None) -> Config:
parser = argparse.ArgumentParser(
"Tool to pre-process a template library to be used to train a recommender network"
)
parser.add_argument("config", help="the filename to a configuration file")
args = parser.parse_args(optional_args)
return Config(args.config)
def _save_unique_templates(dataset: pd.DataFrame, config: Config) -> None:
dataset = dataset[[config["column_map"]["retro_template"], "template_code"]]
dataset = dataset.drop_duplicates(subset="template_code", keep="first")
dataset.set_index("template_code", inplace=True)
dataset = dataset.sort_index()
dataset.to_hdf(config.filename("unique_templates"), "table")
def main(optional_args: Optional[Sequence[str]] = None) -> None:
"""Entry-point for the preprocess_recommender tool"""
config = _get_config(optional_args)
filename = config.filename("library")
dataset = pd.read_csv(
filename,
index_col=False,
header=0 if config["in_csv_headers"] else None,
names=None if config["in_csv_headers"] else config["library_headers"],
sep=config["csv_sep"],
)
if config["reaction_smiles_column"]:
dataset = split_reaction_smiles(dataset, config)
print("Dataset loaded, generating Labels...", flush=True)
labelbin = LabelBinarizer(neg_label=0, pos_label=1, sparse_output=True)
labels = labelbin.fit_transform(dataset[config["column_map"]["template_hash"]])
split_and_save_data(labels, "labels", config)
print("Labels created and split, generating Inputs...", flush=True)
reactants = dataset[config["column_map"]["reactants"]].to_numpy()
inputs = np.apply_along_axis(reactants_to_fingerprint, 0, [reactants], config)
inputs = sparse.lil_matrix(inputs.T).tocsr()
split_and_save_data(inputs, "inputs", config)
print("Inputs created and split, splitting Full Dataset...", flush=True)
split_and_save_data(dataset, "library", config)
print("Full Dataset split, creating unique template set", flush=True)
_save_unique_templates(dataset, config)
if __name__ == "__main__":
main()
| [
"sklearn.preprocessing.LabelBinarizer",
"argparse.ArgumentParser",
"aizynthfinder.training.utils.Config",
"pandas.read_csv",
"numpy.apply_along_axis",
"aizynthfinder.training.utils.split_and_save_data",
"scipy.sparse.lil_matrix",
"aizynthfinder.training.utils.split_reaction_smiles"
] | [((470, 587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Tool to pre-process a template library to be used to train a recommender network"""'], {}), "(\n 'Tool to pre-process a template library to be used to train a recommender network'\n )\n", (493, 587), False, 'import argparse\n'), ((727, 746), 'aizynthfinder.training.utils.Config', 'Config', (['args.config'], {}), '(args.config)\n', (733, 746), False, 'from aizynthfinder.training.utils import Config, split_and_save_data, reactants_to_fingerprint, split_reaction_smiles\n'), ((1356, 1546), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(False)', 'header': "(0 if config['in_csv_headers'] else None)", 'names': "(None if config['in_csv_headers'] else config['library_headers'])", 'sep': "config['csv_sep']"}), "(filename, index_col=False, header=0 if config['in_csv_headers']\n else None, names=None if config['in_csv_headers'] else config[\n 'library_headers'], sep=config['csv_sep'])\n", (1367, 1546), True, 'import pandas as pd\n'), ((1760, 1820), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {'neg_label': '(0)', 'pos_label': '(1)', 'sparse_output': '(True)'}), '(neg_label=0, pos_label=1, sparse_output=True)\n', (1774, 1820), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((1909, 1954), 'aizynthfinder.training.utils.split_and_save_data', 'split_and_save_data', (['labels', '"""labels"""', 'config'], {}), "(labels, 'labels', config)\n", (1928, 1954), False, 'from aizynthfinder.training.utils import Config, split_and_save_data, reactants_to_fingerprint, split_reaction_smiles\n'), ((2111, 2180), 'numpy.apply_along_axis', 'np.apply_along_axis', (['reactants_to_fingerprint', '(0)', '[reactants]', 'config'], {}), '(reactants_to_fingerprint, 0, [reactants], config)\n', (2130, 2180), True, 'import numpy as np\n'), ((2234, 2279), 'aizynthfinder.training.utils.split_and_save_data', 'split_and_save_data', (['inputs', '"""inputs"""', 'config'], {}), "(inputs, 
'inputs', config)\n", (2253, 2279), False, 'from aizynthfinder.training.utils import Config, split_and_save_data, reactants_to_fingerprint, split_reaction_smiles\n'), ((2362, 2409), 'aizynthfinder.training.utils.split_and_save_data', 'split_and_save_data', (['dataset', '"""library"""', 'config'], {}), "(dataset, 'library', config)\n", (2381, 2409), False, 'from aizynthfinder.training.utils import Config, split_and_save_data, reactants_to_fingerprint, split_reaction_smiles\n'), ((1643, 1681), 'aizynthfinder.training.utils.split_reaction_smiles', 'split_reaction_smiles', (['dataset', 'config'], {}), '(dataset, config)\n', (1664, 1681), False, 'from aizynthfinder.training.utils import Config, split_and_save_data, reactants_to_fingerprint, split_reaction_smiles\n'), ((2194, 2221), 'scipy.sparse.lil_matrix', 'sparse.lil_matrix', (['inputs.T'], {}), '(inputs.T)\n', (2211, 2221), False, 'from scipy import sparse\n')] |
# -------------------------------------------------------------------------------------------------------------------- #
# Import packages
# -------------------------------------------------------------------------------------------------------------------- #
import numpy as np
from .nurbs_surface import NurbsSurface
# -------------------------------------------------------------------------------------------------------------------- #
# Define the bilinear NURBS surface class
# -------------------------------------------------------------------------------------------------------------------- #
class NurbsSurfaceRevolution:
""" Create the NURBS surface obtained by revolving a generatrix NURBS curve `C(u)` about an axis
Parameters
----------
generatrix : NURBS curve object
See NurbsSurface class documentation
axis_point : ndaray of with shape (3,)
Point that together with a direction defines the axis of rotation
axis_direction : ndarrays with shape (3,)
Direction that together with a point defines the axis of rotation
theta_start : scalar
Start angle, measured with respect to the generatrix
theta_end : scalar
End angle, measured with respect to the generatrix
References
----------
The NURBS book. Chapter 8.5
<NAME> and <NAME>
Springer, second edition
"""
def __init__(self, generatrix, axis_point, axis_direction, angle_start, angle_end):
# Set the data type used to initialize arrays (set `complex` if any argument is complex and `float` if not)
temp = generatrix.P
for item in locals().values():
data_type = np.asarray(item).dtype
if np.issubdtype(data_type, np.complex128):
self.data_type = np.complex128
break
else:
self.data_type = np.float64
# Declare input variables as instance variables (adopt the notation used in the NURBS book)
self.C = generatrix
self.S = axis_point
self.T = axis_direction/np.sum(axis_direction**2)**(1/2)
self.theta_start = angle_start
self.theta_end = angle_end
# Check the number of dimensions of the problem
if np.shape(generatrix.P)[0] != 3: raise Exception("The input NURBS must be three-dimensional")
if np.shape(axis_direction)[0] != 3: raise Exception("The axis direction must be a three-dimensional vector")
if np.shape(axis_point)[0] != 3: raise Exception("The axis point must be a three-dimensional vector")
# Make the extrusion surface NURBS representation
self.NurbsSurface = None
self.make_nurbs_surface()
def make_nurbs_surface(self):
""" Make a NURBS surface representation of the revolution surface """
# Rename variables for brevity
S = self.S
T = self.T
theta_start = self.theta_start
theta_end = self.theta_end
# Correct theta_end if necessary
if theta_end < theta_start: theta_end = theta_end + 2*np.pi
# Angle spanned by the circular arc
theta = theta_end - theta_start
# Get the number of NURBS segments used to represent the circular arc
if theta <= 1/2*np.pi: n_arcs = 1
elif theta <= 2/2*np.pi: n_arcs = 2
elif theta <= 3/2*np.pi: n_arcs = 3
elif theta <= 4/2*np.pi: n_arcs = 4
else: raise Exception('Opps, something went wrong...')
# Angle spanned by each segment
delta_theta = theta/n_arcs
# Initialize arrays of control points and weights
n = 2*n_arcs # Highest index of the u-direction control points
m = np.shape(self.C.P)[1] - 1 # Highest index of the v_direction control points
P_array = np.zeros((3, n + 1, m + 1), dtype=self.data_type) # Array of control points
W_array = np.zeros((n + 1, m + 1), dtype=self.data_type) # Weight of the control points
# Loop over the generatrix control points
for j in range(m+1):
# Current control point of the generatrix
P = self.C.P[:, j]
# Current weight of the generatrix
W = self.C.W[j]
# Compute the axis point that is closest to the control point
O = self.project_point_to_line(S, T, P)
# Compute the projected distance between the current control point and the axis
R = np.sum((P-O)**2)**(1/2)
# Compute the current section principal directions (use dummy directions in case P and O coincide)
if np.abs(np.sum(P-O)) < 1e-12:
X = np.asarray([1, 0, 0])
Y = np.asarray([0, 1, 0])
else:
X = (P-O)/np.sum((P-O)**2)**(1/2)
Y = np.cross(T, X)
# Get the coordinates and weight of the first control point
P0 = O + R * np.cos(theta_start) * X + R * np.sin(theta_start) * Y
T0 = -np.sin(theta_start) * X + np.cos(theta_start) * Y
# Store control points and weights
P_array[:, 0, j] = P0
W_array[0, j] = W
# Get the coordinates and weights of the other control points
index, angle = 0, theta_start
for i in range(n_arcs):
# Angle spanned by the current segment
angle = angle + delta_theta
# Get the end-point and end-tangent of the current segment
P2 = O + R * np.cos(angle) * X + R * np.sin(angle) * Y
T2 = -np.sin(angle) * X + np.cos(angle) * Y
# Solve the intersection between tangent lines T0 and T2 to compute the intermediate point
P1 = self.intersect_lines(P0, T0, P2, T2)
# Compute the weight of the intermediate point
W1 = np.cos(delta_theta / 2)
# Store control points and weights
P_array[:, index + 1, j] = P1
P_array[:, index + 2, j] = P2
W_array[index + 1, j] = W*W1
W_array[index + 2, j] = W
# Get ready for the next segment!
index = index + 2
P0 = P2
T0 = T2
# Define the order of the basis polynomials
p = 2
q = self.C.p
# Define the knot vectors
# Knot multiplicity p+1 at the endpoints
U = 5 + np.zeros((n + p + 2))
U[[0, 1, 2]] = 0
U[[-1, -2, -3]] = 1
# Set the multiplicity 2 at the interior knots to connect the segments
if n_arcs == 1:
pass
elif n_arcs == 2:
U[[3, 4]] = 1 / 2
elif n_arcs == 3:
U[[3, 4]], U[[5, 6]] = 1 / 3, 2 / 3
elif n_arcs == 4:
U[[3, 4]], U[[5, 6]], U[[7, 8]] = 1 / 4, 2 / 4, 3 / 4
else:
raise Exception('Opps, something went wrong...')
# The knot vector in the v-direction is given by the knot vector of the generatrix NURBS
V = self.C.U
# Create the NURBS surface
self.NurbsSurface = NurbsSurface(control_points=P_array, weights=W_array, u_degree=p, v_degree=q, u_knots=U, v_knots=V)
def intersect_lines(self, P0, T0, P2, T2):
""" Compute the point of intersection between two lines in 2D or 3D """
# Compute the intersection by reducing the 3x2 system to a 2x2 system using dot products
A = np.asarray([[np.sum(T0 * T0), -np.sum(T2 * T0)], [np.sum(T0 * T2), -np.sum(T2 * T2)]])
b = np.asarray([np.sum(P2 * T0) - np.sum(P0 * T0), np.sum(P2 * T2) - np.sum(P0 * T2)])
u, v = np.linalg.solve(A, b)
P1 = P0 + u * T0
if np.sum(np.abs(((P0 + u * T0) - (P2 + v * T2)))) > 1e-12:
raise Exception("Something went wrong computing the line intersection")
return P1
def project_point_to_line(self, S, T, P):
""" Compute the projection of a point ´P´ into the line given by ´S + u*T´ """
# Analytic formula (not hard to derive by hand)
P_projected = S + np.sum(T * (P - S)) / np.sum(T * T) * T
return P_projected | [
"numpy.sum",
"numpy.abs",
"numpy.asarray",
"numpy.zeros",
"numpy.cross",
"numpy.shape",
"numpy.sin",
"numpy.cos",
"numpy.linalg.solve",
"numpy.issubdtype"
] | [((3883, 3932), 'numpy.zeros', 'np.zeros', (['(3, n + 1, m + 1)'], {'dtype': 'self.data_type'}), '((3, n + 1, m + 1), dtype=self.data_type)\n', (3891, 3932), True, 'import numpy as np\n'), ((3979, 4025), 'numpy.zeros', 'np.zeros', (['(n + 1, m + 1)'], {'dtype': 'self.data_type'}), '((n + 1, m + 1), dtype=self.data_type)\n', (3987, 4025), True, 'import numpy as np\n'), ((7739, 7760), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (7754, 7760), True, 'import numpy as np\n'), ((1717, 1756), 'numpy.issubdtype', 'np.issubdtype', (['data_type', 'np.complex128'], {}), '(data_type, np.complex128)\n', (1730, 1756), True, 'import numpy as np\n'), ((6526, 6545), 'numpy.zeros', 'np.zeros', (['(n + p + 2)'], {}), '(n + p + 2)\n', (6534, 6545), True, 'import numpy as np\n'), ((1679, 1695), 'numpy.asarray', 'np.asarray', (['item'], {}), '(item)\n', (1689, 1695), True, 'import numpy as np\n'), ((2078, 2105), 'numpy.sum', 'np.sum', (['(axis_direction ** 2)'], {}), '(axis_direction ** 2)\n', (2084, 2105), True, 'import numpy as np\n'), ((2253, 2275), 'numpy.shape', 'np.shape', (['generatrix.P'], {}), '(generatrix.P)\n', (2261, 2275), True, 'import numpy as np\n'), ((2359, 2383), 'numpy.shape', 'np.shape', (['axis_direction'], {}), '(axis_direction)\n', (2367, 2383), True, 'import numpy as np\n'), ((2477, 2497), 'numpy.shape', 'np.shape', (['axis_point'], {}), '(axis_point)\n', (2485, 2497), True, 'import numpy as np\n'), ((3757, 3775), 'numpy.shape', 'np.shape', (['self.C.P'], {}), '(self.C.P)\n', (3765, 3775), True, 'import numpy as np\n'), ((4540, 4560), 'numpy.sum', 'np.sum', (['((P - O) ** 2)'], {}), '((P - O) ** 2)\n', (4546, 4560), True, 'import numpy as np\n'), ((4740, 4761), 'numpy.asarray', 'np.asarray', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4750, 4761), True, 'import numpy as np\n'), ((4782, 4803), 'numpy.asarray', 'np.asarray', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4792, 4803), True, 'import numpy as np\n'), ((4892, 4906), 'numpy.cross', 
'np.cross', (['T', 'X'], {}), '(T, X)\n', (4900, 4906), True, 'import numpy as np\n'), ((5950, 5973), 'numpy.cos', 'np.cos', (['(delta_theta / 2)'], {}), '(delta_theta / 2)\n', (5956, 5973), True, 'import numpy as np\n'), ((7805, 7840), 'numpy.abs', 'np.abs', (['(P0 + u * T0 - (P2 + v * T2))'], {}), '(P0 + u * T0 - (P2 + v * T2))\n', (7811, 7840), True, 'import numpy as np\n'), ((4698, 4711), 'numpy.sum', 'np.sum', (['(P - O)'], {}), '(P - O)\n', (4704, 4711), True, 'import numpy as np\n'), ((5103, 5122), 'numpy.cos', 'np.cos', (['theta_start'], {}), '(theta_start)\n', (5109, 5122), True, 'import numpy as np\n'), ((7555, 7570), 'numpy.sum', 'np.sum', (['(T0 * T0)'], {}), '(T0 * T0)\n', (7561, 7570), True, 'import numpy as np\n'), ((7592, 7607), 'numpy.sum', 'np.sum', (['(T0 * T2)'], {}), '(T0 * T2)\n', (7598, 7607), True, 'import numpy as np\n'), ((7653, 7668), 'numpy.sum', 'np.sum', (['(P2 * T0)'], {}), '(P2 * T0)\n', (7659, 7668), True, 'import numpy as np\n'), ((7671, 7686), 'numpy.sum', 'np.sum', (['(P0 * T0)'], {}), '(P0 * T0)\n', (7677, 7686), True, 'import numpy as np\n'), ((7688, 7703), 'numpy.sum', 'np.sum', (['(P2 * T2)'], {}), '(P2 * T2)\n', (7694, 7703), True, 'import numpy as np\n'), ((7706, 7721), 'numpy.sum', 'np.sum', (['(P0 * T2)'], {}), '(P0 * T2)\n', (7712, 7721), True, 'import numpy as np\n'), ((8177, 8196), 'numpy.sum', 'np.sum', (['(T * (P - S))'], {}), '(T * (P - S))\n', (8183, 8196), True, 'import numpy as np\n'), ((8199, 8212), 'numpy.sum', 'np.sum', (['(T * T)'], {}), '(T * T)\n', (8205, 8212), True, 'import numpy as np\n'), ((4848, 4868), 'numpy.sum', 'np.sum', (['((P - O) ** 2)'], {}), '((P - O) ** 2)\n', (4854, 4868), True, 'import numpy as np\n'), ((5035, 5054), 'numpy.sin', 'np.sin', (['theta_start'], {}), '(theta_start)\n', (5041, 5054), True, 'import numpy as np\n'), ((5077, 5096), 'numpy.sin', 'np.sin', (['theta_start'], {}), '(theta_start)\n', (5083, 5096), True, 'import numpy as np\n'), ((5681, 5694), 'numpy.cos', 'np.cos', 
(['angle'], {}), '(angle)\n', (5687, 5694), True, 'import numpy as np\n'), ((7573, 7588), 'numpy.sum', 'np.sum', (['(T2 * T0)'], {}), '(T2 * T0)\n', (7579, 7588), True, 'import numpy as np\n'), ((7610, 7625), 'numpy.sum', 'np.sum', (['(T2 * T2)'], {}), '(T2 * T2)\n', (7616, 7625), True, 'import numpy as np\n'), ((5005, 5024), 'numpy.cos', 'np.cos', (['theta_start'], {}), '(theta_start)\n', (5011, 5024), True, 'import numpy as np\n'), ((5621, 5634), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5627, 5634), True, 'import numpy as np\n'), ((5661, 5674), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (5667, 5674), True, 'import numpy as np\n'), ((5597, 5610), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (5603, 5610), True, 'import numpy as np\n')] |
import numpy as np
from ase.build import bulk
from ipyatom.repeat_cell import atoms_to_dict
from ipyatom.plot_mpl import plot_atoms_top, plot_slice
def test_plot_atoms_top():
import matplotlib
matplotlib.pyplot.switch_backend('agg')
fe = bulk("Fe").repeat((5, 5, 5))
dct = atoms_to_dict(fe)
plot_atoms_top(dct, linewidth=5)
def test_plot_slice():
import matplotlib
matplotlib.pyplot.switch_backend('agg')
instruct = {
"transforms": [],
"elements": [
{"type": "repeat_density",
"name": "Test",
"dtype": "other",
"transforms": [],
"color_bbox": None,
"centre": [.5, 1, .5],
"cell_vectors": {
"a": [1, 1, 0],
"b": [0, 1, 0],
"c": [0, 0, 1]},
"dcube": np.array([
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[2., 3., 4.],
[5., 6., 7.],
[8., 9., 11.]],
[[20., 20., 20.],
[20., 20., 20.],
[20., 20., 20.]]])
},
{"type": "repeat_density",
"name": "Test",
"dtype": "other2",
"transforms": [],
"color_bbox": None,
"centre": [.5, 1, .5],
"cell_vectors": {
"a": [1, 1, 0],
"b": [0, 1, 0],
"c": [0, 0, 1]},
"dcube": np.array([
[[1., 1., 1.],
[1., 1., 1.],
[1., 1., 1.]],
[[2., 3., 4.],
[5., 6., 7.],
[8., 9., 10.]],
[[20., 20., 20.],
[20., 20., 20.],
[20., 20., 20.]]]) * 0.00001
},
{"type": "repeat_cell",
"name": "Test",
"transforms": [],
"bonds": [],
"color_bbox": None,
"centre": [.5, 1, .5],
"cell_vectors": {
"a": [1, 1, 0],
"b": [0, 1, 0],
"c": [0, 0, 1]},
"sites": [
{
"radius": .5,
"transparency": 1,
"label": "Fe",
"ccoord": [.5, 1, .5],
"color_fill": "red",
"color_outline": None
},
{
"radius": .5,
"transparency": 1,
"label": "S",
"ccoord": [.5, 0, .5],
"color_fill": "blue",
"color_outline": None
}
]
}
]
}
fig1, axes1 = plot_slice(instruct, (0.5, 0.5, 0.5), (0., 0., 1.),
cell_size=.001, min_voxels=10000, cmap_range=(None, None),
contourf=[True, True, False], bval=[np.nan, np.nan, 0],
show_corners=[False, True, False], cbar_tick_rot=45)
| [
"matplotlib.pyplot.switch_backend",
"ase.build.bulk",
"ipyatom.plot_mpl.plot_atoms_top",
"numpy.array",
"ipyatom.plot_mpl.plot_slice",
"ipyatom.repeat_cell.atoms_to_dict"
] | [((203, 242), 'matplotlib.pyplot.switch_backend', 'matplotlib.pyplot.switch_backend', (['"""agg"""'], {}), "('agg')\n", (235, 242), False, 'import matplotlib\n'), ((292, 309), 'ipyatom.repeat_cell.atoms_to_dict', 'atoms_to_dict', (['fe'], {}), '(fe)\n', (305, 309), False, 'from ipyatom.repeat_cell import atoms_to_dict\n'), ((314, 346), 'ipyatom.plot_mpl.plot_atoms_top', 'plot_atoms_top', (['dct'], {'linewidth': '(5)'}), '(dct, linewidth=5)\n', (328, 346), False, 'from ipyatom.plot_mpl import plot_atoms_top, plot_slice\n'), ((398, 437), 'matplotlib.pyplot.switch_backend', 'matplotlib.pyplot.switch_backend', (['"""agg"""'], {}), "('agg')\n", (430, 437), False, 'import matplotlib\n'), ((2847, 3082), 'ipyatom.plot_mpl.plot_slice', 'plot_slice', (['instruct', '(0.5, 0.5, 0.5)', '(0.0, 0.0, 1.0)'], {'cell_size': '(0.001)', 'min_voxels': '(10000)', 'cmap_range': '(None, None)', 'contourf': '[True, True, False]', 'bval': '[np.nan, np.nan, 0]', 'show_corners': '[False, True, False]', 'cbar_tick_rot': '(45)'}), '(instruct, (0.5, 0.5, 0.5), (0.0, 0.0, 1.0), cell_size=0.001,\n min_voxels=10000, cmap_range=(None, None), contourf=[True, True, False],\n bval=[np.nan, np.nan, 0], show_corners=[False, True, False],\n cbar_tick_rot=45)\n', (2857, 3082), False, 'from ipyatom.plot_mpl import plot_atoms_top, plot_slice\n'), ((252, 262), 'ase.build.bulk', 'bulk', (['"""Fe"""'], {}), "('Fe')\n", (256, 262), False, 'from ase.build import bulk\n'), ((855, 1044), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 3.0, 4.0], [\n 5.0, 6.0, 7.0], [8.0, 9.0, 11.0]], [[20.0, 20.0, 20.0], [20.0, 20.0, \n 20.0], [20.0, 20.0, 20.0]]]'], {}), '([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 3.0, \n 4.0], [5.0, 6.0, 7.0], [8.0, 9.0, 11.0]], [[20.0, 20.0, 20.0], [20.0, \n 20.0, 20.0], [20.0, 20.0, 20.0]]])\n', (863, 1044), True, 'import numpy as np\n'), ((1537, 1726), 'numpy.array', 'np.array', (['[[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 
1.0]], [[2.0, 3.0, 4.0], [\n 5.0, 6.0, 7.0], [8.0, 9.0, 10.0]], [[20.0, 20.0, 20.0], [20.0, 20.0, \n 20.0], [20.0, 20.0, 20.0]]]'], {}), '([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], [[2.0, 3.0, \n 4.0], [5.0, 6.0, 7.0], [8.0, 9.0, 10.0]], [[20.0, 20.0, 20.0], [20.0, \n 20.0, 20.0], [20.0, 20.0, 20.0]]])\n', (1545, 1726), True, 'import numpy as np\n')] |
from DataReader import DataReader
from Preprocessor import Preprocessor
from Vectorizer import Vectorizer
from Classifier import Classifier
from DeepLearning import DeepLearner
from sklearn.model_selection import train_test_split as split
import numpy as np
# Load the task-A labelled samples from the OffensEval training file.
reader = DataReader('./datasets/training-v1/offenseval-training-v1.tsv', 'A')
data, labels = reader.get_labelled_data()
data, labels = reader.shuffle(data, labels, 'random')

# Work on shallow copies of the shuffled samples/labels.
data = data[:]
labels = labels[:]

# Clean the raw text before vectorisation.
preprocessor = Preprocessor('remove_stopwords', 'lemmatize')
data = preprocessor.clean(data)

# Hold out 20% for testing; stratify keeps the label distribution intact.
tr_data, tst_data, tr_labels, tst_labels = split(
    np.array(data), labels, test_size=0.2, stratify=labels)

# Up-sample label-1 training examples, then reshuffle the training split.
tr_data, tr_labels = reader.upsample(tr_data, tr_labels, label=1)
tr_data, tr_labels = reader.shuffle(tr_data, tr_labels, 'random')

# Fit a count vectoriser on the training split only.
vectorizer = Vectorizer('count')
vectorizer.vectorize(tr_data)

# Train a CNN model and report accuracy on the held-out split.
model = DeepLearner(tr_data, tr_labels,
                    vocab_length=vectorizer.vocab_length, model_type='CNN')
model.train(epochs=20)
acc = model.test_and_plot(tst_data, tst_labels)
print('Accuracy:', acc)
"Preprocessor.Preprocessor",
"numpy.array",
"DeepLearning.DeepLearner",
"DataReader.DataReader",
"Vectorizer.Vectorizer"
] | [((264, 332), 'DataReader.DataReader', 'DataReader', (['"""./datasets/training-v1/offenseval-training-v1.tsv"""', '"""A"""'], {}), "('./datasets/training-v1/offenseval-training-v1.tsv', 'A')\n", (274, 332), False, 'from DataReader import DataReader\n'), ((458, 503), 'Preprocessor.Preprocessor', 'Preprocessor', (['"""remove_stopwords"""', '"""lemmatize"""'], {}), "('remove_stopwords', 'lemmatize')\n", (470, 503), False, 'from Preprocessor import Preprocessor\n'), ((751, 770), 'Vectorizer.Vectorizer', 'Vectorizer', (['"""count"""'], {}), "('count')\n", (761, 770), False, 'from Vectorizer import Vectorizer\n'), ((801, 886), 'DeepLearning.DeepLearner', 'DeepLearner', (['tr_data', 'tr_labels'], {'vocab_length': 'vct.vocab_length', 'model_type': '"""CNN"""'}), "(tr_data, tr_labels, vocab_length=vct.vocab_length, model_type='CNN'\n )\n", (812, 886), False, 'from DeepLearning import DeepLearner\n'), ((573, 587), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (581, 587), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import cv2
import numpy as np
def convolve(image, kernel):
    """Convolve a 2-D image with *kernel* using a replicated border.

    The output has the same height/width as the input and dtype float32.
    Assumes a single-channel (2-D) image; the pad width is derived from
    the kernel width only, so a square kernel is expected.
    """
    (iH, iW) = image.shape[:2]
    (kH, kW) = kernel.shape[:2]
    # Integer half-width of the kernel. The original '/' produced a float
    # under Python 3, which broke border padding, np.arange indexing and
    # slicing; '//' keeps pad usable as an index.
    pad = (kW - 1) // 2
    # Replicate edge pixels so the output keeps the input's size
    # (equivalent to cv2.copyMakeBorder with cv2.BORDER_REPLICATE).
    image = np.pad(image, pad, mode="edge")
    output = np.zeros((iH, iW), dtype="float32")
    for y in range(pad, iH + pad):
        for x in range(pad, iW + pad):
            # Region of interest centred on (y, x) in the padded image.
            roi = image[y - pad:y + pad + 1, x - pad:x + pad + 1]
            k = (roi * kernel).sum()
            output[y - pad, x - pad] = k
    return output
def readFile(filename, image, filter):
    """Load a convolution kernel from a CSV file and apply it to *image*.

    Parameters
    ----------
    filename : str
        Path to a comma-separated text file holding integer kernel values.
    image : numpy.ndarray
        Colour image; converted to grayscale before filtering.
    filter : str
        Kernel type: 'blur' rescales the loaded integers (presumably a
        uniform kernel, yielding a box blur — verify against the kernel
        files used by callers); anything else uses them as-is.
        NOTE(review): the name shadows the builtin ``filter`` but is kept
        so keyword callers keep working.

    Returns
    -------
    numpy.ndarray
        The convolved grayscale image (float32).
    """
    imageGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Renamed from 'input' to avoid shadowing the builtin input().
    kernel = np.loadtxt(filename, dtype='i', delimiter=',')
    if filter == 'blur':
        height = len(kernel)
        width = len(kernel[0])
        # Weight every entry by the top-left value divided by the kernel
        # area, turning the integer kernel into a normalised float one.
        kernel = np.array(kernel, dtype="float") * (float(kernel[0][0]) / (height * width))
        print(kernel)
    else:
        kernel = np.array(kernel, dtype="int")
    convoleOutput = convolve(imageGray, kernel)
    return convoleOutput
| [
"cv2.cvtColor",
"numpy.zeros",
"cv2.copyMakeBorder",
"numpy.arange",
"numpy.loadtxt",
"numpy.array"
] | [((194, 261), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['image', 'pad', 'pad', 'pad', 'pad', 'cv2.BORDER_REPLICATE'], {}), '(image, pad, pad, pad, pad, cv2.BORDER_REPLICATE)\n', (212, 261), False, 'import cv2\n'), ((306, 341), 'numpy.zeros', 'np.zeros', (['(iH, iW)'], {'dtype': '"""float32"""'}), "((iH, iW), dtype='float32')\n", (314, 341), True, 'import numpy as np\n'), ((360, 384), 'numpy.arange', 'np.arange', (['pad', '(iH + pad)'], {}), '(pad, iH + pad)\n', (369, 384), True, 'import numpy as np\n'), ((695, 734), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (707, 734), False, 'import cv2\n'), ((747, 793), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'dtype': '"""i"""', 'delimiter': '""","""'}), "(filename, dtype='i', delimiter=',')\n", (757, 793), True, 'import numpy as np\n'), ((403, 427), 'numpy.arange', 'np.arange', (['pad', '(iW + pad)'], {}), '(pad, iW + pad)\n', (412, 427), True, 'import numpy as np\n'), ((1016, 1044), 'numpy.array', 'np.array', (['input'], {'dtype': '"""int"""'}), "(input, dtype='int')\n", (1024, 1044), True, 'import numpy as np\n'), ((895, 925), 'numpy.array', 'np.array', (['input'], {'dtype': '"""float"""'}), "(input, dtype='float')\n", (903, 925), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.