code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 18:56:24 2019
@author: cpdsg
"""
import svect as sv
import numpy as np
from math import pi
import qneural as qn
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# =============================================================================
# Simulating the iterations of a Quantum Stochastic Neural Map
# for a Quantum Recurrent Neural Network starting from an Eigendensity of
# the Noiseless Unitary Map with 3D plotting and Box-Counting Dimension
# calculation
# =============================================================================
# =============================================================================
# Part 1 - Definition of the function needed for the stochastic update
# =============================================================================
# Antisymmetric 2x2 generator of the single-neuron rotation built in get_unitaries.
V = np.matrix([[0,-1],[1,0]])
# Single-qubit projectors — presumably onto the two basis states; confirm against svect.proj2x2.
P0 = sv.proj2x2(False)
P1 = sv.proj2x2(True)
# 2x2 identity operator.
one = sv.unit()
def get_unitaries(angle):
    """Build the pair of conditional two-neuron operators for the given angle.

    A single-neuron rotation cos(angle/2)*1 + sin(angle/2)*V is applied to
    one neuron conditioned on the other neuron's projector, in both
    orderings.

    Returns
    -------
    list
        [U01, U10], the two conditional operators.
    """
    half = angle / 2
    rotation = np.cos(half) * one + np.sin(half) * V
    forward = sv.tensorProd([P0, one]) + sv.tensorProd([P1, rotation])
    backward = sv.tensorProd([one, P0]) + sv.tensorProd([rotation, P1])
    return [forward, backward]
# =============================================================================
# Part 2 - Preparation of the neural network
# =============================================================================
# Initialize the network to one of the eigendensities of the noiseless map
r=0.99 # base parameter for average (fraction of pi used as the noiseless rotation angle)
unitaries = get_unitaries(r*pi) # noiseless neural operators
# Start the network in an eigendensity of the noiseless unitary map,
# so any structure in the orbit below comes from the stochastic perturbation.
Net = qn.initialize_network_eig(num_neurons=2,
                                neural_operators=unitaries,
                                eigenvector_index=2)
# Definition of the neural projectors (two-neuron observables whose quantum
# averages are tracked during the iteration below)
P01=sv.tensorProd([P0,P1])
P10=sv.tensorProd([P1,P0])
P11=sv.tensorProd([P1,P1])
# =============================================================================
# Part 3 - Implementation of the quantum stochastic map
# =============================================================================
# Map's parameters
max_it = 1010000 # maximum number of iterations
transient = 10000 # transient (iterations discarded before sampling)
seed_base=3 # seed base
step=10 # step for seed generation
coupling=0.001 # noise coupling level
points = [] # list of [<P01>, <P10>, <P11>] triples sampled after the transient
# NOTE(review): the three lists below are never filled or read in this script —
# the averages are stored in `points` instead; presumably leftover scaffolding.
P01_av = [] # quantum averages extracted for operator P01
P10_av = [] # quantum averages extracted for operator P10
P11_av = [] # quantum averages extracted for operator P11
# Iterate network extracting the quantum averages
for n in range(0,max_it):
    # Fresh seeded RNG per iteration so the noise sequence is reproducible.
    z = qn.get_seed(seed_base,n,step)
    # Noisy half-angle: base rotation r*2*pi perturbed by Gaussian noise, wrapped mod 2*pi.
    angle=np.mod(r*2*pi+coupling*z.normal(),2*pi)/2
    neural_operators = get_unitaries(angle)
    NeuralMap = Net.build_quantum_neural_map(neural_operators)
    # Apply the (stochastic) neural map to the network's density operator.
    Net.rho = sv.transformDensity(NeuralMap,Net.rho)
    point = []
    if n >= transient:
        # Quantum averages Tr(rho * P) of each neural projector.
        point.append(np.trace(np.dot(Net.rho,P01)).real)
        point.append(np.trace(np.dot(Net.rho,P10)).real)
        point.append(np.trace(np.dot(Net.rho,P11)).real)
        points.append(point)
# Plot the results (including the Box Counting Dimension)
points = np.array(points)
qn.calculate_BoxCounting(sequence=points,max_bins=100,cutoff=None)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(points[:,0],points[:,1],points[:,2],c='k',marker='.',s=0.0001)
ax.set_xlabel('<P01>')
ax.set_ylabel('<P10>')
ax.set_zlabel('<P11>')
ax.view_init(70)
plt.show()
| [
"svect.unit",
"numpy.matrix",
"matplotlib.pyplot.show",
"qneural.initialize_network_eig",
"matplotlib.pyplot.figure",
"svect.transformDensity",
"numpy.array",
"qneural.get_seed",
"numpy.cos",
"numpy.sin",
"svect.tensorProd",
"numpy.dot",
"qneural.calculate_BoxCounting",
"svect.proj2x2"
] | [((913, 941), 'numpy.matrix', 'np.matrix', (['[[0, -1], [1, 0]]'], {}), '([[0, -1], [1, 0]])\n', (922, 941), True, 'import numpy as np\n'), ((947, 964), 'svect.proj2x2', 'sv.proj2x2', (['(False)'], {}), '(False)\n', (957, 964), True, 'import svect as sv\n'), ((971, 987), 'svect.proj2x2', 'sv.proj2x2', (['(True)'], {}), '(True)\n', (981, 987), True, 'import svect as sv\n'), ((995, 1004), 'svect.unit', 'sv.unit', ([], {}), '()\n', (1002, 1004), True, 'import svect as sv\n'), ((1641, 1734), 'qneural.initialize_network_eig', 'qn.initialize_network_eig', ([], {'num_neurons': '(2)', 'neural_operators': 'unitaries', 'eigenvector_index': '(2)'}), '(num_neurons=2, neural_operators=unitaries,\n eigenvector_index=2)\n', (1666, 1734), True, 'import qneural as qn\n'), ((1843, 1866), 'svect.tensorProd', 'sv.tensorProd', (['[P0, P1]'], {}), '([P0, P1])\n', (1856, 1866), True, 'import svect as sv\n'), ((1871, 1894), 'svect.tensorProd', 'sv.tensorProd', (['[P1, P0]'], {}), '([P1, P0])\n', (1884, 1894), True, 'import svect as sv\n'), ((1899, 1922), 'svect.tensorProd', 'sv.tensorProd', (['[P1, P1]'], {}), '([P1, P1])\n', (1912, 1922), True, 'import svect as sv\n'), ((3205, 3221), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (3213, 3221), True, 'import numpy as np\n'), ((3225, 3293), 'qneural.calculate_BoxCounting', 'qn.calculate_BoxCounting', ([], {'sequence': 'points', 'max_bins': '(100)', 'cutoff': 'None'}), '(sequence=points, max_bins=100, cutoff=None)\n', (3249, 3293), True, 'import qneural as qn\n'), ((3301, 3313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3311, 3313), True, 'from matplotlib import pyplot as plt\n'), ((3528, 3538), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3536, 3538), True, 'from matplotlib import pyplot as plt\n'), ((2630, 2661), 'qneural.get_seed', 'qn.get_seed', (['seed_base', 'n', 'step'], {}), '(seed_base, n, step)\n', (2641, 2661), True, 'import qneural as qn\n'), ((2837, 2876), 'svect.transformDensity', 
'sv.transformDensity', (['NeuralMap', 'Net.rho'], {}), '(NeuralMap, Net.rho)\n', (2856, 2876), True, 'import svect as sv\n'), ((1100, 1124), 'svect.tensorProd', 'sv.tensorProd', (['[P0, one]'], {}), '([P0, one])\n', (1113, 1124), True, 'import svect as sv\n'), ((1124, 1152), 'svect.tensorProd', 'sv.tensorProd', (['[P1, U_angle]'], {}), '([P1, U_angle])\n', (1137, 1152), True, 'import svect as sv\n'), ((1163, 1187), 'svect.tensorProd', 'sv.tensorProd', (['[one, P0]'], {}), '([one, P0])\n', (1176, 1187), True, 'import svect as sv\n'), ((1187, 1215), 'svect.tensorProd', 'sv.tensorProd', (['[U_angle, P1]'], {}), '([U_angle, P1])\n', (1200, 1215), True, 'import svect as sv\n'), ((1049, 1066), 'numpy.cos', 'np.cos', (['(angle / 2)'], {}), '(angle / 2)\n', (1055, 1066), True, 'import numpy as np\n'), ((1071, 1088), 'numpy.sin', 'np.sin', (['(angle / 2)'], {}), '(angle / 2)\n', (1077, 1088), True, 'import numpy as np\n'), ((2947, 2967), 'numpy.dot', 'np.dot', (['Net.rho', 'P01'], {}), '(Net.rho, P01)\n', (2953, 2967), True, 'import numpy as np\n'), ((3005, 3025), 'numpy.dot', 'np.dot', (['Net.rho', 'P10'], {}), '(Net.rho, P10)\n', (3011, 3025), True, 'import numpy as np\n'), ((3063, 3083), 'numpy.dot', 'np.dot', (['Net.rho', 'P11'], {}), '(Net.rho, P11)\n', (3069, 3083), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from bci3wads.utils import data
from bci3wads.utils import constants
from bci3wads.features.epoch import Epoch
# Collect per-epoch "match" (target) and "mismatch" (non-target) signal arrays
# for the reference subject, then plot the grand-average waveforms.
subject_match_signals = []
subject_mismatch_signals = []
for epoch_path in constants.REF_SUBJECT_PATH.glob('epoch_*.pickle'):
    epoch = Epoch(data.load_pickle(epoch_path))
    match_signals = epoch.get_match_signals()
    mismatch_signals = epoch.get_mismatch_signals()
    subject_match_signals.append(match_signals)
    subject_mismatch_signals.append(mismatch_signals)
# Stack all epochs along the first axis.
subject_match_signals = np.concatenate(subject_match_signals)
subject_mismatch_signals = np.concatenate(subject_mismatch_signals)
# Average over the first two axes — presumably (epochs, trials/channels),
# leaving the time axis; confirm against Epoch's array layout.
match_average = np.mean(subject_match_signals, axis=(0, 1))
mismatch_average = np.mean(subject_mismatch_signals, axis=(0, 1))
plt.plot(match_average, label='match')
plt.plot(mismatch_average, label='mismatch')
plt.legend()
plt.show()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.mean",
"bci3wads.utils.constants.REF_SUBJECT_PATH.glob",
"bci3wads.utils.data.load_pickle",
"numpy.concatenate"
] | [((240, 289), 'bci3wads.utils.constants.REF_SUBJECT_PATH.glob', 'constants.REF_SUBJECT_PATH.glob', (['"""epoch_*.pickle"""'], {}), "('epoch_*.pickle')\n", (271, 289), False, 'from bci3wads.utils import constants\n'), ((566, 603), 'numpy.concatenate', 'np.concatenate', (['subject_match_signals'], {}), '(subject_match_signals)\n', (580, 603), True, 'import numpy as np\n'), ((631, 671), 'numpy.concatenate', 'np.concatenate', (['subject_mismatch_signals'], {}), '(subject_mismatch_signals)\n', (645, 671), True, 'import numpy as np\n'), ((689, 732), 'numpy.mean', 'np.mean', (['subject_match_signals'], {'axis': '(0, 1)'}), '(subject_match_signals, axis=(0, 1))\n', (696, 732), True, 'import numpy as np\n'), ((752, 798), 'numpy.mean', 'np.mean', (['subject_mismatch_signals'], {'axis': '(0, 1)'}), '(subject_mismatch_signals, axis=(0, 1))\n', (759, 798), True, 'import numpy as np\n'), ((800, 838), 'matplotlib.pyplot.plot', 'plt.plot', (['match_average'], {'label': '"""match"""'}), "(match_average, label='match')\n", (808, 838), True, 'import matplotlib.pyplot as plt\n'), ((839, 883), 'matplotlib.pyplot.plot', 'plt.plot', (['mismatch_average'], {'label': '"""mismatch"""'}), "(mismatch_average, label='mismatch')\n", (847, 883), True, 'import matplotlib.pyplot as plt\n'), ((884, 896), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (894, 896), True, 'import matplotlib.pyplot as plt\n'), ((897, 907), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (905, 907), True, 'import matplotlib.pyplot as plt\n'), ((309, 337), 'bci3wads.utils.data.load_pickle', 'data.load_pickle', (['epoch_path'], {}), '(epoch_path)\n', (325, 337), False, 'from bci3wads.utils import data\n')] |
"""
Relief Visualization Toolbox – Visualization Functions
RVT Convert to 8bit esri raster function
rvt_py, rvt.vis.byte_scale
Credits:
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME>
<NAME>
<NAME>
<NAME>
<NAME>
Copyright:
2010-2020 Research Centre of the Slovenian Academy of Sciences and Arts
2016-2020 University of Ljubljana, Faculty of Civil and Geodetic Engineering
"""
import numpy as np
import rvt.vis
import gc
class RVTto8Bit:
    """ESRI raster-function wrapper that byte-scales an input raster to 0-255.

    The method names and signatures follow the ArcGIS custom raster
    function (Python raster function) callback contract; the actual
    scaling is delegated to rvt.vis.byte_scale.
    """
    def __init__(self):
        # Display name / description shown in the raster-function UI.
        self.name = "RVT Convert to 8bit"
        self.description = "Convert image values 0-255 (Byte scale)."
    def getParameterInfo(self):
        """Declare the single required input-raster parameter."""
        return [
            {
                'name': 'raster',
                'dataType': 'raster',
                'value': None,
                'required': True,
                'displayName': "Input Raster",
                'description': "Input raster to convert to 8bit."
            }
        ]
    def getConfiguration(self, **scalars):
        """Return processing options (no padding, no resampling, no mask)."""
        self.prepare()  # nothing to prepare
        return {
            'compositeRasters': False,
            'inheritProperties': 4,
            'invalidateProperties': 2 | 4 | 8,
            'inputMask': False,
            'resampling': False,
            'padding': 0,
            'resamplingType': 1
        }
    def updateRasterInfo(self, **kwargs):
        """Describe the output raster: unsigned 8-bit, 1 or 3 bands.

        Three input bands (e.g. RGB) are passed through as three output
        bands; anything else collapses to a single band.
        """
        r = kwargs['raster_info']
        if int(r['bandCount']) == 3:
            kwargs['output_info']['bandCount'] = 3
        else:
            kwargs['output_info']['bandCount'] = 1
        kwargs['output_info']['noData'] = np.nan
        kwargs['output_info']['pixelType'] = 'u1'
        # Reset cached stats so they are recomputed for the new value range.
        kwargs['output_info']['histogram'] = ()
        kwargs['output_info']['statistics'] = ()
        return kwargs
    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        """Byte-scale one pixel block via rvt.vis.byte_scale."""
        dem = np.array(pixelBlocks['raster_pixels'], dtype='f4', copy=False)  # Input pixel array.
        if dem.shape[0] == 1:
            # Single-band blocks arrive as (1, rows, cols); drop the band axis.
            dem = dem[0]
        pixel_size = props['cellSize']
        if (pixel_size[0] <= 0) | (pixel_size[1] <= 0):
            raise Exception("Input raster cell size is invalid.")
        bytescl_raster = rvt.vis.byte_scale(data=dem)
        pixelBlocks['output_pixels'] = bytescl_raster.astype(props['pixelType'], copy=False)
        # release memory
        del dem
        del pixel_size
        del bytescl_raster
        gc.collect()
        return pixelBlocks
    def updateKeyMetadata(self, names, bandIndex, **keyMetadata):
        """Tag dataset-level (bandIndex == -1) metadata as processed output."""
        if bandIndex == -1:
            keyMetadata['datatype'] = 'Processed'
            # NOTE(review): '<KEY>' looks like an anonymized placeholder — restore the real product name.
            keyMetadata['productname'] = '<KEY>'
        return keyMetadata
    def prepare(self):
        """No pre-processing needed for this function."""
        pass
| [
"gc.collect",
"numpy.array"
] | [((1832, 1894), 'numpy.array', 'np.array', (["pixelBlocks['raster_pixels']"], {'dtype': '"""f4"""', 'copy': '(False)'}), "(pixelBlocks['raster_pixels'], dtype='f4', copy=False)\n", (1840, 1894), True, 'import numpy as np\n'), ((2382, 2394), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2392, 2394), False, 'import gc\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
使用 LDA 算法重新实现一遍。
"""
import matplotlib.pyplot as plt
import numpy as np
import data_processing as dp
import model_selection as ms
from linear import LDA
if __name__ == '__main__':
    # Load the two-feature microchip QA dataset: columns are test1, test2, label.
    data = np.loadtxt('ex2data2.txt', delimiter=',')
    X = data[:, :2]
    y = data[:, 2:3]
    # Boolean index masks for the two classes.
    pos1 = (y == 0).ravel()
    pos2 = (y == 1).ravel()
    # Scatter the raw data: black '+' for accepted (y=1), yellow 'o' for rejected (y=0).
    i1, = plt.plot(X[pos2, 0], X[pos2, 1], 'k+', linewidth=2, markersize=7)
    i2, = plt.plot(X[pos1, 0], X[pos1, 1], 'yo', markersize=7)
    plt.legend([i1, i2], ['y = 1', 'y = 0'], loc='upper right')
    plt.xlabel('Microchip Test 1')
    plt.ylabel('Microchip Test 2')
    plt.show()

    'Part 1: LDA classification'
    # Map the 2 raw features to a degree-6 polynomial feature expansion,
    # then train LDA and report training accuracy.
    X = dp.map_features(X, degrees=6)
    print('映射后前5行:\n', X[:5, :])
    ld = LDA()
    ld.train(X, y)
    p = ld.predict(X)
    print('\nTrain Accuracy: %f' % (ms.accuracy(p, y) * 100))
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"data_processing.map_features",
"linear.LDA",
"model_selection.accuracy",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((247, 288), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2data2.txt"""'], {'delimiter': '""","""'}), "('ex2data2.txt', delimiter=',')\n", (257, 288), True, 'import numpy as np\n'), ((397, 462), 'matplotlib.pyplot.plot', 'plt.plot', (['X[pos2, 0]', 'X[pos2, 1]', '"""k+"""'], {'linewidth': '(2)', 'markersize': '(7)'}), "(X[pos2, 0], X[pos2, 1], 'k+', linewidth=2, markersize=7)\n", (405, 462), True, 'import matplotlib.pyplot as plt\n'), ((473, 525), 'matplotlib.pyplot.plot', 'plt.plot', (['X[pos1, 0]', 'X[pos1, 1]', '"""yo"""'], {'markersize': '(7)'}), "(X[pos1, 0], X[pos1, 1], 'yo', markersize=7)\n", (481, 525), True, 'import matplotlib.pyplot as plt\n'), ((530, 589), 'matplotlib.pyplot.legend', 'plt.legend', (['[i1, i2]', "['y = 1', 'y = 0']"], {'loc': '"""upper right"""'}), "([i1, i2], ['y = 1', 'y = 0'], loc='upper right')\n", (540, 589), True, 'import matplotlib.pyplot as plt\n'), ((594, 624), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Microchip Test 1"""'], {}), "('Microchip Test 1')\n", (604, 624), True, 'import matplotlib.pyplot as plt\n'), ((629, 659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Microchip Test 2"""'], {}), "('Microchip Test 2')\n", (639, 659), True, 'import matplotlib.pyplot as plt\n'), ((664, 674), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (672, 674), True, 'import matplotlib.pyplot as plt\n'), ((717, 746), 'data_processing.map_features', 'dp.map_features', (['X'], {'degrees': '(6)'}), '(X, degrees=6)\n', (732, 746), True, 'import data_processing as dp\n'), ((790, 795), 'linear.LDA', 'LDA', ([], {}), '()\n', (793, 795), False, 'from linear import LDA\n'), ((873, 890), 'model_selection.accuracy', 'ms.accuracy', (['p', 'y'], {}), '(p, y)\n', (884, 890), True, 'import model_selection as ms\n')] |
"""
Example of running PyTorch implementation of DDPG on HalfCheetah.
"""
import sys
sys.path.append('../')
import copy
from rlkit.torch.ddpg.dsfpg import DSFPGTrainer
from gym.envs.mujoco import HopperEnv
from rlkit.data_management.env_replay_buffer import EnvReplayBuffer
from rlkit.envs.wrappers import NormalizedBoxEnv
from rlkit.exploration_strategies.base import (
PolicyWrappedWithExplorationStrategy
)
from rlkit.exploration_strategies.ou_strategy import OUStrategy
from rlkit.launchers.launcher_util import setup_logger
from rlkit.samplers.data_collector import MdpPathCollector
from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy
from rlkit.torch.networks import LinearMlp
from rlkit.torch.ddpg.ddpg import DDPGTrainer
import rlkit.torch.pytorch_util as ptu
from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm
import h5py
import d4rl, gym
import numpy as np
def load_hdf5(dataset, replay_buffer):
    """Populate `replay_buffer` in place from a d4rl-style transition dataset.

    Parameters
    ----------
    dataset : mapping
        Must provide 'observations', 'next_observations', 'actions',
        'rewards' and 'terminals' arrays with matching first dimension.
    replay_buffer : object
        Buffer whose private storage attributes are overwritten directly.
    """
    # Rewards/terminals are stored as (N, 1) columns inside the buffer.
    rewards_column = np.expand_dims(np.squeeze(dataset['rewards']), 1)
    terminals_column = np.expand_dims(np.squeeze(dataset['terminals']), 1)
    num_transitions = dataset['terminals'].shape[0]
    replay_buffer._observations = dataset['observations']
    replay_buffer._next_obs = dataset['next_observations']
    replay_buffer._actions = dataset['actions']
    replay_buffer._rewards = rewards_column
    replay_buffer._terminals = terminals_column
    # Mark the buffer as full up to the dataset size.
    replay_buffer._size = num_transitions
    print ('Number of terminals on: ', replay_buffer._terminals.sum())
    replay_buffer._top = num_transitions
def experiment(variant):
    """Build the networks, collectors and replay buffer, then run DSFPG training.

    Parameters
    ----------
    variant : dict
        Experiment configuration; see the __main__ block for the expected keys
        (env_name, load_buffer, buffer_filename, sf/policy/trainer/algorithm
        kwargs, replay_buffer_size).
    """
    eval_env = gym.make(variant['env_name'])
    # Exploration and evaluation share the same environment instance.
    expl_env = eval_env
    obs_dim = eval_env.observation_space.low.size
    action_dim = eval_env.action_space.low.size
    # Successor-feature network: maps (obs, action) -> obs-dim feature vector.
    sf = ConcatMlp(
        input_size=obs_dim + action_dim,
        output_size=obs_dim,
        **variant['sf_kwargs']
    )
    # qf can be either linear or non-linear function
    qf = LinearMlp(
        input_size=obs_dim,
        output_size=1,
    )
    policy = TanhMlpPolicy(
        input_size=obs_dim,
        output_size=action_dim,
        **variant['policy_kwargs']
    )
    # Target copies for soft-updated bootstrapping.
    target_sf = copy.deepcopy(sf)
    target_qf = copy.deepcopy(qf)
    target_policy = copy.deepcopy(policy)
    eval_path_collector = MdpPathCollector(eval_env, policy)
    # Exploration adds Ornstein-Uhlenbeck noise on top of the deterministic policy.
    exploration_policy = PolicyWrappedWithExplorationStrategy(
        exploration_strategy=OUStrategy(action_space=expl_env.action_space),
        policy=policy,
    )
    expl_path_collector = MdpPathCollector(expl_env, exploration_policy)
    # replay_buffer = EnvReplayBuffer(variant['replay_buffer_size'], expl_env)
    buffer_filename = None
    if variant['buffer_filename'] is not None:
        buffer_filename = variant['buffer_filename']
    replay_buffer = EnvReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
    )
    # Offline mode: either restore a saved buffer or fill it from the d4rl dataset.
    if variant['load_buffer'] and buffer_filename is not None:
        replay_buffer.load_buffer(buffer_filename)
    else:
        load_hdf5(d4rl.qlearning_dataset(eval_env), replay_buffer)
    # need to change here
    trainer = DSFPGTrainer(
        sf=sf,
        target_sf=target_sf,
        qf=qf,
        target_qf=target_qf,
        policy=policy,
        target_policy=target_policy,
        **variant['trainer_kwargs']
    )
    algorithm = TorchBatchRLAlgorithm(
        trainer=trainer,
        exploration_env=expl_env,
        evaluation_env=eval_env,
        exploration_data_collector=expl_path_collector,
        evaluation_data_collector=eval_path_collector,
        replay_buffer=replay_buffer,
        offline=variant['load_buffer'],
        **variant['algorithm_kwargs']
    )
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # noinspection PyTypeChecker
    # Experiment configuration consumed by experiment() above.
    variant = dict(
        algorithm='OSFPG',
        version='normal',
        env_name='hopper-random-v0',
        load_buffer=True, # True value makes the agent trained under offline setting
        buffer_filename=None,
        algorithm_kwargs=dict(
            num_epochs=1000,
            num_eval_steps_per_epoch=1000,
            num_trains_per_train_loop=1000,
            num_expl_steps_per_train_loop=1000,
            min_num_steps_before_training=10000,
            max_path_length=1000,
            batch_size=256,
        ),
        trainer_kwargs=dict(
            use_soft_update=True,
            tau=5e-3,
            discount=0.99,
            qf_learning_rate=1e-4,
            sf_learning_rate=2e-4,
            policy_learning_rate=1e-4,
        ),
        sf_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        policy_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        replay_buffer_size=int(1E6),
    )
    ptu.set_gpu_mode(True)  # optionally set the GPU (default=False)
    setup_logger('Test OSFPG offline in hopper env ---', variant=variant)
    experiment(variant)
| [
"sys.path.append",
"rlkit.torch.networks.LinearMlp",
"copy.deepcopy",
"rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm",
"rlkit.torch.networks.TanhMlpPolicy",
"gym.make",
"rlkit.data_management.env_replay_buffer.EnvReplayBuffer",
"rlkit.torch.ddpg.dsfpg.DSFPGTrainer",
"rlkit.torch.networks.Conc... | [((85, 107), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (100, 107), False, 'import sys\n'), ((1477, 1506), 'gym.make', 'gym.make', (["variant['env_name']"], {}), "(variant['env_name'])\n", (1485, 1506), False, 'import d4rl, gym\n'), ((1640, 1732), 'rlkit.torch.networks.ConcatMlp', 'ConcatMlp', ([], {'input_size': '(obs_dim + action_dim)', 'output_size': 'obs_dim'}), "(input_size=obs_dim + action_dim, output_size=obs_dim, **variant[\n 'sf_kwargs'])\n", (1649, 1732), False, 'from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy\n'), ((1820, 1864), 'rlkit.torch.networks.LinearMlp', 'LinearMlp', ([], {'input_size': 'obs_dim', 'output_size': '(1)'}), '(input_size=obs_dim, output_size=1)\n', (1829, 1864), False, 'from rlkit.torch.networks import LinearMlp\n'), ((1901, 1991), 'rlkit.torch.networks.TanhMlpPolicy', 'TanhMlpPolicy', ([], {'input_size': 'obs_dim', 'output_size': 'action_dim'}), "(input_size=obs_dim, output_size=action_dim, **variant[\n 'policy_kwargs'])\n", (1914, 1991), False, 'from rlkit.torch.networks import ConcatMlp, TanhMlpPolicy\n'), ((2033, 2050), 'copy.deepcopy', 'copy.deepcopy', (['sf'], {}), '(sf)\n', (2046, 2050), False, 'import copy\n'), ((2067, 2084), 'copy.deepcopy', 'copy.deepcopy', (['qf'], {}), '(qf)\n', (2080, 2084), False, 'import copy\n'), ((2105, 2126), 'copy.deepcopy', 'copy.deepcopy', (['policy'], {}), '(policy)\n', (2118, 2126), False, 'import copy\n'), ((2153, 2187), 'rlkit.samplers.data_collector.MdpPathCollector', 'MdpPathCollector', (['eval_env', 'policy'], {}), '(eval_env, policy)\n', (2169, 2187), False, 'from rlkit.samplers.data_collector import MdpPathCollector\n'), ((2383, 2429), 'rlkit.samplers.data_collector.MdpPathCollector', 'MdpPathCollector', (['expl_env', 'exploration_policy'], {}), '(expl_env, exploration_policy)\n', (2399, 2429), False, 'from rlkit.samplers.data_collector import MdpPathCollector\n'), ((2662, 2718), 
'rlkit.data_management.env_replay_buffer.EnvReplayBuffer', 'EnvReplayBuffer', (["variant['replay_buffer_size']", 'expl_env'], {}), "(variant['replay_buffer_size'], expl_env)\n", (2677, 2718), False, 'from rlkit.data_management.env_replay_buffer import EnvReplayBuffer\n'), ((2975, 3121), 'rlkit.torch.ddpg.dsfpg.DSFPGTrainer', 'DSFPGTrainer', ([], {'sf': 'sf', 'target_sf': 'target_sf', 'qf': 'qf', 'target_qf': 'target_qf', 'policy': 'policy', 'target_policy': 'target_policy'}), "(sf=sf, target_sf=target_sf, qf=qf, target_qf=target_qf, policy\n =policy, target_policy=target_policy, **variant['trainer_kwargs'])\n", (2987, 3121), False, 'from rlkit.torch.ddpg.dsfpg import DSFPGTrainer\n'), ((3195, 3489), 'rlkit.torch.torch_rl_algorithm.TorchBatchRLAlgorithm', 'TorchBatchRLAlgorithm', ([], {'trainer': 'trainer', 'exploration_env': 'expl_env', 'evaluation_env': 'eval_env', 'exploration_data_collector': 'expl_path_collector', 'evaluation_data_collector': 'eval_path_collector', 'replay_buffer': 'replay_buffer', 'offline': "variant['load_buffer']"}), "(trainer=trainer, exploration_env=expl_env,\n evaluation_env=eval_env, exploration_data_collector=expl_path_collector,\n evaluation_data_collector=eval_path_collector, replay_buffer=\n replay_buffer, offline=variant['load_buffer'], **variant[\n 'algorithm_kwargs'])\n", (3216, 3489), False, 'from rlkit.torch.torch_rl_algorithm import TorchBatchRLAlgorithm\n'), ((4627, 4649), 'rlkit.torch.pytorch_util.set_gpu_mode', 'ptu.set_gpu_mode', (['(True)'], {}), '(True)\n', (4643, 4649), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((4696, 4765), 'rlkit.launchers.launcher_util.setup_logger', 'setup_logger', (['"""Test OSFPG offline in hopper env ---"""'], {'variant': 'variant'}), "('Test OSFPG offline in hopper env ---', variant=variant)\n", (4708, 4765), False, 'from rlkit.launchers.launcher_util import setup_logger\n'), ((1143, 1173), 'numpy.squeeze', 'np.squeeze', (["dataset['rewards']"], {}), "(dataset['rewards'])\n", (1153, 
1173), True, 'import numpy as np\n'), ((1224, 1256), 'numpy.squeeze', 'np.squeeze', (["dataset['terminals']"], {}), "(dataset['terminals'])\n", (1234, 1256), True, 'import numpy as np\n'), ((2280, 2326), 'rlkit.exploration_strategies.ou_strategy.OUStrategy', 'OUStrategy', ([], {'action_space': 'expl_env.action_space'}), '(action_space=expl_env.action_space)\n', (2290, 2326), False, 'from rlkit.exploration_strategies.ou_strategy import OUStrategy\n'), ((2884, 2916), 'd4rl.qlearning_dataset', 'd4rl.qlearning_dataset', (['eval_env'], {}), '(eval_env)\n', (2906, 2916), False, 'import d4rl, gym\n')] |
import numpy as np
#program that displays 1000 entries of sin(x) vs x.
#also adds the same but for cos(x) as an extra and prep for propt 6.
def main():
    """Print a two-column table of sin(x) and cos(x) over 1000 samples in [0, 2*pi]."""
    samples = np.linspace(0.0, 2.0 * np.pi, 1000)
    print("sin(x) cos(x)")
    for value in samples:
        print(f"{np.sin(value):10.5e} {np.cos(value):10.5e}")


if __name__ == '__main__':
    main()
"numpy.sin",
"numpy.cos",
"numpy.linspace"
] | [((159, 194), 'numpy.linspace', 'np.linspace', (['(0.0)', '(2.0 * np.pi)', '(1000)'], {}), '(0.0, 2.0 * np.pi, 1000)\n', (170, 194), True, 'import numpy as np\n'), ((260, 272), 'numpy.sin', 'np.sin', (['x[i]'], {}), '(x[i])\n', (266, 272), True, 'import numpy as np\n'), ((285, 297), 'numpy.cos', 'np.cos', (['x[i]'], {}), '(x[i])\n', (291, 297), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import ceiltrack
import numpy as np
import struct
def rle(mask):
    """Run-length encode a flat binary mask as alternating <skip, run> lengths.

    The output alternates between the length of a run of zeros ("skip") and
    the length of the following run of ones ("run"), always starting with a
    skip (which is 0 when the mask begins with ones).

    Parameters
    ----------
    mask : array-like of bool/int
        Flat binary mask to encode.

    Returns
    -------
    np.ndarray
        Alternating skip/run lengths. A trailing run of zeros is omitted,
        since the decoder only needs complete <skip, run> pairs.
    """
    # assume run starts with zeros, output <skip length> <run length>
    out = []
    zero = True
    run = 0
    for i in range(len(mask)):
        if zero:
            if mask[i]:
                out.append(run)
                run = 1
                zero = False
            else:
                run += 1
        else:
            if mask[i]:
                run += 1
            else:
                out.append(run)
                run = 1
                zero = True
    # Bug fix: flush the final run of ones. The loop only appends a run when
    # the value changes, so a mask ending in ones previously lost its last
    # run, leaving an incomplete <skip, run> pair in the output.
    if not zero:
        out.append(run)
    return np.array(out)
def main():
    """Build the ceiling-tracker lookup table and write it to ceillut.bin.

    File layout (little-endian, struct format "=4sIHHII"):
      - 4-byte magic b'cmLU'
      - uint32 header payload length (height/width/N/M fields = 12 bytes)
      - uint16 image height, uint16 image width
      - uint32 number of pixels set in the mask (N)
      - uint32 number of uint16 entries in the RLE mask (M)
    followed by
      [uint16 skip, uint16 run] pairs (RLE of the flattened mask)
      [fp16 u, fp16 v] x N lookup points
    """
    ceilmask, pts = ceiltrack.ceillut()
    fname = "ceillut.bin"
    rlemask = rle(ceilmask.reshape(-1)).astype(np.uint16)
    h, w = ceilmask.shape
    hlen = 2 + 2 + 4 + 4  # bytes of header payload following the magic/length fields
    # Context manager guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(fname, "wb") as f:
        f.write(struct.pack("=4sIHHII", b'cmLU', hlen, h, w,
                            np.sum(ceilmask), len(rlemask)))
        f.write(rlemask.tobytes())
        f.write(pts.T.astype(np.float16).tobytes())
    print("wrote", fname, 'mask', len(rlemask)*2, 'bytes; pts x', pts.T.shape, '=',
          pts.shape[1]*4, 'bytes', np.sum(ceilmask))

if __name__ == '__main__':
    main()
| [
"ceiltrack.ceillut",
"numpy.array",
"numpy.sum"
] | [((617, 630), 'numpy.array', 'np.array', (['out'], {}), '(out)\n', (625, 630), True, 'import numpy as np\n'), ((665, 684), 'ceiltrack.ceillut', 'ceiltrack.ceillut', ([], {}), '()\n', (682, 684), False, 'import ceiltrack\n'), ((1428, 1444), 'numpy.sum', 'np.sum', (['ceilmask'], {}), '(ceilmask)\n', (1434, 1444), True, 'import numpy as np\n'), ((1183, 1199), 'numpy.sum', 'np.sum', (['ceilmask'], {}), '(ceilmask)\n', (1189, 1199), True, 'import numpy as np\n')] |
# Copyright (c) 2020, <NAME>
# See LICENSE file for details: <https://github.com/moble/scri/blob/master/LICENSE>
### NOTE: The functions in this file are intended purely for inclusion in the AsymptoticBondData
### class. In particular, they assume that the first argument, `self` is an instance of
### AsymptoticBondData. They should probably not be used outside of that class.
import numpy as np
from math import sqrt, pi
def mass_aspect(self, truncate_ell=max):
    """Compute the Bondi mass aspect of the AsymptoticBondiData.
    The Bondi mass aspect is given by
        M = -ℜ{ψ₂ + σ ∂ₜσ̄}
    Note that the last term is a product between two fields.  If, for example, these both have
    ell_max=8, then their full product would have ell_max=16, meaning that we would go from
    tracking 81 modes to 289.  This shows that deciding how to truncate the output ell is
    important, which is why this function has the extra argument that it does.
    Parameters
    ==========
    truncate_ell: int, or callable [defaults to `max`]
        Determines how the ell_max value of the output is determined.  If an integer is passed,
        each term in the output is truncated to have at most that ell_max.  (In particular,
        terms that will not be used in the output are simply not computed, without incurring any
        errors due to aliasing.)  If a callable is passed, it is passed on to the
        spherical_functions.Modes.multiply method.  See that function's docstring for details.
        The default behavior will result in the output having ell_max equal to the largest of
        any of the individual Modes objects in the equation for M above -- but not the
        product.
    """
    # A callable is forwarded directly as the `truncator` of the mode product.
    if callable(truncate_ell):
        return -(self.psi2 + self.sigma.multiply(self.sigma.bar.dot, truncator=truncate_ell)).real
    # A (truthy) integer truncates psi2 up front and caps the product at the same ell.
    elif truncate_ell:
        return -(
            self.psi2.truncate_ell(truncate_ell)
            + self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: truncate_ell)
        ).real
    # Falsy (e.g. None/0): no truncation — the product carries its full ell range.
    else:
        return -(self.psi2 + self.sigma * self.sigma.bar.dot).real
def charge_vector_from_aspect(charge):
    """Convert the ell<=1 modes of a BMS charge aspect into a Cartesian four-vector.

    Treating the aspect as a function a(θ, ϕ) on the sphere, the components are

        vᵝ = (1/4π) ∫ ℜ{a} (tᵝ + rᵝ) dΩ

    where tᵝ + rᵝ = (1, sinθ cosϕ, sinθ sinϕ, cosθ).  The input's last axis
    holds the modes (m = 0 monopole, then the three ell=1 modes).
    """
    root_six = sqrt(6)
    normalization = np.sqrt(4 * pi)
    four_vector = np.empty(charge.shape, dtype=float)
    # Time component comes straight from the monopole mode.
    four_vector[..., 0] = charge[..., 0].real
    # z component from the real part of the m=0, ell=1 mode.
    four_vector[..., 3] = charge[..., 2].real / sqrt(3)
    # x and y components mix the m=±1 modes.
    four_vector[..., 1] = (charge[..., 1] - charge[..., 3]).real / root_six
    four_vector[..., 2] = (charge[..., 1] + charge[..., 3]).imag / root_six
    return four_vector / normalization
def bondi_rest_mass(self):
    """Compute the rest mass sqrt(E² - |P|²) from the Bondi four-momentum."""
    momentum = self.bondi_four_momentum()
    energy = momentum[:, 0]
    spatial = momentum[:, 1:]
    return np.sqrt(energy ** 2 - np.sum(spatial ** 2, axis=1))
def bondi_four_momentum(self):
    """Compute the Bondi four-momentum.

    This is just the ell<2 component of the mass aspect, expressed as a four-vector.
    """
    # Only the ell<=1 part of the mass aspect contributes to the four-momentum.
    aspect_modes = self.mass_aspect(1).view(np.ndarray)
    return charge_vector_from_aspect(aspect_modes)
def bondi_angular_momentum(self, output_dimensionless=False):
    """Compute the total Bondi angular momentum vector
    i (ψ₁ + σ ðσ̄)
    See Eq. (8) of Dray (1985) iopscience.iop.org/article/10.1088/0264-9381/2/1/002
    """
    # NOTE(review): `output_dimensionless` is accepted but never used here — confirm intent.
    ell_max = 1  # Compute only the parts we need, ell<=1
    # Multiply by i and drop the Modes wrapper; only the three ell=1 modes carry the vector.
    charge_aspect = (
        1j
        * (self.psi1.truncate_ell(ell_max) + self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max))
    ).view(np.ndarray)
    # Discard the time component; return the spatial angular-momentum vector.
    return charge_vector_from_aspect(charge_aspect)[:, 1:]
def bondi_dimensionless_spin(self):
    """Compute the dimensionless Bondi spin vector.

    Boosts the angular momentum into the instantaneous rest frame using the
    boost charge N, angular momentum J and four-momentum P, then divides by
    the squared rest mass.
    """
    N = self.bondi_boost_charge()
    J = self.bondi_angular_momentum()
    P = self.bondi_four_momentum()
    # Squared rest mass M² = E² - |P|², kept as a column for broadcasting.
    M_sqr = (P[:, 0] ** 2 - np.sum(P[:, 1:] ** 2, axis=1))[:, np.newaxis]
    # Frame velocity v = P/E at each time step.
    v = P[:, 1:] / (P[:, 0])[:, np.newaxis]
    v_norm = np.linalg.norm(v, axis=1)
    # To prevent dividing by zero, we compute the normalized velocity vhat ONLY at
    # timesteps with a non-zero velocity.
    vhat = v.copy()
    t_idx = v_norm != 0  # Get the indices for timesteps with non-zero velocity
    vhat[t_idx] = v[t_idx] / v_norm[t_idx, np.newaxis]
    gamma = (1 / np.sqrt(1 - v_norm ** 2))[:, np.newaxis]  # Lorentz factor per time step
    J_dot_vhat = np.einsum("ij,ij->i", J, vhat)[:, np.newaxis]  # component of J along the boost
    # Boost J (with the N correction) into the rest frame, then normalize by M².
    spin_charge = (gamma * (J + np.cross(v, N)) - (gamma - 1) * J_dot_vhat * vhat) / M_sqr
    return spin_charge
def bondi_boost_charge(self):
    """Compute the Bondi boost charge vector
    - [ψ₁ + σ ðσ̄ + ½ð(σ σ̄) - t ð ℜ{ψ₂ + σ ∂ₜσ̄}]
    See Eq. (8) of Dray (1985) iopscience.iop.org/article/10.1088/0264-9381/2/1/002
    """
    ell_max = 1  # Compute only the parts we need, ell<=1
    # Assemble the aspect term-by-term, truncating each product at ell<=1;
    # the last term subtracts t times the eth-derivative of the mass aspect.
    charge_aspect = -(
        self.psi1.truncate_ell(ell_max)
        + self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max)
        + 0.5 * (self.sigma.multiply(self.sigma.bar, truncator=lambda tup: ell_max)).eth_GHP
        - self.t
        * (
            self.psi2.truncate_ell(ell_max) + self.sigma.multiply(self.sigma.bar.dot, truncator=lambda tup: ell_max)
        ).real.eth_GHP
    ).view(np.ndarray)
    # Discard the time component; return the spatial boost-charge vector.
    return charge_vector_from_aspect(charge_aspect)[:, 1:]
def bondi_CoM_charge(self):
    """Compute the center-of-mass charge vector
    Gⁱ = Nⁱ + t Pⁱ = - [ψ₁ + σ ðσ̄ + ½ð(σ σ̄)]
    where Nⁱ is the boost charge and Pⁱ is the momentum. See Eq. (3.4) of arXiv:1912.03164.
    """
    ell_max = 1  # Compute only the parts we need, ell<=1
    # Same as the boost charge but without the explicit -t * (mass aspect) term.
    charge_aspect = -(
        self.psi1.truncate_ell(ell_max)
        + self.sigma.multiply(self.sigma.bar.eth_GHP, truncator=lambda tup: ell_max)
        + 0.5 * (self.sigma.multiply(self.sigma.bar, truncator=lambda tup: ell_max)).eth_GHP
    ).view(np.ndarray)
    # Discard the time component; return the spatial CoM-charge vector.
    return charge_vector_from_aspect(charge_aspect)[:, 1:]
def supermomentum(self, supermomentum_def, **kwargs):
"""Compute the supermomentum
This function allows for several different definitions of the
supermomentum. These differences only apply to ell > 1 modes,
so they do not affect the Bondi four-momentum. See
Eqs. (7-9) in arXiv:1404.2475 for the different supermomentum
definitions and links to further references.
In the literature, there is an ambiguity of vocabulary. When
it comes to other BMS charges, we clearly distinuish between
the "charge" and the "aspect". However, the
term "supermomentum" is used for both. Accordingly, this
function provides two ways to compute the supermomentum.
1) By default, the supermomentum will be computed as
Ψ = ψ₂ + σ ∂ₜσ̄ + f(θ, ϕ)
(See below for the definitions of `f`.)
2) By passing the option `integrated=True`, the supermomentum
will instead be computed as
Pₗₘ = - (1/4π) ∫ Ψ(θ, ϕ) Yₗₘ(θ, ϕ) dΩ
Parameters
----------
supermomentum_def : str
The definition of the supermomentum to be computed. One of the
following (case-insensitive) options can be specified:
* 'Bondi-Sachs' or 'BS' for f = 0
* 'Moreschi' or 'M' for f = ð²σ̄
* 'Geroch' or 'G' for f = ½ (ð²σ̄ - ð̄²σ)
* 'Geroch-Winicour' or 'GW' for f = - ð̄²σ
integrated : bool, optional
If True, then return the integrated form of the supermomentum — see
Eq. (6) in arXiv:1404.2475. Default is False
working_ell_max: int, optional
The value of ell_max to be used to define the computation grid. The
number of theta points and the number of phi points are set to
2*working_ell_max+1. Defaults to 2*self.ell_max.
Returns
-------
ModesTimeSeries
"""
return_integrated = kwargs.pop("integrated", False)
if supermomentum_def.lower() in ["bondi-sachs", "bs"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs)
elif supermomentum_def.lower() in ["moreschi", "m"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs) + self.sigma.bar.eth_GHP.eth_GHP
elif supermomentum_def.lower() in ["geroch", "g"]:
supermomentum = (
self.psi2
+ self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs)
+ 0.5 * (self.sigma.bar.eth_GHP.eth_GHP - self.sigma.ethbar_GHP.ethbar_GHP)
)
elif supermomentum_def.lower() in ["geroch-winicour", "gw"]:
supermomentum = self.psi2 + self.sigma.grid_multiply(self.sigma.bar.dot, **kwargs) - self.sigma.ethbar_GHP.ethbar_GHP
else:
raise ValueError(
f"Supermomentum defintion '{supermomentum_def}' not recognized. Please choose one of "
"the following options:\n"
" * 'Bondi-Sachs' or 'BS'\n"
" * 'Moreschi' or 'M'\n"
" * 'Geroch' or 'G'\n"
" * 'Geroch-Winicour' or 'GW'"
)
if return_integrated:
return -0.5 * supermomentum.bar / np.sqrt(np.pi)
else:
return supermomentum
| [
"numpy.sum",
"math.sqrt",
"numpy.empty",
"numpy.einsum",
"numpy.cross",
"numpy.linalg.norm",
"numpy.sqrt"
] | [((2553, 2588), 'numpy.empty', 'np.empty', (['charge.shape'], {'dtype': 'float'}), '(charge.shape, dtype=float)\n', (2561, 2588), True, 'import numpy as np\n'), ((4338, 4363), 'numpy.linalg.norm', 'np.linalg.norm', (['v'], {'axis': '(1)'}), '(v, axis=1)\n', (4352, 4363), True, 'import numpy as np\n'), ((2702, 2709), 'math.sqrt', 'sqrt', (['(6)'], {}), '(6)\n', (2706, 2709), False, 'from math import sqrt, pi\n'), ((2777, 2784), 'math.sqrt', 'sqrt', (['(6)'], {}), '(6)\n', (2781, 2784), False, 'from math import sqrt, pi\n'), ((2833, 2840), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (2837, 2840), False, 'from math import sqrt, pi\n'), ((2866, 2884), 'numpy.sqrt', 'np.sqrt', (['(4 * np.pi)'], {}), '(4 * np.pi)\n', (2873, 2884), True, 'import numpy as np\n'), ((4719, 4749), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'J', 'vhat'], {}), "('ij,ij->i', J, vhat)\n", (4728, 4749), True, 'import numpy as np\n'), ((3073, 3114), 'numpy.sum', 'np.sum', (['(four_momentum[:, 1:] ** 2)'], {'axis': '(1)'}), '(four_momentum[:, 1:] ** 2, axis=1)\n', (3079, 3114), True, 'import numpy as np\n'), ((4235, 4264), 'numpy.sum', 'np.sum', (['(P[:, 1:] ** 2)'], {'axis': '(1)'}), '(P[:, 1:] ** 2, axis=1)\n', (4241, 4264), True, 'import numpy as np\n'), ((4661, 4685), 'numpy.sqrt', 'np.sqrt', (['(1 - v_norm ** 2)'], {}), '(1 - v_norm ** 2)\n', (4668, 4685), True, 'import numpy as np\n'), ((9360, 9374), 'numpy.sqrt', 'np.sqrt', (['np.pi'], {}), '(np.pi)\n', (9367, 9374), True, 'import numpy as np\n'), ((4797, 4811), 'numpy.cross', 'np.cross', (['v', 'N'], {}), '(v, N)\n', (4805, 4811), True, 'import numpy as np\n')] |
import glob
import os
import cv2
import numpy as np
from random import shuffle
# data
def one_hot(label_array, num_classes):
return np.squeeze(np.eye(num_classes)[label_array.reshape(-1)])
def read_and_resize_image(dir_of_images, input_shape):
nrows, ncolumns, _ = input_shape
X = [] # list of images
for path in dir_of_images:
X.append(cv2.resize(cv2.imread(path, cv2.IMREAD_COLOR), (nrows, ncolumns), interpolation=cv2.INTER_CUBIC))
return X
def prepare_data(input_shape, args):
label_index = {args.CLASSES[i]: i for i in range(args.CLASSES_NO)}
labels_index = {args.CLASSES[i] + '_{}'.format(i): i for i in range(args.CLASSES_NO)} # with suffix for convenience
print('label_index:\n', labels_index)
def read_data(partition_name, args):
if args.cropped_faces:
# faces
data_path = os.path.join(args.data_path, 'cropped')
else:
data_path = os.path.join(args.data_path, 'env')
# evn
data_files = []
for class_ in args.CLASSES:
data_files += glob.glob(os.path.join(data_path, partition_name, class_, '*.png'),
recursive=True) + glob.glob(
os.path.join(data_path, partition_name, class_, '*.PNG'), recursive=True)
# Fiter out wrong classes
shuffle(data_files)
num_data_files = len(data_files)
print('num partition_name:\n', partition_name, data_files[:3])
data_labels = []
for file in data_files:
label = file.split('/')[-2] # adjust according to form of dir
data_labels.append(label_index[label])
print('\ndata_labels:\n', data_labels[:3])
assert num_data_files == len(data_labels)
data_labels = np.array(data_labels)
y = one_hot(data_labels, args.CLASSES_NO)
print('\nencoded_data_labels:\n', y[:3])
if args.TEST:
X = read_and_resize_image(data_files[:200], input_shape)
else:
X = read_and_resize_image(data_files, input_shape)
X = [img[:, :, [2, 1, 0]] for img in X] # BGR --> RGB
X = np.array(X) / 255 # range(0,255) --> (0,1)
if args.TEST:
y = np.array(y[:200])
else:
y = np.array(y)
return X, y
X_train, y_train = read_data('train', args)
X_val, y_val = read_data('val', args)
print("Shape of train images is:", X_train.shape)
print("Shape of validation images is:", X_val.shape)
print("Shape of labels is:", y_train.shape)
print("Shape of labels is:", y_val.shape)
return X_train, X_val, y_train, y_val, labels_index
def prediction_classes():
classes = [str(s) for s in [1, 2, 3, 4, 5, 6, 7, 8, 9]]
CLASS_NO = len(classes)
print('%s of classes are going to be predicted' % CLASS_NO)
return classes, CLASS_NO
| [
"random.shuffle",
"cv2.imread",
"numpy.array",
"numpy.eye",
"os.path.join"
] | [((1354, 1373), 'random.shuffle', 'shuffle', (['data_files'], {}), '(data_files)\n', (1361, 1373), False, 'from random import shuffle\n'), ((1795, 1816), 'numpy.array', 'np.array', (['data_labels'], {}), '(data_labels)\n', (1803, 1816), True, 'import numpy as np\n'), ((149, 168), 'numpy.eye', 'np.eye', (['num_classes'], {}), '(num_classes)\n', (155, 168), True, 'import numpy as np\n'), ((869, 908), 'os.path.join', 'os.path.join', (['args.data_path', '"""cropped"""'], {}), "(args.data_path, 'cropped')\n", (881, 908), False, 'import os\n'), ((947, 982), 'os.path.join', 'os.path.join', (['args.data_path', '"""env"""'], {}), "(args.data_path, 'env')\n", (959, 982), False, 'import os\n'), ((2161, 2172), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2169, 2172), True, 'import numpy as np\n'), ((2243, 2260), 'numpy.array', 'np.array', (['y[:200]'], {}), '(y[:200])\n', (2251, 2260), True, 'import numpy as np\n'), ((2291, 2302), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2299, 2302), True, 'import numpy as np\n'), ((378, 412), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_COLOR'], {}), '(path, cv2.IMREAD_COLOR)\n', (388, 412), False, 'import cv2\n'), ((1098, 1154), 'os.path.join', 'os.path.join', (['data_path', 'partition_name', 'class_', '"""*.png"""'], {}), "(data_path, partition_name, class_, '*.png')\n", (1110, 1154), False, 'import os\n'), ((1237, 1293), 'os.path.join', 'os.path.join', (['data_path', 'partition_name', 'class_', '"""*.PNG"""'], {}), "(data_path, partition_name, class_, '*.PNG')\n", (1249, 1293), False, 'import os\n')] |
import numpy as np
import pandas as pd
import os
import anndata as ad
# remove runtime warning (divided by zero)
np.seterr(divide='ignore', invalid='ignore')
class GeneExp:
"""
A class used to creat gene expression anndata along data trait including both genes and samples information.
:param species: species of the data you use i.e mouse, human
:type species: str
:param level: which type of data you use including gene, transcript (default: gene)
:type level: str
:param anndata: if the expression data is in anndata format you should pass it through this parameter. X should be expression matrix. var is a sample information and obs is a gene information.
:param anndata: anndata
:param geneExpr: expression matrix which genes are in the rows and samples are columns
:type geneExpr: pandas dataframe
:param geneExpPath: path of expression matrix
:type geneExpPath: str
:param sep: separation symbol to use for reading data in geneExpPath properly
:type sep: str
"""
def __init__(self, species=None, level='gene',
anndata=None, geneExp=None,
geneExpPath=None, sep=','):
self.species = species
self.level = level
if geneExpPath is not None:
if not os.path.isfile(geneExpPath):
raise ValueError("file does not exist!")
else:
expressionList = pd.read_csv(geneExpPath, sep=sep)
elif geneExp is not None:
if isinstance(geneExp, pd.DataFrame):
expressionList = geneExp
else:
raise ValueError("geneExp is not data frame!")
elif anndata is not None:
if isinstance(anndata, ad.AnnData):
self.geneExpr = anndata
return
else:
raise ValueError("geneExp is not data frame!")
else:
raise ValueError("all type of input can not be empty at the same time!")
column = 'id'
if level == 'gene':
column = 'gene_id'
elif level == 'transcript':
column = 'transcript_id'
geneInfo = pd.DataFrame(expressionList.columns[1:], columns=[column],
index=expressionList.columns[1:])
sampleInfo = pd.DataFrame(range(expressionList.shape[0]), columns=['sample_id'],
index=expressionList.values[:, 0])
expressionList.index = expressionList.iloc[:, 0] # sample_id
# drop sample id columns
expressionList = expressionList.drop([expressionList.columns[0]], axis=1)
self.geneExpr = ad.AnnData(X=expressionList, obs=sampleInfo, var=geneInfo)
@staticmethod
def updateGeneInfo(expr, geneInfo=None, path=None, sep=' ', order=True, level='gene'):
"""
add/update genes info in expr anndata
:param expr: expression data
:type expr: anndata
:param geneInfo: gene information table you want to add to your data
:type geneInfo: pandas dataframe
:param path: path of geneInfo
:type path: str
:param sep: separation symbol to use for reading data in path properly
:type sep: str
:param order: if you want to update/add gene information by keeping the order as the same as data. if you want to add gene infor from biomart you should set this to be false. (default: TRUE)
:type order: bool
:param level: indicated the expression data is at gene level or transcript level
:type level: str
"""
if path is not None:
if not os.path.isfile(path):
raise ValueError("path does not exist!")
geneInfo = pd.read_csv(path, sep=sep)
elif geneInfo is not None:
if not isinstance(geneInfo, pd.DataFrame):
raise ValueError("geneInfo is not pandas dataframe!")
else:
raise ValueError("path and geneInfo can not be empty at the same time!")
if order:
geneInfo.index = expr.var
expr.var = pd.concat([geneInfo, expr.var], axis=1)
expr.var = expr.var.loc[:, ~expr.var.columns.duplicated()]
else:
name = 'ensembl_gene_id'
replace = 'gene_id'
if level == 'transcript':
name = 'ensembl_transcript_id'
replace = 'transcript_id'
if 'external_gene_name' in geneInfo.columns:
geneInfo.rename(columns={'external_gene_name': 'gene_name', name: replace}, inplace=True)
else:
geneInfo.rename(columns={name: replace}, inplace=True)
expr.var.gene_id = expr.var.gene_id.str.split('\\.', expand=True)[0]
expr.var.index.name = None
rmv = [x for x in geneInfo.columns if x in expr.var.columns]
rmv.remove(replace)
expr.var.drop(rmv, axis=1, inplace=True)
expr.var = expr.var.merge(geneInfo, on=replace, how='left')
expr.var.index = expr.var[replace]
return expr
@staticmethod
def updateMetadata(expr, metaData=None, path=None, sep=' ', order=True):
"""
add/update metadata in expr anndata
:param expr: expression data
:type expr: anndata
:param metaData: Sample information table you want to add to your data
:type metaData: pandas dataframe
:param path: path of metaData
:type path: str
:param sep: separation symbol to use for reading data in path properly
:type sep: str
:param order: if you want to update/add gene information by keeping the order as the same as data. if you want to add gene infor from biomart you should set this to be false. (default: TRUE)
:type order: bool
"""
if path is not None:
if not os.path.isfile(path):
raise ValueError("path does not exist!")
metaData = pd.read_csv(path, sep=sep)
elif metaData is not None:
if not isinstance(metaData, pd.DataFrame):
raise ValueError("meta data is not pandas dataframe!")
else:
raise ValueError("path and metaData can not be empty at the same time!")
if order:
metaData.index = expr.obs.index
expr.obs = pd.concat([metaData, expr.obs], axis=1)
expr.obs = expr.obs.loc[:, ~expr.obs.columns.duplicated()]
else:
expr.obs['index'] = expr.obs.index
if 'sample_id' not in metaData.columns:
metaData['sample_id'] = range(metaData.shape[0])
rmv = [x for x in metaData.columns if x in expr.obs.columns]
rmv.remove('sample_id')
expr.obs.drop(rmv, axis=1, inplace=True)
expr.obs = expr.obs.merge(metaData, on='sample_id', how='left')
expr.obs.index = expr.obs['index']
expr.obs.drop(['index'], axis=1, inplace=True)
return expr
| [
"pandas.DataFrame",
"numpy.seterr",
"pandas.read_csv",
"os.path.isfile",
"anndata.AnnData",
"pandas.concat"
] | [((115, 159), 'numpy.seterr', 'np.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (124, 159), True, 'import numpy as np\n'), ((2167, 2264), 'pandas.DataFrame', 'pd.DataFrame', (['expressionList.columns[1:]'], {'columns': '[column]', 'index': 'expressionList.columns[1:]'}), '(expressionList.columns[1:], columns=[column], index=\n expressionList.columns[1:])\n', (2179, 2264), True, 'import pandas as pd\n'), ((2662, 2720), 'anndata.AnnData', 'ad.AnnData', ([], {'X': 'expressionList', 'obs': 'sampleInfo', 'var': 'geneInfo'}), '(X=expressionList, obs=sampleInfo, var=geneInfo)\n', (2672, 2720), True, 'import anndata as ad\n'), ((3738, 3764), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'sep'}), '(path, sep=sep)\n', (3749, 3764), True, 'import pandas as pd\n'), ((4104, 4143), 'pandas.concat', 'pd.concat', (['[geneInfo, expr.var]'], {'axis': '(1)'}), '([geneInfo, expr.var], axis=1)\n', (4113, 4143), True, 'import pandas as pd\n'), ((5983, 6009), 'pandas.read_csv', 'pd.read_csv', (['path'], {'sep': 'sep'}), '(path, sep=sep)\n', (5994, 6009), True, 'import pandas as pd\n'), ((6356, 6395), 'pandas.concat', 'pd.concat', (['[metaData, expr.obs]'], {'axis': '(1)'}), '([metaData, expr.obs], axis=1)\n', (6365, 6395), True, 'import pandas as pd\n'), ((1290, 1317), 'os.path.isfile', 'os.path.isfile', (['geneExpPath'], {}), '(geneExpPath)\n', (1304, 1317), False, 'import os\n'), ((1427, 1460), 'pandas.read_csv', 'pd.read_csv', (['geneExpPath'], {'sep': 'sep'}), '(geneExpPath, sep=sep)\n', (1438, 1460), True, 'import pandas as pd\n'), ((3636, 3656), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (3650, 3656), False, 'import os\n'), ((5881, 5901), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (5895, 5901), False, 'import os\n')] |
# scaling functions
import numpy as np
import copy
from VyPy.data.scaling import ScalingBunch
from VyPy.data.scaling import Linear as LinearFunction
class Linear(ScalingBunch):
def __init__(self,Train):
self.calc_scaling(Train)
return
def calc_scaling(self,Train):
# unpack
X = Train.X
Y = Train.Y
DY = Train.DY
XB = Train.XB
_,nx = X.shape
# calculate scaling parameters
Xref = np.array([ XB[:,1] - XB[:,0] ,
XB[:,0] ])
Yref = np.array([ np.max(Y,0)-np.min(Y,0) ,
np.min(Y,0) ])
DYref = np.array([ Yref[0]/Xref[0,:] ,
np.zeros([nx]) ])
# build scaling functions
XB_scaling = LinearFunction( Xref[0,:,None] , Xref[1,:,None] )
X_scaling = LinearFunction( Xref[None,0,:] , Xref[None,1,:] )
Y_scaling = LinearFunction( Yref[0], Yref[1] )
DY_scaling = LinearFunction( DYref[None,0,:] )
# set scaling data keys
self['XB'] = XB_scaling
self['X'] = X_scaling
self['Y'] = Y_scaling
self['DY'] = DY_scaling
self['XI'] = X_scaling
self['YI'] = Y_scaling
self['DYI'] = DY_scaling
self['CovYI'] = Y_scaling
self['CovDYI'] = DY_scaling
return
#: def calc_scaling()
def wrap_function(self,function):
return Scaled_Function(self,function)
class Scaled_Function(object):
def __init__(self,Scaling,function):
self.Scaling = Scaling
self.function = function
def __call__(self,X):
Scaling = self.Scaling
function = self.function
X = Scaling.X.unset_scaling(X)
Y = function(X)
Y = Scaling.Y.set_scaling(Y)
return Y
#def center_ci(self):
#''' Translate Y's scaling function to satisfy
#C*(X)=0 when C(X)=0
#'''
## current scaling functions
#Y_set = self.Y_set
#Y_unset = self.Y_unset
## rename
#self.C_set = Y_set
#self.C_unset = Y_unset
## center scaled data on 0.0
#C_set = lambda(Z): Y_set(Z) - Y_set(0.0)
#C_unset = lambda(Z): Y_unset( Z + Y_set(0.0) )
## store
#self.Y_set = C_set
#self.Y_unset = C_unset
#return
##: def center_ci()
#def uncenter_ci(self):
#self.Y_set = self.C_set
#self.Y_unset = self.C_unset
#return
##: def uncenter_ci()
| [
"VyPy.data.scaling.Linear",
"numpy.zeros",
"numpy.min",
"numpy.max",
"numpy.array"
] | [((519, 560), 'numpy.array', 'np.array', (['[XB[:, 1] - XB[:, 0], XB[:, 0]]'], {}), '([XB[:, 1] - XB[:, 0], XB[:, 0]])\n', (527, 560), True, 'import numpy as np\n'), ((856, 906), 'VyPy.data.scaling.Linear', 'LinearFunction', (['Xref[0, :, None]', 'Xref[1, :, None]'], {}), '(Xref[0, :, None], Xref[1, :, None])\n', (870, 906), True, 'from VyPy.data.scaling import Linear as LinearFunction\n'), ((927, 977), 'VyPy.data.scaling.Linear', 'LinearFunction', (['Xref[None, 0, :]', 'Xref[None, 1, :]'], {}), '(Xref[None, 0, :], Xref[None, 1, :])\n', (941, 977), True, 'from VyPy.data.scaling import Linear as LinearFunction\n'), ((998, 1030), 'VyPy.data.scaling.Linear', 'LinearFunction', (['Yref[0]', 'Yref[1]'], {}), '(Yref[0], Yref[1])\n', (1012, 1030), True, 'from VyPy.data.scaling import Linear as LinearFunction\n'), ((1054, 1087), 'VyPy.data.scaling.Linear', 'LinearFunction', (['DYref[None, 0, :]'], {}), '(DYref[None, 0, :])\n', (1068, 1087), True, 'from VyPy.data.scaling import Linear as LinearFunction\n'), ((680, 692), 'numpy.min', 'np.min', (['Y', '(0)'], {}), '(Y, 0)\n', (686, 692), True, 'import numpy as np\n'), ((782, 796), 'numpy.zeros', 'np.zeros', (['[nx]'], {}), '([nx])\n', (790, 796), True, 'import numpy as np\n'), ((626, 638), 'numpy.max', 'np.max', (['Y', '(0)'], {}), '(Y, 0)\n', (632, 638), True, 'import numpy as np\n'), ((638, 650), 'numpy.min', 'np.min', (['Y', '(0)'], {}), '(Y, 0)\n', (644, 650), True, 'import numpy as np\n')] |
import numpy as np
import torch
from estimation_methods.abstract_estimation_method import \
AbstractEstimationMethod
from utils.torch_utils import torch_to_np, BatchIter, np_to_tensor
class DeepSieve(AbstractEstimationMethod):
    """Sieve-minimum-distance (SMD) style estimator with neural sieves.

    Jointly trains the moment-function network ``rho`` (created by the
    ``AbstractEstimationMethod`` base class) and an instrument sieve network
    ``f`` of ``z`` by minibatch SGD.  The moment weighting matrix ``omega``
    is the identity in epoch 0 and is re-estimated each subsequent epoch
    from the residual covariance of ``rho``.  Early stopping is driven by a
    dev-set MMR criterion built from the kernels in ``k_z_list``.
    """

    def __init__(self, rho_generator, rho_dim, gamma, k_z_class, k_z_args,
                 f_network_class, f_network_args, optim_class, optim_args,
                 batch_size, max_num_epochs, eval_freq, max_no_improve=5,
                 burn_in_epochs=100, pretrain=True, cuda=False, device=None,
                 verbose=False):
        """
        Parameters
        ----------
        rho_generator, rho_dim :
            Forwarded to ``AbstractEstimationMethod``; define the moment
            network ``self.rho`` and the number of moment dimensions.
        gamma : float
            Weight of the regularizer ``mean((rho(x, z) - f(z)) ** 2)``.
        k_z_class, k_z_args :
            Kernel class(es) + constructor kwargs for the dev-set MMR
            criterion.  A list gives one kernel per moment dimension, a
            single class is replicated ``rho_dim`` times, and ``None``
            disables dev evaluation / early stopping.
        f_network_class, f_network_args :
            Class and kwargs constructing the sieve network ``f``.
        optim_class, optim_args :
            Optimizer class and kwargs; optimizes ``f`` and ``rho`` jointly.
        batch_size : int
            Minibatch size.
        max_num_epochs : int
            Maximum number of epochs of the training loop.
        eval_freq : int
            Evaluate the dev criterion every ``eval_freq`` epochs; values
            <= 0 disable evaluation.
        max_no_improve : int
            Consecutive non-improving evaluations (after burn-in) that
            trigger early stopping.
        burn_in_epochs : int
            Epochs before no-improvement counting starts.
        pretrain : bool
            If True, pre-fit ``rho`` by least squares (LBFGS) first.
        cuda : bool
        device :
            Tensor placement options used by ``_to_tensor``.
        verbose : bool
            If True, print the dev objective at each evaluation.
        """
        AbstractEstimationMethod.__init__(self, rho_generator, rho_dim)
        self.gamma = gamma
        if isinstance(k_z_class, list):
            # One kernel class (with matching kwargs) per moment dimension.
            self.k_z_list = [c_(**a_) for c_, a_ in zip(k_z_class, k_z_args)]
        elif k_z_class is not None:
            # Replicate a single kernel class across all moment dimensions.
            self.k_z_list = [k_z_class(**k_z_args) for _ in range(rho_dim)]
        else:
            self.k_z_list = None
        self.f = f_network_class(**f_network_args)
        # Single optimizer over the parameters of both networks.
        self.optim = optim_class(list(self.f.parameters())
                                 + list(self.rho.parameters()), **optim_args)
        self.batch_size = batch_size
        self.max_num_epochs = max_num_epochs
        self.eval_freq = eval_freq
        self.max_no_improve = max_no_improve
        self.burn_in_epochs = burn_in_epochs
        self.pretrain = pretrain
        self.cuda = cuda
        self.device = device
        self.verbose = verbose

    def _loss_function(self, x, z, omega):
        """Minibatch objective: omega-weighted quadratic form in ``f(z)``
        plus ``gamma`` times the MSE between ``rho(x, z)`` and ``f(z)``.

        ``omega`` is inverted via a numpy solve on detached values, so no
        gradient flows through the weighting matrix itself.
        """
        f_z = self.f(z)
        f_z_np = torch_to_np(f_z.unsqueeze(2))
        omega_inv_f_z = self._to_tensor(np.linalg.solve(omega, f_z_np))
        f_errs = 2 * omega_inv_f_z.transpose(1, 2).matmul(f_z.unsqueeze(2))
        f_loss = f_errs.mean(0).sum()
        reg = ((self.rho(x, z) - f_z) ** 2).mean()
        return f_loss + self.gamma * reg

    def _fit_internal(self, x, z, x_dev, z_dev):
        """Run the SGD training loop with per-epoch omega re-weighting and
        optional dev-set early stopping."""
        n = x.shape[0]
        k = self.rho_dim
        batch_iter = BatchIter(n, self.batch_size)
        x_tensor = self._to_tensor(x)
        z_tensor = self._to_tensor(z)
        x_dev_tensor = self._to_tensor(x_dev)
        z_dev_tensor = self._to_tensor(z_dev)

        # BUG FIX: __init__ explicitly allows k_z_list to be None, but this
        # loop previously iterated it unconditionally (TypeError).
        if self.k_z_list is not None:
            for k_z in self.k_z_list:
                k_z.train(z)

        if self.pretrain:
            self._pretrain_rho(x=x_tensor, z=z_tensor)

        min_dev_loss = float("inf")
        num_no_improve = 0
        if self.eval_freq > 0 and self.k_z_list is not None:
            # Precompute the dev-set kernel Gram matrices once per fit.
            k_z_m_dev = np.stack([k_z(z_dev, z_dev)
                                  for k_z in self.k_z_list], axis=0)
            n_dev = x_dev.shape[0]
            omega_dev = np.eye(k).reshape(1, k, k).repeat(n_dev, 0)
        else:
            k_z_m_dev = None

        # Epoch 0 uses identity weighting; later epochs re-estimate omega.
        omega = np.eye(k).reshape(1, k, k).repeat(n, 0)
        for epoch_i in range(self.max_num_epochs):
            self.rho.train()
            self.f.train()
            if epoch_i > 0:
                # update omega from the current residual covariance
                omega = self.calc_rho_var(x_tensor, z_tensor)
                if self.eval_freq > 0 and self.k_z_list is not None:
                    omega_dev = self.calc_rho_var(x_dev_tensor, z_dev_tensor)

            for batch_idx in batch_iter:
                # calculate game objectives
                x_batch = x_tensor[batch_idx]
                z_batch = z_tensor[batch_idx]
                omega_batch = omega[batch_idx]
                loss = self._loss_function(x_batch, z_batch, omega_batch)

                # update networks
                self.optim.zero_grad()
                loss.backward(retain_graph=True)
                self.optim.step()

            if (k_z_m_dev is not None) and (epoch_i % self.eval_freq == 0):
                dev_loss = self._calc_dev_mmr(x_dev_tensor, z_dev,
                                              z_dev_tensor, k_z_m_dev)
                if self.verbose:
                    dev_game_obj = self._loss_function(
                        x_dev_tensor, z_dev_tensor, omega_dev)
                    print("epoch %d, game-obj=%f, def-loss=%f"
                          % (epoch_i, float(dev_game_obj), dev_loss))
                if dev_loss < min_dev_loss:
                    min_dev_loss = dev_loss
                    num_no_improve = 0
                elif epoch_i > self.burn_in_epochs:
                    num_no_improve += 1
                    if num_no_improve == self.max_no_improve:
                        break

    def _pretrain_rho(self, x, z):
        """Least-squares pre-fit of rho: minimize mean(rho(x, z)**2) with
        a full-batch LBFGS optimizer (strong Wolfe line search)."""
        optimizer = torch.optim.LBFGS(self.rho.parameters(),
                                      line_search_fn="strong_wolfe")

        def closure():
            optimizer.zero_grad()
            rho_x_z = self.rho(x, z)
            loss = (rho_x_z ** 2).mean()
            loss.backward()
            return loss
        optimizer.step(closure)

    def _calc_dev_mmr(self, x_dev_tensor, z_dev, z_dev_tensor, k_z_m):
        """Dev-set MMR criterion: sum over moment dimensions of
        rho' K rho / n^2 using the precomputed Gram matrices ``k_z_m``."""
        k = self.rho_dim
        n = z_dev.shape[0]
        rho_m = self.rho(x_dev_tensor, z_dev_tensor).detach().cpu().numpy()
        rho_m = rho_m.reshape(n, k, 1).transpose(1, 0, 2)
        dev_mmr = (k_z_m @ rho_m).transpose(0, 2, 1) @ rho_m
        return float(dev_mmr.sum() / (n ** 2))

    def calc_rho_var(self, x_tensor, z_tensor):
        """Empirical covariance of rho residuals, broadcast to one (k, k)
        matrix per sample (all n copies are identical)."""
        n = x_tensor.shape[0]
        k = self.rho_dim
        rho_x_z = torch_to_np(self.rho(x_tensor, z_tensor))
        rho_residual = rho_x_z - rho_x_z.mean(0, keepdims=0)
        var = (rho_residual.reshape(n, k, 1)
               * rho_residual.reshape(n, 1, k)).mean(0)
        return var.reshape(1, k, k).repeat(n, 0)

    def _to_tensor(self, data_array):
        """Convert a numpy array to a torch tensor on the configured device."""
        return np_to_tensor(data_array, cuda=self.cuda, device=self.device)
| [
"estimation_methods.abstract_estimation_method.AbstractEstimationMethod.__init__",
"utils.torch_utils.BatchIter",
"utils.torch_utils.np_to_tensor",
"numpy.eye",
"numpy.linalg.solve"
] | [((648, 711), 'estimation_methods.abstract_estimation_method.AbstractEstimationMethod.__init__', 'AbstractEstimationMethod.__init__', (['self', 'rho_generator', 'rho_dim'], {}), '(self, rho_generator, rho_dim)\n', (681, 711), False, 'from estimation_methods.abstract_estimation_method import AbstractEstimationMethod\n'), ((2044, 2073), 'utils.torch_utils.BatchIter', 'BatchIter', (['n', 'self.batch_size'], {}), '(n, self.batch_size)\n', (2053, 2073), False, 'from utils.torch_utils import torch_to_np, BatchIter, np_to_tensor\n'), ((5648, 5708), 'utils.torch_utils.np_to_tensor', 'np_to_tensor', (['data_array'], {'cuda': 'self.cuda', 'device': 'self.device'}), '(data_array, cuda=self.cuda, device=self.device)\n', (5660, 5708), False, 'from utils.torch_utils import torch_to_np, BatchIter, np_to_tensor\n'), ((1687, 1717), 'numpy.linalg.solve', 'np.linalg.solve', (['omega', 'f_z_np'], {}), '(omega, f_z_np)\n', (1702, 1717), True, 'import numpy as np\n'), ((2792, 2801), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (2798, 2801), True, 'import numpy as np\n'), ((2688, 2697), 'numpy.eye', 'np.eye', (['k'], {}), '(k)\n', (2694, 2697), True, 'import numpy as np\n')] |
"""
Convert numpy data type to basic python data type.
"""
import numpy as np
def clean(d) -> dict:
    """Return a copy of dict *d* with numpy scalars converted to native
    Python types.

    Values that are lists are converted element-wise (recursively for
    nested lists); values with no scalar conversion are kept as-is.
    """
    new = dict()

    def clean_item(item) -> object:
        # Use this to detect something.
        # if isinstance(x, tuple):
        #     raise Exception
        if not isinstance(item, list):
            try:
                # ``.item()`` converts a numpy scalar / 0-d array to the
                # equivalent built-in type.  ``np.asscalar`` did the same
                # (it simply called ``.item()``) but was removed in
                # NumPy 1.23; plain Python values lack ``.item`` and fall
                # through unchanged, exactly as before.
                return item.item()
            except AttributeError:
                return item
        return [
            clean_item(x)
            for x in item
        ]

    for (key, val) in d.items():
        new[key] = clean_item(val)
    return new
| [
"numpy.asscalar"
] | [((339, 356), 'numpy.asscalar', 'np.asscalar', (['item'], {}), '(item)\n', (350, 356), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import simplejson as json
import six
import os
from pandas.api.types import is_integer_dtype
from scipy.sparse import coo_matrix
import numpy as np
import pandas as pd
import h5py
from .core import (
get,
region_to_offset,
region_to_extent,
RangeSelector1D,
RangeSelector2D,
CSRReader,
query_rect,
)
from .util import parse_cooler_uri, parse_region, open_hdf5, closing_hdf5
from .fileops import list_coolers
# Public API of this module.
__all__ = ["Cooler", "annotate"]

# The 4DN data portal and hic2cool store these weight vectors in divisive form
# (NOTE(review): presumably the reciprocal of cooler's native convention --
# confirm where this set is consumed downstream).
_4DN_DIVISIVE_WEIGHTS = {"KR", "VC", "VC_SQRT"}
class Cooler(object):
"""
A convenient interface to a cooler data collection.
Parameters
----------
store : str, :py:class:`h5py.File` or :py:class:`h5py.Group`
Path to a cooler file, URI string, or open handle to the root HDF5
group of a cooler data collection.
root : str, optional [deprecated]
HDF5 Group path to root of cooler group if ``store`` is a file.
This option is deprecated. Instead, use a URI string of the form
:file:`<file_path>::<group_path>`.
kwargs : optional
Options to be passed to :py:class:`h5py.File()` upon every access.
By default, the file is opened with the default driver and mode='r'.
Notes
-----
If ``store`` is a file path, the file will be opened temporarily in
when performing operations. This allows :py:class:`Cooler` objects to be
serialized for multiprocess and distributed computations.
Metadata is accessible as a dictionary through the :py:attr:`info`
property.
Table selectors, created using :py:meth:`chroms`, :py:meth:`bins`, and
:py:meth:`pixels`, perform range queries over table rows,
returning :py:class:`pd.DataFrame` and :py:class:`pd.Series`.
A matrix selector, created using :py:meth:`matrix`, performs 2D matrix
range queries, returning :py:class:`numpy.ndarray` or
:py:class:`scipy.sparse.coo_matrix`.
"""
    def __init__(self, store, root=None, **kwargs):
        # ``store`` may be (1) a path/URI string or (2) an open h5py handle.
        if isinstance(store, six.string_types):
            if root is None:
                # Modern form: URI string "<file_path>::<group_path>".
                self.filename, self.root = parse_cooler_uri(store)
            elif h5py.is_hdf5(store):
                # Deprecated form: plain file path plus explicit ``root``.
                # Open briefly just to resolve the canonical filename.
                with open_hdf5(store, **kwargs) as h5:
                    self.filename = h5.file.filename
                self.root = root
            else:
                raise ValueError("Not a valid path to a Cooler file")
            self.uri = self.filename + "::" + self.root
            # Keep the path (not a handle) so the object stays picklable;
            # files are reopened on demand with ``open_kws``.
            self.store = self.filename
            self.open_kws = kwargs
        else:
            # Assume an open HDF5 handle, ignore open_kws
            self.filename = store.file.filename
            self.root = store.name
            self.uri = self.filename + "::" + self.root
            self.store = store.file
            self.open_kws = {}
        # Populate cached metadata (_chromsizes, _chromids, _info, ...).
        self._refresh()
    def _refresh(self):
        """Cache the chromosome table, name -> ID mapping and file metadata
        so simple attribute accesses do not need to reopen the file.

        Raises
        ------
        KeyError
            If no cooler group exists at ``self.root``; the message lists
            any cooler groups actually present in the container.
        """
        try:
            with open_hdf5(self.store, **self.open_kws) as h5:
                grp = h5[self.root]
                _ct = chroms(grp)
                # Cast to object dtype so names behave as Python strings
                # (presumably avoids fixed-width bytes -- TODO confirm).
                _ct["name"] = _ct["name"].astype(object)
                self._chromsizes = _ct.set_index("name")["length"]
                self._chromids = dict(zip(_ct["name"], range(len(_ct))))
                self._info = info(grp)
                mode = self._info.get("storage-mode", u"symmetric-upper")
                self._is_symm_upper = mode == u"symmetric-upper"
        except KeyError:
            # Group missing: help the user locate coolers in the container.
            err_msg = "No cooler found at: {}.".format(self.store)
            listing = list_coolers(self.store)
            if len(listing):
                err_msg += (
                    " Coolers found in {}. ".format(listing)
                    + "Use '::' to specify a group path"
                )
            raise KeyError(err_msg)
def _load_dset(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return grp[path][:]
def _load_attrs(self, path):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return dict(grp[path].attrs)
def open(self, mode="r", **kwargs):
""" Open the HDF5 group containing the Cooler with :py:mod:`h5py`
Functions as a context manager. Any ``open_kws`` passed during
construction are ignored.
Parameters
----------
mode : str, optional [default: 'r']
* ``'r'`` (readonly)
* ``'r+'`` or ``'a'`` (read/write)
Notes
-----
For other parameters, see :py:class:`h5py.File`.
"""
grp = h5py.File(self.filename, mode, **kwargs)[self.root]
return closing_hdf5(grp)
@property
def storage_mode(self):
"""Indicates whether ordinary sparse matrix encoding is used
(``"square"``) or whether a symmetric matrix is encoded by storing only
the upper triangular elements (``"symmetric-upper"``).
"""
return self._info.get("storage-mode", u"symmetric-upper")
@property
def binsize(self):
""" Resolution in base pairs if uniform else None """
return self._info["bin-size"]
@property
def chromsizes(self):
""" Ordered mapping of reference sequences to their lengths in bp """
return self._chromsizes
@property
def chromnames(self):
""" List of reference sequence names """
return list(self._chromsizes.index)
def offset(self, region):
""" Bin ID containing the left end of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
int
Examples
--------
>>> c.offset('chr3') # doctest: +SKIP
1311
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_offset(
grp, self._chromids, parse_region(region, self._chromsizes)
)
def extent(self, region):
""" Bin IDs containing the left and right ends of a genomic region
Parameters
----------
region : str or tuple
Genomic range
Returns
-------
2-tuple of ints
Examples
--------
>>> c.extent('chr3') # doctest: +SKIP
(1311, 2131)
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
@property
def info(self):
""" File information and metadata
Returns
-------
dict
"""
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return info(grp)
@property
def shape(self):
return (self._info["nbins"],) * 2
def chroms(self, **kwargs):
""" Chromosome table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return chroms(grp, lo, hi, fields, **kwargs)
return RangeSelector1D(None, _slice, None, self._info["nchroms"])
def bins(self, **kwargs):
""" Bin table selector
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return bins(grp, lo, hi, fields, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
return RangeSelector1D(None, _slice, _fetch, self._info["nbins"])
def pixels(self, join=False, **kwargs):
""" Pixel table selector
Parameters
----------
join : bool, optional
Whether to expand bin ID columns into chrom, start, and end
columns. Default is ``False``.
Returns
-------
Table selector
"""
def _slice(fields, lo, hi):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return pixels(grp, lo, hi, fields, join, **kwargs)
def _fetch(region):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
i0, i1 = region_to_extent(
grp, self._chromids, parse_region(region, self._chromsizes)
)
lo = grp["indexes"]["bin1_offset"][i0]
hi = grp["indexes"]["bin1_offset"][i1]
return lo, hi
return RangeSelector1D(None, _slice, _fetch, self._info["nnz"])
def matrix(
self,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=False,
ignore_index=True,
divisive_weights=None,
max_chunk=500000000,
):
""" Contact matrix selector
Parameters
----------
field : str, optional
Which column of the pixel table to fill the matrix with. By
default, the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing
the desired balancing weights. Set to False to return untransformed
counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID
columns into (chrom, start, end). Has no effect when requesting a
rectangular matrix. Default is True.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the
pixel IDs to improve performance. Default is True.
divisive_weights : bool, optional
Force balancing weights to be interpreted as divisive (True) or
multiplicative (False). Weights are always assumed to be
multiplicative by default unless named KR, VC or SQRT_VC, in which
case they are assumed to be divisive by default.
Returns
-------
Matrix selector
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If
``as_pixels=False``, those missing non-zero elements will
automatically be filled in.
"""
if balance in _4DN_DIVISIVE_WEIGHTS and divisive_weights is None:
divisive_weights = True
def _slice(field, i0, i1, j0, j1):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
return matrix(
grp,
i0,
i1,
j0,
j1,
field,
balance,
sparse,
as_pixels,
join,
ignore_index,
divisive_weights,
max_chunk,
self._is_symm_upper,
)
def _fetch(region, region2=None):
with open_hdf5(self.store, **self.open_kws) as h5:
grp = h5[self.root]
if region2 is None:
region2 = region
region1 = parse_region(region, self._chromsizes)
region2 = parse_region(region2, self._chromsizes)
i0, i1 = region_to_extent(grp, self._chromids, region1)
j0, j1 = region_to_extent(grp, self._chromids, region2)
return i0, i1, j0, j1
return RangeSelector2D(field, _slice, _fetch, (self._info["nbins"],) * 2)
def __repr__(self):
if isinstance(self.store, six.string_types):
filename = os.path.basename(self.store)
container = "{}::{}".format(filename, self.root)
else:
container = repr(self.store)
return '<Cooler "{}">'.format(container)
def info(h5):
"""
File and user metadata dict.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
Returns
-------
dict
"""
d = {}
for k, v in h5.attrs.items():
if isinstance(v, six.string_types):
try:
v = json.loads(v)
except ValueError:
pass
d[k] = v
return d
def chroms(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the chromosomes/scaffolds/contigs used.
They appear in the same order they occur in the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["name", "length"])
.append(pd.Index(h5["chroms"].keys()))
.drop_duplicates()
)
return get(h5["chroms"], lo, hi, fields, **kwargs)
def bins(h5, lo=0, hi=None, fields=None, **kwargs):
"""
Table describing the genomic bins that make up the axes of the heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["chrom", "start", "end"])
.append(pd.Index(h5["bins"].keys()))
.drop_duplicates()
)
# If convert_enum is not explicitly set to False, chrom IDs will get
# converted to categorical chromosome names, provided the ENUM header
# exists in bins/chrom. Otherwise, they will return as integers.
out = get(h5["bins"], lo, hi, fields, **kwargs)
# Handle the case where the ENUM header doesn't exist but we want to
# convert integer chrom IDs to categorical chromosome names.
if "chrom" in fields:
convert_enum = kwargs.get("convert_enum", True)
if isinstance(fields, six.string_types):
chrom_col = out
else:
chrom_col = out["chrom"]
if is_integer_dtype(chrom_col.dtype) and convert_enum:
chromnames = chroms(h5, fields="name")
chrom_col = pd.Categorical.from_codes(chrom_col, chromnames, ordered=True)
if isinstance(fields, six.string_types):
out = pd.Series(chrom_col, out.index)
else:
out["chrom"] = chrom_col
return out
def pixels(h5, lo=0, hi=None, fields=None, join=True, **kwargs):
"""
Table describing the nonzero upper triangular pixels of the Hi-C contact
heatmap.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
lo, hi : int, optional
Range of rows to select from the table.
fields : sequence of str, optional
Subset of columns to select from table.
join : bool, optional
Whether or not to expand bin ID columns to their full bin description
(chrom, start, end). Default is True.
Returns
-------
:py:class:`DataFrame`
"""
if fields is None:
fields = (
pd.Index(["bin1_id", "bin2_id"])
.append(pd.Index(h5["pixels"].keys()))
.drop_duplicates()
)
df = get(h5["pixels"], lo, hi, fields, **kwargs)
if join:
bins = get(h5["bins"], 0, None, ["chrom", "start", "end"], **kwargs)
df = annotate(df, bins, replace=True)
return df
def annotate(pixels, bins, replace=False):
"""
Add bin annotations to a data frame of pixels.
This is done by performing a relational "join" against the bin IDs of a
table that describes properties of the genomic bins. New columns will be
appended on the left of the output data frame.
.. versionchanged:: 0.8.0
The default value of ``replace`` changed to False.
Parameters
----------
pixels : :py:class:`DataFrame`
A data frame containing columns named ``bin1_id`` and/or ``bin2_id``.
If columns ``bin1_id`` and ``bin2_id`` are both present in ``pixels``,
the adjoined columns will be suffixed with '1' and '2' accordingly.
bins : :py:class:`DataFrame` or DataFrame selector
Data structure that contains a full description of the genomic bins of
the contact matrix, where the index corresponds to bin IDs.
replace : bool, optional
Remove the original ``bin1_id`` and ``bin2_id`` columns from the
output. Default is False.
Returns
-------
:py:class:`DataFrame`
"""
columns = pixels.columns
ncols = len(columns)
if "bin1_id" in columns:
if len(bins) > len(pixels):
bin1 = pixels["bin1_id"]
lo = bin1.min()
hi = bin1.max() + 1
lo = 0 if np.isnan(lo) else lo
hi = 0 if np.isnan(hi) else hi
right = bins[lo:hi]
else:
right = bins[:]
pixels = pixels.merge(right, how="left", left_on="bin1_id", right_index=True)
if "bin2_id" in columns:
if len(bins) > len(pixels):
bin2 = pixels["bin2_id"]
lo = bin2.min()
hi = bin2.max() + 1
lo = 0 if np.isnan(lo) else lo
hi = 0 if np.isnan(hi) else hi
right = bins[lo:hi]
else:
right = bins[:]
pixels = pixels.merge(
right, how="left", left_on="bin2_id", right_index=True, suffixes=("1", "2")
)
# rearrange columns
pixels = pixels[list(pixels.columns[ncols:]) + list(pixels.columns[:ncols])]
# drop bin IDs
if replace:
cols_to_drop = [col for col in ("bin1_id", "bin2_id") if col in columns]
pixels = pixels.drop(cols_to_drop, axis=1)
return pixels
def matrix(
h5,
i0,
i1,
j0,
j1,
field=None,
balance=True,
sparse=False,
as_pixels=False,
join=True,
ignore_index=True,
divisive_weights=False,
max_chunk=500000000,
is_upper=True,
):
"""
Two-dimensional range query on the Hi-C contact heatmap.
Depending on the options, returns either a 2D NumPy array, a rectangular
sparse ``coo_matrix``, or a data frame of pixels.
Parameters
----------
h5 : :py:class:`h5py.File` or :py:class:`h5py.Group`
Open handle to cooler file.
i0, i1 : int, optional
Bin range along the 0th (row) axis of the heatap.
j0, j1 : int, optional
Bin range along the 1st (col) axis of the heatap.
field : str, optional
Which column of the pixel table to fill the matrix with. By default,
the 'count' column is used.
balance : bool, optional
Whether to apply pre-calculated matrix balancing weights to the
selection. Default is True and uses a column named 'weight'.
Alternatively, pass the name of the bin table column containing the
desired balancing weights. Set to False to return untransformed counts.
sparse: bool, optional
Return a scipy.sparse.coo_matrix instead of a dense 2D numpy array.
as_pixels: bool, optional
Return a DataFrame of the corresponding rows from the pixel table
instead of a rectangular sparse matrix. False by default.
join : bool, optional
If requesting pixels, specifies whether to expand the bin ID columns
into (chrom, start, end). Has no effect when requesting a rectangular
matrix. Default is True.
ignore_index : bool, optional
If requesting pixels, don't populate the index column with the pixel
IDs to improve performance. Default is True.
Returns
-------
ndarray, coo_matrix or DataFrame
Notes
-----
If ``as_pixels=True``, only data explicitly stored in the pixel table
will be returned: if the cooler's storage mode is symmetric-upper,
lower triangular elements will not be generated. If ``as_pixels=False``,
those missing non-zero elements will automatically be filled in.
"""
if field is None:
field = "count"
if isinstance(balance, str):
name = balance
elif balance:
name = "weight"
if balance and name not in h5["bins"]:
raise ValueError(
"No column 'bins/{}'".format(name)
+ "found. Use ``cooler.balance_cooler`` to "
+ "calculate balancing weights or set balance=False."
)
if as_pixels:
reader = CSRReader(h5, field, max_chunk)
index = None if ignore_index else reader.index_col(i0, i1, j0, j1)
i, j, v = reader.query(i0, i1, j0, j1)
cols = ["bin1_id", "bin2_id", field]
df = pd.DataFrame(dict(zip(cols, [i, j, v])), columns=cols, index=index)
if balance:
weights = Cooler(h5).bins()[[name]]
df2 = annotate(df, weights, replace=False)
if divisive_weights:
df2[name + "1"] = 1 / df2[name + "1"]
df2[name + "2"] = 1 / df2[name + "2"]
df["balanced"] = df2[name + "1"] * df2[name + "2"] * df2[field]
if join:
bins = Cooler(h5).bins()[["chrom", "start", "end"]]
df = annotate(df, bins, replace=True)
return df
elif sparse:
reader = CSRReader(h5, field, max_chunk)
if is_upper:
i, j, v = query_rect(reader.query, i0, i1, j0, j1, duplex=True)
else:
i, j, v = reader.query(i0, i1, j0, j1)
mat = coo_matrix((v, (i - i0, j - j0)), (i1 - i0, j1 - j0))
if balance:
weights = h5["bins"][name]
bias1 = weights[i0:i1]
bias2 = bias1 if (i0, i1) == (j0, j1) else weights[j0:j1]
if divisive_weights:
bias1 = 1 / bias1
bias2 = 1 / bias2
mat.data = bias1[mat.row] * bias2[mat.col] * mat.data
return mat
else:
reader = CSRReader(h5, field, max_chunk)
if is_upper:
i, j, v = query_rect(reader.query, i0, i1, j0, j1, duplex=True)
else:
i, j, v = reader.query(i0, i1, j0, j1)
arr = coo_matrix((v, (i - i0, j - j0)), (i1 - i0, j1 - j0)).toarray()
if balance:
weights = h5["bins"][name]
bias1 = weights[i0:i1]
bias2 = bias1 if (i0, i1) == (j0, j1) else weights[j0:j1]
if divisive_weights:
bias1 = 1 / bias1
bias2 = 1 / bias2
arr = arr * np.outer(bias1, bias2)
return arr
| [
"h5py.File",
"numpy.outer",
"os.path.basename",
"pandas.api.types.is_integer_dtype",
"pandas.Categorical.from_codes",
"numpy.isnan",
"pandas.Index",
"scipy.sparse.coo_matrix",
"pandas.Series",
"simplejson.loads",
"h5py.is_hdf5"
] | [((4737, 4777), 'h5py.File', 'h5py.File', (['self.filename', 'mode'], {}), '(self.filename, mode, **kwargs)\n', (4746, 4777), False, 'import h5py\n'), ((12838, 12866), 'os.path.basename', 'os.path.basename', (['self.store'], {}), '(self.store)\n', (12854, 12866), False, 'import os\n'), ((15534, 15567), 'pandas.api.types.is_integer_dtype', 'is_integer_dtype', (['chrom_col.dtype'], {}), '(chrom_col.dtype)\n', (15550, 15567), False, 'from pandas.api.types import is_integer_dtype\n'), ((15661, 15723), 'pandas.Categorical.from_codes', 'pd.Categorical.from_codes', (['chrom_col', 'chromnames'], {'ordered': '(True)'}), '(chrom_col, chromnames, ordered=True)\n', (15686, 15723), True, 'import pandas as pd\n'), ((22937, 22990), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(v, (i - i0, j - j0))', '(i1 - i0, j1 - j0)'], {}), '((v, (i - i0, j - j0)), (i1 - i0, j1 - j0))\n', (22947, 22990), False, 'from scipy.sparse import coo_matrix\n'), ((2313, 2332), 'h5py.is_hdf5', 'h5py.is_hdf5', (['store'], {}), '(store)\n', (2325, 2332), False, 'import h5py\n'), ((13382, 13395), 'simplejson.loads', 'json.loads', (['v'], {}), '(v)\n', (13392, 13395), True, 'import simplejson as json\n'), ((15799, 15830), 'pandas.Series', 'pd.Series', (['chrom_col', 'out.index'], {}), '(chrom_col, out.index)\n', (15808, 15830), True, 'import pandas as pd\n'), ((18286, 18298), 'numpy.isnan', 'np.isnan', (['lo'], {}), '(lo)\n', (18294, 18298), True, 'import numpy as np\n'), ((18329, 18341), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (18337, 18341), True, 'import numpy as np\n'), ((18696, 18708), 'numpy.isnan', 'np.isnan', (['lo'], {}), '(lo)\n', (18704, 18708), True, 'import numpy as np\n'), ((18739, 18751), 'numpy.isnan', 'np.isnan', (['hi'], {}), '(hi)\n', (18747, 18751), True, 'import numpy as np\n'), ((23579, 23632), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(v, (i - i0, j - j0))', '(i1 - i0, j1 - j0)'], {}), '((v, (i - i0, j - j0)), (i1 - i0, j1 - j0))\n', (23589, 23632), False, 'from 
scipy.sparse import coo_matrix\n'), ((23933, 23955), 'numpy.outer', 'np.outer', (['bias1', 'bias2'], {}), '(bias1, bias2)\n', (23941, 23955), True, 'import numpy as np\n'), ((14064, 14092), 'pandas.Index', 'pd.Index', (["['name', 'length']"], {}), "(['name', 'length'])\n", (14072, 14092), True, 'import pandas as pd\n'), ((14778, 14813), 'pandas.Index', 'pd.Index', (["['chrom', 'start', 'end']"], {}), "(['chrom', 'start', 'end'])\n", (14786, 14813), True, 'import pandas as pd\n'), ((16621, 16653), 'pandas.Index', 'pd.Index', (["['bin1_id', 'bin2_id']"], {}), "(['bin1_id', 'bin2_id'])\n", (16629, 16653), True, 'import pandas as pd\n')] |
import argparse
import os
import pathlib
import random
import timeit
import numpy as np
import pandas as pd
def stdin(n_repeat, n_number, i) -> list:
tmp = pathlib.Path('tmp')
for _ in range(n_repeat * n_number):
os.system('python stdin.py {} < data/N100000.txt >> {}'.format(i, tmp))
with tmp.open() as f:
data = [
sum([float(f.readline()) for _ in range(n_number)]) / n_number
for _ in range(n_repeat)
]
os.remove('tmp')
return data
def sort1(A):
A.sort()
def sort2(A):
sorted(A)
def sort3(A):
A.sort(key=lambda x: x[1])
def sort4(A):
from operator import itemgetter
A.sort(key=itemgetter(1))
def sort5(A):
sorted(A, key=lambda x: x[1])
def sort6(A):
from operator import itemgetter
sorted(A, key=itemgetter(1))
def loop1(N):
for _ in range(N):
pass
def loop2(N):
for i in range(N):
i
def loop3(N):
i = 0
while i < N:
i += 1
def loop4(A):
for i in range(len(A)):
A[i]
def loop5(A):
for a in A:
a
def list1(N):
[None] * N
def list2(N):
[None for _ in range(N)]
def list6(N):
[[None] * N for _ in range(N)]
def list7(N):
[[None for _ in range(N)] for _ in range(N)]
def list3(N):
A = []
for i in range(N):
A.append(i)
def list4(N):
A = [None] * N
for i in range(N):
A[i] = i
def list5(N):
[i for i in range(N)]
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--n_repeat', type=int, default=10)
parser.add_argument('--n_number', type=int, default=10)
parser.add_argument('--out', default='record.csv')
args = parser.parse_args()
record = pd.DataFrame(columns=['time', 'exp', 'func', 'param1', 'param2'])
def to_df(data: list, exp: str, func: str, param1=None,
param2=None) -> pd.DataFrame:
N = len(data)
data = pd.DataFrame(
np.array([data, [exp] * N, [func] * N, [param1] * N,
[param2] * N]).T,
columns=record.columns)
data['time'] = data['time'].astype('float32')
return data
def do(df,
func,
param,
exp_name,
func_name,
param1_name=None,
param2_name=None):
res = timeit.repeat(
lambda: func(param), repeat=args.n_repeat, number=args.n_number)
data = [x / args.n_number for x in res]
df = pd.concat(
[df,
to_df(data, exp_name, func_name, param1_name, param2_name)],
ignore_index=True)
return df
# stdin
for i, func_name in [(1, 'input()'), (2, 'sys.stdin.readline()')]:
data = stdin(args.n_repeat, args.n_number, i)
record = pd.concat(
[record, to_df(data, 'stdin', func_name)], ignore_index=True)
# sort
N = 10**6
X = [random.random() for _ in range(N)]
Y = [random.random() for _ in range(N)]
for max_A in [10**4, 10**6]:
A = [int(x * max_A) for x in X]
for func, func_name in [(sort1, 'sort()'), (sort2, 'sorted()')]:
record = do(record, func, A, 'sort1', func_name, N, max_A)
A = [[int(x * max_A), int(y * max_A)] for x, y in zip(X, Y)]
for func, func_name in [(sort3, 'A.sort(key=lambda x: x[1])'),
(sort4, 'A.sort(key=itemgetter(1))'),
(sort5, 'sorted(A, key=lambda x: x[1])'),
(sort6, 'sorted(A, key=itemgetter(1))')]:
record = do(record, func, A, 'sort2', func_name, N, max_A)
# loop
for N in [10**5, 10**6, 10**7]:
loop_list = [(loop1, 'for _ in range(N)'),
(loop2, 'for i in range(N)'), (loop3, 'while i < N')]
for func, func_name in loop_list:
record = do(record, func, N, 'loop1', func_name, N)
A = [0] * N
loop_list = [(loop4, 'for i in range(len(A))'), (loop5, 'for a in A')]
for func, func_name in loop_list:
record = do(record, func, A, 'loop2', func_name, N)
# list
for N in [10**5, 10**6, 10**7]:
list_func = [(list1, '[None] * N'), (list2,
'[None for _ in range(N)]')]
for func, func_name in list_func:
record = do(record, func, N, 'list1', func_name, N)
list_func = [(list3, 'append()'), (list4, 'A[i] = i'),
(list5, '[i for i in range(N)]')]
for func, func_name in list_func:
record = do(record, func, N, 'list2', func_name, N)
for N in [10**3]:
list_func = [(list6, '[[None] * N for _ in range(N)]'),
(list7, '[[None for _ in range(N)] for _ in range(N)]')]
for func, func_name in list_func:
record = do(record, func, N, 'list3', func_name, N)
# output
record['time'] *= 1000
record.to_csv(args.out)
if __name__ == '__main__':
main()
| [
"pandas.DataFrame",
"os.remove",
"argparse.ArgumentParser",
"random.random",
"pathlib.Path",
"numpy.array",
"operator.itemgetter"
] | [((163, 182), 'pathlib.Path', 'pathlib.Path', (['"""tmp"""'], {}), "('tmp')\n", (175, 182), False, 'import pathlib\n'), ((473, 489), 'os.remove', 'os.remove', (['"""tmp"""'], {}), "('tmp')\n", (482, 489), False, 'import os\n'), ((1493, 1518), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1516, 1518), False, 'import argparse\n'), ((1739, 1804), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['time', 'exp', 'func', 'param1', 'param2']"}), "(columns=['time', 'exp', 'func', 'param1', 'param2'])\n", (1751, 1804), True, 'import pandas as pd\n'), ((2922, 2937), 'random.random', 'random.random', ([], {}), '()\n', (2935, 2937), False, 'import random\n'), ((2966, 2981), 'random.random', 'random.random', ([], {}), '()\n', (2979, 2981), False, 'import random\n'), ((679, 692), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (689, 692), False, 'from operator import itemgetter\n'), ((814, 827), 'operator.itemgetter', 'itemgetter', (['(1)'], {}), '(1)\n', (824, 827), False, 'from operator import itemgetter\n'), ((1973, 2040), 'numpy.array', 'np.array', (['[data, [exp] * N, [func] * N, [param1] * N, [param2] * N]'], {}), '([data, [exp] * N, [func] * N, [param1] * N, [param2] * N])\n', (1981, 2040), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 22:14:14 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder
from scipy import stats
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
#%%
data = pd.read_csv("D:\\Hackathons\\Promotion\\train_LZdllcl.csv")
data.shape
data.info()
data.describe()
#%%
g = sns.FacetGrid(data, col = "is_promoted")
g.map(plt.hist, "avg_training_score")
#%%
pd.crosstab(data["gender"], data["is_promoted"]).plot(kind='bar')
#%%
numeric_columns = ["no_of_trainings", "age", "length_of_service", "avg_training_score"]
data["education"] = np.where(data["age"] > 40, "Does not matter", data["education"])
data["education"] = np.where(data["length_of_service"] > 5, "Does not matter", data["education"])
data["recruitment_channel"] = np.where(data["length_of_service"] > 5, "Does not matter", data["recruitment_channel"])
data.isnull().sum(axis = 0)
data["education"] = data["education"].fillna("Missing")
data["previous_year_rating"] = data["previous_year_rating"].fillna(3.0)
#%%
#data = data[(np.abs(stats.zscore(data[numeric_columns])) < 3).all(axis=1)]
#%%
#Changing the departments
data["department"] = np.where(data["department"].isin(['Finance', 'HR', 'Legal']), "Operational", data["department"])
data["department"] = np.where(data["department"].isin(['Analytics']), "Technology", data["department"])
#%%
processed_data = data[numeric_columns]
processed_data["department"] = LabelEncoder().fit(data["department"]).transform(data["department"])
processed_data["region"] = LabelEncoder().fit(data["region"]).transform(data["region"])
processed_data["education"] = LabelEncoder().fit(data["education"]).transform(data["education"])
processed_data["gender"] = LabelEncoder().fit(data["gender"]).transform(data["gender"])
processed_data["recruitment_channel"] = LabelEncoder().fit(data["recruitment_channel"]).transform(data["recruitment_channel"])
processed_data["is_promoted"] = data["is_promoted"]
processed_data["previous_year_rating"] = data["previous_year_rating"]
processed_data["length_of_service"] = data["length_of_service"]
processed_data["awards_won?"] = data["awards_won?"]
processed_data["KPIs_met"] = data["KPIs_met >80%"]
#%%
result = processed_data["is_promoted"]
processed_data = processed_data.drop(columns = ["is_promoted"])
#%%
x_train, x_test, y_train, y_test = train_test_split(processed_data, result)
#%%
model = DecisionTreeClassifier()
model.fit(x_train, y_train)
prediction = model.predict(x_test)
score = f1_score(y_test, prediction)
#%%
#For submission
submission_data = pd.read_csv("D:\\Hackathons\\Promotion\\test_2umaH9m.csv")
#%%
submission_data["education"] = np.where(submission_data["age"] > 40, "Does not matter", submission_data["education"])
submission_data["education"] = np.where(submission_data["length_of_service"] > 10, "Does not matter", submission_data["education"])
submission_data["recruitment_channel"] = np.where(submission_data["length_of_service"] > 5, "Does not matter", submission_data["recruitment_channel"])
submission_data["education"] = submission_data["education"].fillna("Missing")
submission_data["previous_year_rating"] = submission_data["previous_year_rating"].fillna(3.0)
sub_pd = submission_data[numeric_columns]
#%%
submission_data["department"] = np.where(submission_data["department"].isin(['Finance', 'HR', 'Legal']), "Operational", submission_data["department"])
submission_data["department"] = np.where(submission_data["department"].isin(['Analytics']), "Technology", submission_data["department"])
sub_pd["department"] = LabelEncoder().fit(data["department"]).transform(submission_data["department"])
sub_pd["region"] = LabelEncoder().fit(data["region"]).transform(submission_data["region"])
sub_pd["education"] = LabelEncoder().fit(data["education"]).transform(submission_data["education"])
sub_pd["gender"] = LabelEncoder().fit(data["gender"]).transform(submission_data["gender"])
sub_pd["recruitment_channel"] = LabelEncoder().fit(data["recruitment_channel"]).transform(submission_data["recruitment_channel"])
sub_pd["previous_year_rating"] = submission_data["previous_year_rating"]
sub_pd["length_of_service"] = submission_data["length_of_service"]
sub_pd["awards_won?"] = submission_data["awards_won?"]
sub_pd["KPIs_met"] = submission_data["KPIs_met >80%"]
#%%
model.fit(processed_data, result)
final_submission = pd.DataFrame(submission_data["employee_id"])
final_submission ["is_promoted"] = model.predict(sub_pd)
final_submission.to_csv("D:\\Hackathons\\Promotion\\submission_08_DT.csv", index = False)
| [
"pandas.DataFrame",
"sklearn.cross_validation.train_test_split",
"pandas.crosstab",
"pandas.read_csv",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.preprocessing.LabelEncoder",
"sklearn.metrics.f1_score",
"numpy.where",
"seaborn.FacetGrid"
] | [((746, 805), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Hackathons\\\\Promotion\\\\train_LZdllcl.csv"""'], {}), "('D:\\\\Hackathons\\\\Promotion\\\\train_LZdllcl.csv')\n", (757, 805), True, 'import pandas as pd\n'), ((860, 898), 'seaborn.FacetGrid', 'sns.FacetGrid', (['data'], {'col': '"""is_promoted"""'}), "(data, col='is_promoted')\n", (873, 898), True, 'import seaborn as sns\n'), ((1135, 1199), 'numpy.where', 'np.where', (["(data['age'] > 40)", '"""Does not matter"""', "data['education']"], {}), "(data['age'] > 40, 'Does not matter', data['education'])\n", (1143, 1199), True, 'import numpy as np\n'), ((1221, 1298), 'numpy.where', 'np.where', (["(data['length_of_service'] > 5)", '"""Does not matter"""', "data['education']"], {}), "(data['length_of_service'] > 5, 'Does not matter', data['education'])\n", (1229, 1298), True, 'import numpy as np\n'), ((1332, 1424), 'numpy.where', 'np.where', (["(data['length_of_service'] > 5)", '"""Does not matter"""', "data['recruitment_channel']"], {}), "(data['length_of_service'] > 5, 'Does not matter', data[\n 'recruitment_channel'])\n", (1340, 1424), True, 'import numpy as np\n'), ((2924, 2964), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['processed_data', 'result'], {}), '(processed_data, result)\n', (2940, 2964), False, 'from sklearn.cross_validation import train_test_split\n'), ((2981, 3005), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (3003, 3005), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3082, 3110), 'sklearn.metrics.f1_score', 'f1_score', (['y_test', 'prediction'], {}), '(y_test, prediction)\n', (3090, 3110), False, 'from sklearn.metrics import f1_score\n'), ((3154, 3212), 'pandas.read_csv', 'pd.read_csv', (['"""D:\\\\Hackathons\\\\Promotion\\\\test_2umaH9m.csv"""'], {}), "('D:\\\\Hackathons\\\\Promotion\\\\test_2umaH9m.csv')\n", (3165, 3212), True, 'import pandas as pd\n'), ((3252, 3343), 'numpy.where', 'np.where', 
(["(submission_data['age'] > 40)", '"""Does not matter"""', "submission_data['education']"], {}), "(submission_data['age'] > 40, 'Does not matter', submission_data[\n 'education'])\n", (3260, 3343), True, 'import numpy as np\n'), ((3371, 3475), 'numpy.where', 'np.where', (["(submission_data['length_of_service'] > 10)", '"""Does not matter"""', "submission_data['education']"], {}), "(submission_data['length_of_service'] > 10, 'Does not matter',\n submission_data['education'])\n", (3379, 3475), True, 'import numpy as np\n'), ((3516, 3629), 'numpy.where', 'np.where', (["(submission_data['length_of_service'] > 5)", '"""Does not matter"""', "submission_data['recruitment_channel']"], {}), "(submission_data['length_of_service'] > 5, 'Does not matter',\n submission_data['recruitment_channel'])\n", (3524, 3629), True, 'import numpy as np\n'), ((4981, 5025), 'pandas.DataFrame', 'pd.DataFrame', (["submission_data['employee_id']"], {}), "(submission_data['employee_id'])\n", (4993, 5025), True, 'import pandas as pd\n'), ((950, 998), 'pandas.crosstab', 'pd.crosstab', (["data['gender']", "data['is_promoted']"], {}), "(data['gender'], data['is_promoted'])\n", (961, 998), True, 'import pandas as pd\n'), ((2002, 2016), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2014, 2016), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2099, 2113), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2111, 2113), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2191, 2205), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2203, 2205), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2286, 2300), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2298, 2300), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2388, 2402), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (2400, 2402), False, 'from sklearn.preprocessing import 
LabelEncoder\n'), ((4170, 4184), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4182, 4184), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4270, 4284), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4282, 4284), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4365, 4379), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4377, 4379), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4463, 4477), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4475, 4477), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((4568, 4582), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (4580, 4582), False, 'from sklearn.preprocessing import LabelEncoder\n')] |
import numpy as np
from annb.templates import *
class BatchSGD(Optimizer):
    """Plain batch gradient descent with learning-rate decay and optional momentum."""

    def __init__(self, learning_rate: float = None, decay: float = None, momentum: float = None) -> None:
        super().__init__(learning_rate, decay, momentum)
        self.manager = None  # set externally; drives forward/backward passes

    @staticmethod
    def optimize(layer: Layer, learning_rate: float) -> None:
        """Applies one vanilla gradient step to the layer's weights and bias."""
        layer._weights -= learning_rate * layer._dw
        layer._bias -= learning_rate * layer._db

    def train(self, network, iterations: int, dataset: [np.array, np.array]) -> None:
        """Trains the network on (inputs, targets) for the given iteration count."""
        # Training requires both a base learning rate and a decay factor.
        if self._initial_lr is None or self._decay is None:
            return
        for layer in network:
            # Momentum buffers are attached lazily here instead of in the Layer
            # constructor because each optimizer needs different state.
            # Idea taken from "Neural Networks from Scratch in Python".
            layer.layer.w_mom = np.zeros_like(layer.layer._weights)
            layer.layer.b_mom = np.zeros_like(layer.layer._bias)
        for step in range(iterations):
            self.manager.guess(dataset[0])
            self.manager.calc_gradient(dataset[1])
            # Learning-rate decay schedule from "Neural Networks from Scratch in Python".
            self._lr = self._initial_lr * (1 / (1 + self._decay * step))
            for layer in network:
                if not self._momentum:
                    layer.layer.optimize(self, self._lr)
                else:
                    new_w_mom = self._momentum * layer.layer.w_mom - self._lr * layer.layer._dw
                    new_b_mom = self._momentum * layer.layer.b_mom - self._lr * layer.layer._db
                    layer.layer.w_mom = new_w_mom
                    layer.layer.b_mom = new_b_mom
                    layer.layer._weights += new_w_mom
                    layer.layer._bias += new_b_mom
            # TODO: move reporting into a dedicated printer method
            print(f"Iteration:\t\t{step}")
            print(f"LR:\t\t{self._lr}")
            print(f"Accuracy:\t\t{self.manager.calc_acc(dataset[1])}")
            print(f"Loss:\t\t{np.mean(self.manager.calc_loss(dataset[1]))}\n")
| [
"numpy.zeros_like"
] | [((1195, 1230), 'numpy.zeros_like', 'np.zeros_like', (['layer.layer._weights'], {}), '(layer.layer._weights)\n', (1208, 1230), True, 'import numpy as np\n'), ((1320, 1352), 'numpy.zeros_like', 'np.zeros_like', (['layer.layer._bias'], {}), '(layer.layer._bias)\n', (1333, 1352), True, 'import numpy as np\n')] |
from itertools import product
import cv2
import numpy as np
import aoc_helper
from aoc_helper.utils import extract_ints
class Scanner:
    """One beacon scanner: raw coordinates plus, per beacon, the set of
    Euclidean distances to every other beacon (a rotation/translation
    invariant fingerprint used for matching scanners)."""

    def __init__(self, coords):
        self.coords = coords
        pairwise = np.linalg.norm(coords[:, None] - coords[None], axis=-1)
        self.distances = np.array([set(row) for row in pairwise])
def parse_raw():
    """Fetches the day-19 puzzle input and builds one Scanner per report block."""
    blocks = aoc_helper.day(19).split("\n\n")
    scanners = []
    for block in blocks:
        _header, body = block.split("\n", 1)
        coords = np.fromiter(extract_ints(body), dtype=int).reshape(-1, 3)
        scanners.append(Scanner(coords))
    return scanners
# Parsed puzzle input: one Scanner object per report block.
SCANNERS = parse_raw()
def coalesce(a, b):
    """Tries to merge scanner `b` into scanner `a` in place.

    Returns True when the merge succeeded, False when the two scanners do
    not share enough beacons to align them.
    """
    js, ks = [ ], [ ]
    # Two beacons likely coincide when their distance fingerprints share at
    # least 12 values; four such index pairs are enough to fit an affine map.
    for (j, p), (k, q) in product(
        enumerate(a.distances),
        enumerate(b.distances),
    ):
        if len(p & q) >= 12:
            js.append(j)
            ks.append(k)
            if len(js) == 4:
                break
    else:
        # Loop exhausted without 4 matches: the scanners don't overlap.
        return False
    # Fit a rigid transform from b's frame to a's frame; rounding is safe
    # because scanner orientations are axis-aligned 90-degree rotations.
    M = cv2.estimateAffine3D(b.coords[ks], a.coords[js])[1].round().astype(int)
    orientation, translation = M[:, :3], M[:, 3]
    transformed = b.coords @ orientation.T + translation
    # check[i, j] is True when a's beacon i equals b's transformed beacon j.
    check = (a.coords[:, None] == transformed[None]).all(-1)
    where_a_equal_b, where_b_equal_a = np.where(check)
    b_not_equal_a_mask = ~check.any(0)
    # Shared beacons: union their distance fingerprints so future matches
    # see distances from both scanners.
    a.distances[where_a_equal_b] |= b.distances[where_b_equal_a]
    # Beacons unique to b: append their coords and fingerprints to a.
    a.distances = np.concatenate((a.distances, b.distances[b_not_equal_a_mask]))
    a.coords = np.concatenate((a.coords, transformed[b_not_equal_a_mask]))
    a.scanners.append(translation)
    return True
def coalesce_all():
    """Repeatedly merges every scanner into SCANNERS[0]; returns the merged origin."""
    origin = SCANNERS[0]
    origin.scanners = [np.zeros(3, dtype=int)]
    remaining = SCANNERS[1:]
    # Keep sweeping until every scanner has been aligned and absorbed.
    while remaining:
        still_unmatched = []
        for scanner in remaining:
            if not coalesce(origin, scanner):
                still_unmatched.append(scanner)
        remaining = still_unmatched
    return origin
# All scanners merged into a single frame of reference (scanner 0's frame).
ORIGIN = coalesce_all()
def part_one():
    """Counts the distinct beacons once all scanners are merged."""
    return ORIGIN.coords.shape[0]
def part_two():
    """Largest Manhattan distance between any two scanner positions."""
    positions = np.array(ORIGIN.scanners)
    pairwise_manhattan = np.abs(positions[:, None] - positions[None]).sum(axis=-1)
    return pairwise_manhattan.max()
# Submit both answers for day 19 (aoc_helper calls each function lazily).
aoc_helper.submit(19, part_one)
aoc_helper.submit(19, part_two)
| [
"aoc_helper.submit",
"aoc_helper.day",
"numpy.abs",
"aoc_helper.utils.extract_ints",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"cv2.estimateAffine3D",
"numpy.concatenate"
] | [((2092, 2123), 'aoc_helper.submit', 'aoc_helper.submit', (['(19)', 'part_one'], {}), '(19, part_one)\n', (2109, 2123), False, 'import aoc_helper\n'), ((2124, 2155), 'aoc_helper.submit', 'aoc_helper.submit', (['(19)', 'part_two'], {}), '(19, part_two)\n', (2141, 2155), False, 'import aoc_helper\n'), ((1266, 1281), 'numpy.where', 'np.where', (['check'], {}), '(check)\n', (1274, 1281), True, 'import numpy as np\n'), ((1405, 1467), 'numpy.concatenate', 'np.concatenate', (['(a.distances, b.distances[b_not_equal_a_mask])'], {}), '((a.distances, b.distances[b_not_equal_a_mask]))\n', (1419, 1467), True, 'import numpy as np\n'), ((1483, 1542), 'numpy.concatenate', 'np.concatenate', (['(a.coords, transformed[b_not_equal_a_mask])'], {}), '((a.coords, transformed[b_not_equal_a_mask]))\n', (1497, 1542), True, 'import numpy as np\n'), ((1992, 2017), 'numpy.array', 'np.array', (['ORIGIN.scanners'], {}), '(ORIGIN.scanners)\n', (2000, 2017), True, 'import numpy as np\n'), ((1665, 1687), 'numpy.zeros', 'np.zeros', (['(3)'], {'dtype': 'int'}), '(3, dtype=int)\n', (1673, 1687), True, 'import numpy as np\n'), ((358, 376), 'aoc_helper.day', 'aoc_helper.day', (['(19)'], {}), '(19)\n', (372, 376), False, 'import aoc_helper\n'), ((261, 316), 'numpy.linalg.norm', 'np.linalg.norm', (['(coords[:, None] - coords[None])'], {'axis': '(-1)'}), '(coords[:, None] - coords[None], axis=-1)\n', (275, 316), True, 'import numpy as np\n'), ((2029, 2071), 'numpy.abs', 'np.abs', (['(scanners[:, None] - scanners[None])'], {}), '(scanners[:, None] - scanners[None])\n', (2035, 2071), True, 'import numpy as np\n'), ((986, 1034), 'cv2.estimateAffine3D', 'cv2.estimateAffine3D', (['b.coords[ks]', 'a.coords[js]'], {}), '(b.coords[ks], a.coords[js])\n', (1006, 1034), False, 'import cv2\n'), ((550, 568), 'aoc_helper.utils.extract_ints', 'extract_ints', (['data'], {}), '(data)\n', (562, 568), False, 'from aoc_helper.utils import extract_ints\n')] |
# -*- coding: utf-8 -*-
# Parser for PreCo dataset
#
# Author: <NAME> <<EMAIL>>
#
# For license information, see LICENSE
import gc
from collections import defaultdict, namedtuple
from enum import Enum
from itertools import chain
from multiprocessing import Pool
from typing import DefaultDict, List, Tuple
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.utils import to_categorical
from nltk import pos_tag
from nltk.data import load
from progress.bar import IncrementalBar
import spacy
from neuralcorefres.feature_extraction.stanford_parse_api import StanfordParseAPI
# Width of every per-word feature vector (embeddings, padded one-hots).
EMBEDDING_DIM = 300
# Type aliases used throughout the parser.
Cluster = List[str]
Tensor = List[float]
# (sentence index, begin token index, end token index) — end is exclusive.
ClusterIndicies = namedtuple('ClusterIndicies', 'sent_idx begin_idx end_idx')
ClusteredSentence = namedtuple('ClusteredSentence', 'sentence clusters')
# Dict key grouping clusters by example id + sentence (sentence as a tuple so it hashes).
ClusteredDictKey = namedtuple('ClusteredDictKey', 'id sentence_index sentence')
SPACY_DEP_TAGS = ['acl', 'acomp', 'advcl', 'advmod', 'agent', 'amod', 'appos', 'attr', 'aux', 'auxpass', 'case', 'cc', 'ccomp', 'compound', 'conj', 'cop', 'csubj', 'csubjpass', 'dative', 'dep', 'det', 'dobj', 'expl',
'intj', 'mark', 'meta', 'neg', 'nn', 'nounmod', 'npmod', 'nsubj', 'nsubjpass', 'nummod', 'oprd', 'obj', 'obl', 'parataxis', 'pcomp', 'pobj', 'poss', 'preconj', 'prep', 'prt', 'punct', 'quantmod', 'relcl', 'root', 'xcomp']
nlp = spacy.load('en_core_web_sm')
POS_ONE_HOT_LEN = 45
class PreCoDataType(Enum):
    """PreCo corpus split selector; mapped to a JSON-lines file via _FILE_TYPES."""
    TRAIN = 0
    TEST = 1
class EntityCluster:
    """A single coreference mention: its tokens and (sent, begin, end) location."""
    def __init__(self, entity: Cluster, indices: ClusterIndicies):
        # entity: the mention's tokens; indices: where it occurs in the example.
        self.entity = entity
        self.indices = ClusterIndicies(*indices)
    def __str__(self):
        return f'{self.entity} | {self.indices}'
class PreCoCoreferenceDatapoint:
    """One PreCo example: id, tokenized sentences, and its mention clusters
    sorted by the sentence each mention appears in."""
    def __init__(self, id, sents: List[Cluster], sorted_entity_clusters: EntityCluster):
        self.id = id
        self.sents = sents
        self.sorted_entity_clusters = self._get_sorted_clusters(sorted_entity_clusters)
    def _get_sorted_clusters(self, clusters) -> List[EntityCluster]:
        # Stable sort by sentence index so downstream grouping is ordered.
        return sorted(clusters, key=lambda cluster: cluster.indices.sent_idx)
    @staticmethod
    def parse_sorted_entity_clusters(sentences: List[List[str]], sorted_entity_clusters: List[List[List[int]]]):
        """
        Per the PreCo website, mention clusters are in the following form:
        [ [ [ sentence_idx, begin_idx, end_idx ] ] ]
        Where the end index is one past the last word in the cluster, and all
        indicies are zero-based.
        Example:
        Sentences:
        [
            [ 'Charlie', 'had', 'fun', 'at', 'the', 'park', '.' ],
            [ 'He', 'slid', 'down', 'the', 'slide', '.' ]
        ]
        Mention Clusters:
        [
            [ [0, 0, 1], [1, 0, 1] ],   // Charlie, he
            [ [0, 5, 6] ],              // park
            [ [1, 4, 5] ]               // slide
        ]
        NOTE(review): the `[0]` below keeps only the FIRST mention of each
        cluster and drops the rest — confirm this is intentional.
        """
        clusters = [[EntityCluster(sentences[sent_idx][begin_idx:end_idx], (sent_idx, begin_idx, end_idx))
                     for sent_idx, begin_idx, end_idx in cluster][0] for cluster in sorted_entity_clusters]
        return clusters
    def __str__(self):
        sub_strs = '\t' + '\n\t'.join([str(cluster) for cluster in self.sorted_entity_clusters])
        return f'{self.id}\n{sub_strs}'
# Location of the PreCo corpus relative to this script, and the file name
# used for each split.
_BASE_FILEPATH = '../data/PreCo_1.0/'
_FILE_TYPES = {
    PreCoDataType.TRAIN: 'train.json',
    PreCoDataType.TEST: 'dev.json'
}
class PreCoParser:
    """Static helpers that load the PreCo corpus and convert it into
    (xtrain, ytrain) tensors for a coreference neural network."""
    @staticmethod
    def get_pos_onehot_map():
        # One-hot columns for every Penn-Treebank POS tag shipped with NLTK.
        return pd.get_dummies(list(load('help/tagsets/upenn_tagset.pickle').keys()))
    @staticmethod
    def get_spacy_deps_onehot():
        # One-hot columns for the spaCy dependency labels in SPACY_DEP_TAGS.
        return pd.get_dummies(SPACY_DEP_TAGS)
    @staticmethod
    def get_preco_data(data_type: PreCoDataType, basepath: str = _BASE_FILEPATH, class_type: PreCoCoreferenceDatapoint = PreCoCoreferenceDatapoint):
        """Reads a PreCo JSON-lines split and returns raw
        (sentences, mention_clusters) tuples.
        NOTE(review): `class_type` is accepted but never used.
        """
        ret_lst = []
        full_filepath = basepath + _FILE_TYPES[data_type]
        df = pd.read_json(full_filepath, lines=True, encoding='ascii')
        bar = IncrementalBar('*\tReading and creating objects from PreCo dataset', max=len(df))
        for index, el in df.iterrows():
            ret_lst.append((el[u'sentences'], el[u'mention_clusters']))
            bar.next()
        gc.collect()
        return ret_lst
    @staticmethod
    def flatten_tokenized(sents: List[PreCoCoreferenceDatapoint]):
        """ Flattens tokenized lists of PreCo datapoints. """
        return [tokens for sentences in [sent.sentences for sent in sents] for tokens in sentences]
    @staticmethod
    def get_embedding_for_sent(sent: List[str], embedding_model) -> List[Tensor]:
        """ Get embeddings as array of embeddings. """
        return embedding_model.get_embeddings(sent)
    @staticmethod
    def get_pos_onehot_map_for_sent(sent: List[str], pos_onehot) -> List[Tensor]:
        """ Get POS as array of one-hot arrays. In same order as words from sentence (if used correctly). """
        # Tags absent from the one-hot map fall back to an all-zero vector.
        return np.asarray([pos_onehot[p].to_numpy() if p in pos_onehot.keys() else np.zeros(len(pos_onehot.keys())) for p in list(zip(*pos_tag(sent)))[1]])
    @staticmethod
    def get_dep_embeddings(sent: List[str], embedding_model) -> List[Tensor]:
        """Word embeddings of each token's syntactic head (parsed with spaCy)."""
        doc = spacy.tokens.doc.Doc(nlp.vocab, words=sent)
        # Run the full spaCy pipeline on the pre-tokenized doc.
        for name, proc in nlp.pipeline:
            doc = proc(doc)
        assert len(doc) == len(sent)
        sorted_deps = [token.head.text for token in doc]
        return PreCoParser.get_embedding_for_sent(sorted_deps, embedding_model)
    @staticmethod
    def get_dep_distances(sent: List[str]):
        # NOTE(review): debugging helper — prints the head tokens and returns None.
        doc = spacy.tokens.doc.Doc(nlp.vocab, words=sent)
        for name, proc in nlp.pipeline:
            doc = proc(doc)
        assert len(doc) == len(sent)
        sorted_deps = [token.head.text for token in doc]
        print(sorted_deps)
    @staticmethod
    def get_deps_onehot(sent: List[str]):
        """One-hot dependency-label vector for each token in the sentence."""
        deps_onehot = PreCoParser.get_spacy_deps_onehot()
        doc = spacy.tokens.doc.Doc(nlp.vocab, words=sent)
        for name, proc in nlp.pipeline:
            doc = proc(doc)
        assert len(doc) == len(sent)
        # Unknown labels fall back to an all-zero vector.
        return np.asarray([deps_onehot[p].to_numpy() if p in deps_onehot.keys() else np.zeros(len(deps_onehot.keys())) for p in [token.dep_ for token in doc]])
    @staticmethod
    def pad_1d_tensor(t, maxlen=EMBEDDING_DIM):
        # Right-pads (or truncates) a 1-D vector to EMBEDDING_DIM entries.
        # NOTE(review): `maxlen` is ignored; EMBEDDING_DIM is hard-coded below.
        return sequence.pad_sequences([t], maxlen=EMBEDDING_DIM, dtype='float16', padding='post')[0]
    @staticmethod
    def _invalid_data(sentence_embeddings: Tensor, dep_embeddings: Tensor, curr_sent: List[str], maxinputlen: int, maxoutputlen: int) -> bool:
        # True when a sentence cannot be used as a training sample:
        # empty/mismatched embeddings or a sentence longer than the input cap.
        return sentence_embeddings.shape[0] == 0 or sentence_embeddings.shape[0] != len(curr_sent) \
            or dep_embeddings.shape != sentence_embeddings.shape or len(curr_sent) > maxinputlen or len(dep_embeddings) > maxinputlen
    @staticmethod
    def prep_for_nn(preco_data: List[PreCoCoreferenceDatapoint]) -> DefaultDict[str, List[EntityCluster]]:
        """
        Returns a dictionary with key: ClusteredDictKey(example_id, sent_idx) and value:
        list of entity clusters for the given sentence with the example.
        Example using __str__ on EntityCluster for visualization purposes:
        {
            dev_00001_0: [
                ['anything', 'else', 'you', 'need'] | ClusterIndicies(sent_idx=0, begin_idx=3, end_idx=7),
            ],
            dev_00001_1: [
                ['three', 'twenty', 'dollar', 'bills'] | ClusterIndicies(sent_idx=1, begin_idx=7, end_idx=11)
                ['twenty', 'dollar'] | ClusterIndicies(sent_idx=1, begin_idx=8, end_idx=10)
                ['my', 'hand'] | ClusterIndicies(sent_idx=1, begin_idx=12, end_idx=14)
            ]
        }
        """
        organized_data = defaultdict(list)
        for dp in preco_data:
            # List comprehension used purely for its append side effect.
            [organized_data[ClusteredDictKey(dp.id, cluster.indices.sent_idx, tuple(
                dp.sents[cluster.indices.sent_idx]))].append(cluster) for cluster in dp.sorted_entity_clusters]
        return organized_data
    @staticmethod
    def get_train_data(data: DefaultDict[ClusteredDictKey, PreCoCoreferenceDatapoint], maxinputlen: int, maxoutputlen: int, embedding_model) -> Tuple[List[Tensor], List[Tensor]]:
        """
        (n_samples, n_words, n_attributes (word embedding, pos, etc))
        [ [ [ word_embedding, pos ] ] ]
        xtrain[sentence_sample][word_position][attribute]
        xtrain[37][5] -> sixth word's attributes in 38th sentence (np.ndarray containing two np.ndarrays)
        xtrain[0][0][0] -> word_embedding (np.ndarray)
        xtrain[0][0][1] -> pos one-hot encoding (np.ndarray)
        NOTE(review): when a sentence is skipped below, its xtrain row stays as
        uninitialized np.empty data while no matching ytrain entry is appended,
        so xtrain rows and ytrain entries drift out of alignment — verify/fix
        before training.
        """
        xtrain = np.empty((len(data), maxinputlen, 4, EMBEDDING_DIM))
        ytrain = []
        pos_onehot = PreCoParser.get_pos_onehot_map()
        deps_onehot = PreCoParser.get_spacy_deps_onehot()
        bar = IncrementalBar('*\tParsing data into xtrain, ytrain', max=len(data))
        for sent_ndx, (key, value) in enumerate(data.items()):
            curr_sent = key.sentence
            sentence_embeddings = PreCoParser.get_embedding_for_sent(curr_sent, embedding_model)
            sent_pos = PreCoParser.get_pos_onehot_map_for_sent(curr_sent, pos_onehot)
            dep_embeddings = PreCoParser.get_dep_embeddings(curr_sent, embedding_model)
            deps = PreCoParser.get_deps_onehot(curr_sent)
            if PreCoParser._invalid_data(sentence_embeddings, dep_embeddings, curr_sent, maxinputlen, maxoutputlen):
                # Unusable data
                bar.next()
                continue
            assert len(curr_sent) == sentence_embeddings.shape[0]
            assert sentence_embeddings.shape == dep_embeddings.shape
            assert deps.shape[0] == dep_embeddings.shape[0]
            assert sentence_embeddings.shape[0] == sent_pos.shape[0]
            # Pack the four per-word feature rows: embedding, head embedding,
            # padded POS one-hot, padded dependency-label one-hot.
            for word_ndx in range(len(sentence_embeddings)):
                xtrain[sent_ndx][word_ndx][0] = sentence_embeddings[word_ndx]
                xtrain[sent_ndx][word_ndx][1] = dep_embeddings[word_ndx]
                xtrain[sent_ndx][word_ndx][2] = PreCoParser.pad_1d_tensor(sent_pos[word_ndx])
                xtrain[sent_ndx][word_ndx][3] = PreCoParser.pad_1d_tensor(deps[word_ndx])
            # Flatten all (sent, begin, end) triples of this sentence's clusters.
            cluster_indices = list(sum([cluster.indices for cluster in value], ()))
            # Delete every third element to remove sentence index
            del cluster_indices[0::3]
            assert len(cluster_indices) % 2 == 0
            cluster_indices = sequence.pad_sequences(
                [cluster_indices], maxlen=maxoutputlen, dtype='float16', padding='post')[0]
            assert cluster_indices.shape == (maxoutputlen,)
            # Normalize indices by sentence length so targets are in [0, 1].
            ytrain.append(np.asarray(cluster_indices) / len(curr_sent))
            bar.next()
            gc.collect()
        ytrain = np.asarray(ytrain, dtype='float16')
        assert ytrain[0].shape == (maxoutputlen,)
        return (xtrain, ytrain)
def main():
    # NOTE(review): `data` is used before it is assigned (NameError) — the raw
    # corpus load (PreCoParser.get_preco_data) appears to be missing here.
    data = PreCoParser.prep_for_nn(data)
    # NOTE(review): get_train_data requires (data, maxinputlen, maxoutputlen,
    # embedding_model); this call is missing three arguments.
    xtrain, ytrain = PreCoParser.get_train_data(data)
if __name__ == '__main__':
    main()
| [
"spacy.tokens.doc.Doc",
"nltk.data.load",
"pandas.get_dummies",
"numpy.asarray",
"pandas.read_json",
"collections.defaultdict",
"spacy.load",
"gc.collect",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"collections.namedtuple",
"nltk.pos_tag"
] | [((719, 778), 'collections.namedtuple', 'namedtuple', (['"""ClusterIndicies"""', '"""sent_idx begin_idx end_idx"""'], {}), "('ClusterIndicies', 'sent_idx begin_idx end_idx')\n", (729, 778), False, 'from collections import defaultdict, namedtuple\n'), ((799, 851), 'collections.namedtuple', 'namedtuple', (['"""ClusteredSentence"""', '"""sentence clusters"""'], {}), "('ClusteredSentence', 'sentence clusters')\n", (809, 851), False, 'from collections import defaultdict, namedtuple\n'), ((871, 931), 'collections.namedtuple', 'namedtuple', (['"""ClusteredDictKey"""', '"""id sentence_index sentence"""'], {}), "('ClusteredDictKey', 'id sentence_index sentence')\n", (881, 931), False, 'from collections import defaultdict, namedtuple\n'), ((1397, 1425), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {}), "('en_core_web_sm')\n", (1407, 1425), False, 'import spacy\n'), ((3682, 3712), 'pandas.get_dummies', 'pd.get_dummies', (['SPACY_DEP_TAGS'], {}), '(SPACY_DEP_TAGS)\n', (3696, 3712), True, 'import pandas as pd\n'), ((3973, 4030), 'pandas.read_json', 'pd.read_json', (['full_filepath'], {'lines': '(True)', 'encoding': '"""ascii"""'}), "(full_filepath, lines=True, encoding='ascii')\n", (3985, 4030), True, 'import pandas as pd\n'), ((4272, 4284), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4282, 4284), False, 'import gc\n'), ((5242, 5285), 'spacy.tokens.doc.Doc', 'spacy.tokens.doc.Doc', (['nlp.vocab'], {'words': 'sent'}), '(nlp.vocab, words=sent)\n', (5262, 5285), False, 'import spacy\n'), ((5606, 5649), 'spacy.tokens.doc.Doc', 'spacy.tokens.doc.Doc', (['nlp.vocab'], {'words': 'sent'}), '(nlp.vocab, words=sent)\n', (5626, 5649), False, 'import spacy\n'), ((5973, 6016), 'spacy.tokens.doc.Doc', 'spacy.tokens.doc.Doc', (['nlp.vocab'], {'words': 'sent'}), '(nlp.vocab, words=sent)\n', (5993, 6016), False, 'import spacy\n'), ((7761, 7778), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7772, 7778), False, 'from collections import defaultdict, 
namedtuple\n'), ((10780, 10792), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10790, 10792), False, 'import gc\n'), ((10810, 10845), 'numpy.asarray', 'np.asarray', (['ytrain'], {'dtype': '"""float16"""'}), "(ytrain, dtype='float16')\n", (10820, 10845), True, 'import numpy as np\n'), ((6365, 6452), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['[t]'], {'maxlen': 'EMBEDDING_DIM', 'dtype': '"""float16"""', 'padding': '"""post"""'}), "([t], maxlen=EMBEDDING_DIM, dtype='float16', padding=\n 'post')\n", (6387, 6452), False, 'from tensorflow.keras.preprocessing import sequence\n'), ((10499, 10599), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['[cluster_indices]'], {'maxlen': 'maxoutputlen', 'dtype': '"""float16"""', 'padding': '"""post"""'}), "([cluster_indices], maxlen=maxoutputlen, dtype=\n 'float16', padding='post')\n", (10521, 10599), False, 'from tensorflow.keras.preprocessing import sequence\n'), ((10702, 10729), 'numpy.asarray', 'np.asarray', (['cluster_indices'], {}), '(cluster_indices)\n', (10712, 10729), True, 'import numpy as np\n'), ((3565, 3605), 'nltk.data.load', 'load', (['"""help/tagsets/upenn_tagset.pickle"""'], {}), "('help/tagsets/upenn_tagset.pickle')\n", (3569, 3605), False, 'from nltk.data import load\n'), ((5110, 5123), 'nltk.pos_tag', 'pos_tag', (['sent'], {}), '(sent)\n', (5117, 5123), False, 'from nltk import pos_tag\n')] |
# contains computational functions that are used for modelling
import torch as T
from torch.autograd import Variable
from torch import Tensor
from torch.nn.functional import softmax
import numpy as np
def masked_softmax(scores, mask=None, dim=-1):
    """
    Normalizes scores using masked softmax operation.
    :param scores: [batch_size, seq_len, *, 1]
    :param mask: [batch_size, seq_len, *]; None applies a plain softmax.
    :param dim: over which dimension to perform normalization.
    :return: normalized scores

    NOTE: `scores` is modified in place (masked entries set to -inf), matching
    the original implementation.
    """
    # Fix: `np.float` was removed in NumPy 1.20+; use the builtin instead.
    # Masked positions get -inf so softmax assigns them exactly zero mass.
    if mask is not None:
        scores[mask == 0.] = float('-inf')
    probs = softmax(scores, dim=dim)
    return probs
def kld_gauss(mu_q, sigma_q, mu_p, sigma_p, dim=1, eps=0.):
    """
    Closed-form KL[N(mu_q, diag sigma_q) || N(mu_p, diag sigma_p)].
    Both covariance matrices are assumed diagonal.
    :param mu_q: [batch_size, *]
    :param sigma_q: [batch_size, *]
    :param mu_p: [batch_size, *]
    :param sigma_p: [batch_size, *]
    :param eps: constant to avoid log of zero.
    :return [batch_size, *]
    """
    d = mu_q.shape[dim]
    inv_sigma_p = (sigma_p + eps) ** -1
    # Trace and quadratic (Mahalanobis) terms of the closed-form KLD.
    trace_term = (inv_sigma_p * sigma_q).sum(dim=dim)
    quad_term = (inv_sigma_p * (mu_p - mu_q) ** 2).sum(dim=dim)
    # log |Sigma_p| - log |Sigma_q| for diagonal covariances.
    log_det_ratio = T.log(sigma_p + eps).sum(dim=dim) - T.log(sigma_q + eps).sum(dim=dim)
    return 0.5 * (trace_term + quad_term - d + log_det_ratio)
def kld_cat(q, p, dim=-1, eps=0.):
    """Computes KLD[q||p] for categorical distributions q and p.
    Args:
        q (FloatTensor): [batch_size, *, 2]
        p (FloatTensor): [batch_size, *, 2]
        dim: dimension to sum over.
        eps: to prevent from logs of 0.
    Returns:
        [batch_size, *]
    """
    log_ratio = T.log(q + eps) - T.log(p + eps)
    return (q * log_ratio).sum(dim=dim)
def kld_normal(mu, sigma, dim=1):
    """KLD from N(mu, diag sigma) to the standard normal, diag. covariances."""
    inner = 1 + T.log(sigma) - mu.pow(2) - sigma
    return -0.5 * T.sum(inner, dim=dim)
def seq_log_prob(log_probs, seq, seq_mask):
    """
    Sums the log-probabilities of the true tokens over each sequence.
    :param log_probs: [batch_size, seq_len, vocab_size]
    :param seq: [batch_size, seq_len] integer token ids
    :param seq_mask: [batch_size, seq_len]
    :return: summed log probs [batch_size]
    """
    # Pick log_probs[b, t, seq[b, t]] via gather instead of advanced indexing.
    token_log_probs = log_probs.gather(-1, seq.unsqueeze(-1)).squeeze(-1)
    # Mask out padding positions, then sum over time steps.
    return (token_log_probs * seq_mask).sum(dim=1)
def re_parameterize(mu, sigma):
    """Draws a sample from N(mu, diag sigma) via the re-parametrization trick."""
    scale = sigma ** 0.5
    return mu + T.randn_like(scale) * scale
def entropy(q, dim=-1, eps=1e-6):
    """
    Shannon entropy of a categorical distribution along `dim`.
    :param q: [batch_size, len]
    :param eps: self-explanatory.
    """
    return -(q * T.log(q + eps)).sum(dim=dim)
def kld_approx(q_word_log_probs, p_word_log_probs, mask):
    """
    Approximate KLD over summaries: histories are sampled but the word-level
    support is summed exactly. Both distributions are categorical.
    :param q_word_log_probs: [summs_nr, seq_len, vocab_size]
        log probs assigned by the summarizer (q).
    :param p_word_log_probs: [summs_nr, seq_len, vocab_size]
        log probs assigned by the prior (p).
    :param mask: [summs_nr, seq_len]
    """
    pointwise = q_word_log_probs.exp() * (q_word_log_probs - p_word_log_probs)
    per_position = pointwise.sum(-1)       # sum over vocabulary
    return (per_position * mask).sum(-1)   # sum over sequence positions
def sample_dropout_mask(shape, device, dropout_prob, training=True):
    """Bernoulli keep-mask at train time; its constant expectation otherwise."""
    keep_prob = 1. - dropout_prob
    if not training:
        return T.full(shape, keep_prob, device=device)
    return T.empty(shape, device=device).bernoulli(keep_prob)
def get_curr_budget(len_budget, curr_step):
    """
    Remaining length budget (floats) at the given decoding step(s).
    :param curr_step: scalar or [seq_len] scalars
    :param len_budget: tensor [batch_size] defining how many
        tokens a model should generate.
    """
    if isinstance(curr_step, Tensor):
        assert len(curr_step.shape) == 1
        # Broadcast to [batch_size, seq_len].
        curr_step = curr_step.unsqueeze(0)
        len_budget = len_budget.unsqueeze(-1)
    return (len_budget - curr_step).float()
def get_summ_budget(revs_mask, summ_rev_indxs, summ_rev_indxs_mask):
    """
    Summary length budget: the average length of the reviews associated
    with each summary, truncated to integers.
    """
    # Length of every review selected for each summary.
    rev_lens = revs_mask[summ_rev_indxs].sum(-1)
    total_len = (rev_lens * summ_rev_indxs_mask).sum(-1)
    avg_len = total_len / summ_rev_indxs_mask.sum(-1)
    return avg_len.long()
def normalize_att_mask(att_mask, inp_word_hots):
    """Normalizes the mask by dividing by the (soft) frequency of each word,
    so words that occur several times don't accumulate extra mass."""
    weighted_hots = inp_word_hots * att_mask.unsqueeze(-1)
    # Summing over seq_len gives the soft occurrence count per vocab entry.
    word_counts = weighted_hots.sum(dim=1)
    word_counts[word_counts > 0] = 1. / word_counts[word_counts > 0]
    return (weighted_hots * word_counts.unsqueeze(1)).sum(-1)
def perpl_per_word(nll, lens):
    """Length-normalized perplexity: exp(nll / lens).
    Args:
        nll (FloatTensor): negative log-likelihood of sequences
            (summed over log probs) [batch_size]
        lens (FloatTensor): lens of sequences [batch_size]
    Returns:
        ppl (FloatTensor): [batch_size]
    """
    return (nll / lens).exp()
| [
"torch.randn_like",
"torch.empty",
"numpy.float",
"torch.full",
"torch.nn.functional.softmax",
"torch.exp",
"torch.arange",
"torch.sum",
"torch.log"
] | [((529, 545), 'numpy.float', 'np.float', (['"""-inf"""'], {}), "('-inf')\n", (537, 545), True, 'import numpy as np\n'), ((558, 582), 'torch.nn.functional.softmax', 'softmax', (['scores'], {'dim': 'dim'}), '(scores, dim=dim)\n', (565, 582), False, 'from torch.nn.functional import softmax\n'), ((1210, 1258), 'torch.sum', 'T.sum', (['(sigma_p_inv * (mu_p - mu_q) ** 2)'], {'dim': 'dim'}), '(sigma_p_inv * (mu_p - mu_q) ** 2, dim=dim)\n', (1215, 1258), True, 'import torch as T\n'), ((2821, 2838), 'torch.randn_like', 'T.randn_like', (['std'], {}), '(std)\n', (2833, 2838), True, 'import torch as T\n'), ((3046, 3060), 'torch.log', 'T.log', (['(q + eps)'], {}), '(q + eps)\n', (3051, 3060), True, 'import torch as T\n'), ((6004, 6021), 'torch.exp', 'T.exp', (['(nll / lens)'], {}), '(nll / lens)\n', (6009, 6021), True, 'import torch as T\n'), ((1283, 1303), 'torch.log', 'T.log', (['(sigma_p + eps)'], {}), '(sigma_p + eps)\n', (1288, 1303), True, 'import torch as T\n'), ((1336, 1356), 'torch.log', 'T.log', (['(sigma_q + eps)'], {}), '(sigma_q + eps)\n', (1341, 1356), True, 'import torch as T\n'), ((3074, 3099), 'torch.sum', 'T.sum', (['(q * log_q)'], {'dim': 'dim'}), '(q * log_q, dim=dim)\n', (3079, 3099), True, 'import torch as T\n'), ((4155, 4203), 'torch.full', 'T.full', (['shape', '(1.0 - dropout_prob)'], {'device': 'device'}), '(shape, 1.0 - dropout_prob, device=device)\n', (4161, 4203), True, 'import torch as T\n'), ((2543, 2570), 'torch.arange', 'T.arange', (['sl'], {'dtype': 'T.int64'}), '(sl, dtype=T.int64)\n', (2551, 2570), True, 'import torch as T\n'), ((4071, 4100), 'torch.empty', 'T.empty', (['shape'], {'device': 'device'}), '(shape, device=device)\n', (4078, 4100), True, 'import torch as T\n'), ((1796, 1810), 'torch.log', 'T.log', (['(q + eps)'], {}), '(q + eps)\n', (1801, 1810), True, 'import torch as T\n'), ((1813, 1827), 'torch.log', 'T.log', (['(p + eps)'], {}), '(p + eps)\n', (1818, 1827), True, 'import torch as T\n'), ((2471, 2498), 'torch.arange', 
'T.arange', (['bs'], {'dtype': 'T.int64'}), '(bs, dtype=T.int64)\n', (2479, 2498), True, 'import torch as T\n'), ((2002, 2014), 'torch.log', 'T.log', (['sigma'], {}), '(sigma)\n', (2007, 2014), True, 'import torch as T\n')] |
import sys
import os
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import math
import copy
from light_cnn import LightCNN_Action_Net
from openpose import pyopenpose as op
from scipy.spatial.distance import euclidean
# ========= Action Classification params =============
nNumAction = 15
# nNumJoint = 25 + 21 + 21
nNumJoint = 8 + 21 + 21
nViewFrame = 5
nCheckFrame = nViewFrame * 3
fActionArr = []
fActionProb = []
sAction = ["bitenail", "covermouth", "fighting", "fingerheart", "fingerok", "foldarms"
,"neutral", "pickear", "restchin", "scratch", "shakehand", "thumbchuck", "touchnose", "waving", "bowing"]
sTendency = [ "Active", "Neutral", "Passive" ]
vAllX = []
vAllY = []
vLHX = []
vLHY = []
vRHX = []
vRHY = []
fVelocityX = 0.
fVelocityY = 0.
# ========= Action Classification params =============
# ========= Openpose params =============
# OpenPose configuration: body + hand keypoints, face detection disabled.
params = dict()
params["model_folder"] = "./models/"
params["face"] = False
params["hand"] = True
# Module-level wrapper started once and reused for every frame.
opWrapper = op.WrapperPython()
opWrapper.configure(params)
opWrapper.start()
# ========= Openpose params =============
# ========= Tendency params =============
avgNeutralJointX = [322.14026178524216, 323.3281960896681
, 277.6280570269355, 265.22208367646823, 261.6198277323165
, 369.14238240576293, 381.8753715723246, 384.12814252086275
, 383.8150782772658, 377.16965215767823, 372.35474501571855, 371.4983903382251, 371.42920561757785
, 382.2229180099679, 378.82329372475, 374.7004049838305, 372.06152992247354
, 386.23766577882594, 382.318045646779, 377.9146361243536, 374.59103746959545
, 388.5917371759107, 385.05130484878816, 380.5643692650183, 377.4295040421212
, 389.13456373253734, 386.456162121497, 383.45871614932923, 381.25423902040205
, 261.06534784463366, 266.6972531766876, 270.331130736252, 270.8570231111664, 271.0723326401262
, 260.0929566000788, 264.0953283027428, 268.5716093939732, 271.32327342868064
, 257.1165693398627, 261.6365024253106, 266.3998209939861, 269.5787362852653
, 255.62472866798808, 259.84006482310855, 264.52176403682466, 267.37121442783734
, 256.07010035496364, 259.17439073593306, 262.3886682265787, 264.53931257396704]
avgNeutralJointY = [187.05623015997642, 250.92758932685655
, 251.01828426255068, 326.396158656766, 398.44751644240466
, 250.45998458556156, 325.98409536873675, 396.96939004778125
, 399.4718000294363, 405.37457221026983, 415.9773905655077, 426.3001862842175, 433.4880929239434
, 424.5302855937776, 435.2456918539938, 438.14686639701415, 439.4700757764309
, 425.23371987919376, 435.28300817741234, 437.4852645445141, 437.91386246124137
, 424.20721414015026, 432.6158489755624, 434.9105444245196, 435.39410204690677
, 422.34410844644253, 429.5531373514435, 431.4419222423643, 432.0382540928608
, 400.9096647112953, 406.9963011366081, 417.58202193347114, 427.61835087832156, 434.67334775218734
, 425.13964785421615, 435.42590991313597, 438.08185959366193, 439.2206478706609
, 425.47184133060625, 435.3077795402021, 436.7916575994253, 436.6014509559256
, 424.5252443643856, 432.7842847999346, 434.2857559218688, 433.88741732793784
, 422.80452726493945, 429.91303461321115, 431.1844964654373, 431.10141673363245]
# ========= Tendency params =============
def getOpenposeSkeletons(cvImg):
    """Runs OpenPose on one frame and returns the populated Datum
    (body and hand keypoints; face detection is disabled in `params`)."""
    datum = op.Datum()
    imageToProcess = cvImg
    datum.cvInputData = imageToProcess
    opWrapper.emplaceAndPop([datum])
    return datum
def convertInputJointFormat(opDatum):
    """Extracts the joint lists (8 body + 21 left-hand + 21 right-hand) of the
    person closest to the horizontal image center (x = 320, i.e. a 640px-wide
    frame) and returns (xs, ys, nose_x, nose_y); (xs, ys, 0, 0) on failure."""
    poseKeypoint = opDatum.poseKeypoints
    lhKeypoint = opDatum.handKeypoints[0]
    rhKeypoint = opDatum.handKeypoints[1]
    vInputJointX = []
    vInputJointY = []
    try:
        # Find the detection whose joint 0 (nose) is nearest to x = 320.
        nCenterDif = 999
        nCenterIndex = -1
        for ii in range(len(poseKeypoint)):
            if math.fabs(320. - poseKeypoint[ii][0][0]) < nCenterDif:
                nCenterDif = math.fabs(320. - poseKeypoint[ii][0][0])
                nCenterIndex = ii
        # NOTE(review): if no person is detected, nCenterIndex stays -1 and the
        # loops below index the last detection; the except clause catches the
        # resulting errors only when the arrays are empty — confirm intended.
        for ii in range(8):
            vInputJointX.append(poseKeypoint[nCenterIndex][ii][0])
            vInputJointY.append(poseKeypoint[nCenterIndex][ii][1])
        for ii in range(len(lhKeypoint[nCenterIndex])):
            vInputJointX.append(lhKeypoint[nCenterIndex][ii][0])
            vInputJointY.append(lhKeypoint[nCenterIndex][ii][1])
        for ii in range(len(rhKeypoint[nCenterIndex])):
            vInputJointX.append(rhKeypoint[nCenterIndex][ii][0])
            vInputJointY.append(rhKeypoint[nCenterIndex][ii][1])
        if nCenterIndex != -1:
            return vInputJointX, vInputJointY, poseKeypoint[nCenterIndex][0][0], poseKeypoint[nCenterIndex][0][1]
        else:
            return vInputJointX, vInputJointY, 0, 0
    except Exception as e:
        # Best effort: log and fall back to whatever joints were collected.
        print(e)
        return vInputJointX, vInputJointY, 0, 0
# updateVector
def updateJoint(vInputJointX, vInputJointY):
    """Appends one frame's joints to the module-level sliding buffers and,
    once nViewFrame frames are buffered, trims the oldest frame and updates
    the left-hand wrist velocity estimate (fVelocityX/fVelocityY)."""
    global vAllX, vAllY, vLHX, vLHY, vRHX, vRHY, fVelocityX, fVelocityY
    vAllX = vAllX + vInputJointX
    vAllY = vAllY + vInputJointY
    # Per-frame layout: 8 body joints, then 21 left-hand, then 21 right-hand.
    lhx = vInputJointX[8:8+21]
    lhy = vInputJointY[8:8+21]
    rhx = vInputJointX[8+21:]
    rhy = vInputJointY[8+21:]
    vLHX = vLHX + lhx
    vLHY = vLHY + lhy
    vRHX = vRHX + rhx
    vRHY = vRHY + rhy
    if len(vAllX) > nViewFrame * nNumJoint:
        # Drop the oldest frame so the buffers hold exactly nViewFrame frames.
        del vAllX[0:nNumJoint]
        del vAllY[0:nNumJoint]
        del vLHX[0:21]
        del vLHY[0:21]
        del vRHX[0:21]
        del vRHY[0:21]
        fVelocityX = 0.
        fVelocityY = 0.
        # Accumulate |delta| of the left-hand root joint between consecutive
        # frames, skipping frames where the joint was not detected (== 0).
        for ii in range(nViewFrame-1):
            if vLHX[21 * (ii + 1) + 0] != 0 and vLHX[21 * ii + 0] != 0:
                fVelocityX = fVelocityX + math.fabs(vLHX[21 * (ii + 1) + 0] - vLHX[21 * ii + 0])
            if vLHY[21 * (ii + 1) + 0] != 0 and vLHY[21 * ii + 0] != 0:
                fVelocityY = fVelocityY + math.fabs(vLHY[21 * (ii + 1) + 0] - vLHY[21 * ii + 0])
def convertToActionArr():
    """Render the buffered joint trajectories as a 128x128 action image.

    Rows are joints (body, then left hand, then right hand), columns are
    frames; channels 0/1 hold the min/max-normalised x and y coordinates.
    Until a full window of ``nViewFrame`` frames has been buffered, an
    all-zero (resized) image is returned.
    """
    global nNumJoint, nViewFrame, vAllX, vAllY, vLHX, vLHY, vRHX, vRHY
    mTransformed = np.zeros((nNumJoint + 21 + 21, nViewFrame, 3), dtype="uint8")
    if len(vAllX) < nViewFrame * nNumJoint:
        # Not enough history yet: return the blank image.
        return cv2.resize(mTransformed, (128, 128))
    # Sorted non-zero copies supply the min/max used for normalisation;
    # zeros are excluded because a 0 coordinate means "joint not detected".
    tX = sorted(v for v in vAllX if v != 0)
    tY = sorted(v for v in vAllY if v != 0)
    tLHX = sorted(v for v in vLHX if v != 0)
    tLHY = sorted(v for v in vLHY if v != 0)
    tRHX = sorted(v for v in vRHX if v != 0)
    tRHY = sorted(v for v in vRHY if v != 0)
    for ff in range(nViewFrame):
        for jj in range(nNumJoint):
            mTransformed[jj][ff][0] = 255 * (vAllX[ff * nNumJoint + jj] - tX[0]) / (tX[-1] - tX[0])
            mTransformed[jj][ff][1] = 255 * (vAllY[ff * nNumJoint + jj] - tY[0]) / (tY[-1] - tY[0])
        # Hand rows are filled only when enough hand joints were detected.
        if len(tLHX) > 2 and len(tLHY) > 2:
            base = nNumJoint
            for jj in range(21):
                mTransformed[jj + base][ff][0] = 255 * (vLHX[ff * 21 + jj] - tLHX[0]) / (tLHX[-1] - tLHX[0])
                mTransformed[jj + base][ff][1] = 255 * (vLHY[ff * 21 + jj] - tLHY[0]) / (tLHY[-1] - tLHY[0])
        if len(tRHX) > 2 and len(tRHY) > 2:
            base = nNumJoint + 21
            for jj in range(21):
                mTransformed[jj + base][ff][0] = 255 * (vRHX[ff * 21 + jj] - tRHX[0]) / (tRHX[-1] - tRHX[0])
                mTransformed[jj + base][ff][1] = 255 * (vRHY[ff * 21 + jj] - tRHY[0]) / (tRHY[-1] - tRHY[0])
    return cv2.resize(mTransformed, (128, 128))
def updateAction(nAction):
    """Push the newest recognised action id onto the recent-action history."""
    global fActionArr, nCheckFrame
    # Shift the history right by one slot (newest first), then store
    # the incoming action at the front.
    for slot in reversed(range(1, nCheckFrame)):
        fActionArr[slot] = fActionArr[slot - 1]
    fActionArr[0] = nAction
def getTopNAction(nTopN, convertedImg):
    """Rank recent action ids by softmax frequency and format the top-N.

    Builds a softmax distribution over the last ``nCheckFrame`` recognised
    action ids -- so the result reflects recent history, not only the
    current frame -- and returns:

    * ``fActionRank`` -- list of (probability, action_id) tuples sorted in
      descending probability.  Sentinel values: ``[(-1, -1)]`` when nTopN
      exceeds the number of actions, ``[(-2, -2)]`` when the hand rows of
      the action image are empty (hands not detected).
    * a human-readable string describing the top ``nTopN`` entries;
      callers that need only the best action use ``fActionRank[0]``.
    """
    global fActionProb, fActionArr, nCheckFrame, nNumAction, sAction
    fActionRank = []
    if nTopN > nNumAction:
        fActionRank.append((-1, -1))
        return fActionRank, "nTopN is out of scope."
    # Rows 80:90 / 118:128 of the converted image hold hand joints; if
    # either band is all zero, hands were not detected in the window.
    if (convertedImg[80:90, 0:128] == 0).all() or (convertedImg[118:128, 0:128] == 0).all():
        fActionRank.append((-2, -2))
        return fActionRank, "Hands not detected."
    # Histogram of action ids over the last nCheckFrame frames.
    fActionProb = [0 for _ in range(nNumAction)]
    for ii in range(nCheckFrame):
        fActionProb[fActionArr[ii]] = fActionProb[fActionArr[ii]] + 1
    # Softmax over the histogram.
    fTemp = [0 for _ in range(nNumAction)]
    fSum = 0.
    for ii in range(nNumAction):
        fExp = math.exp(fActionProb[ii])
        fSum = fSum + fExp
        fTemp[ii] = fExp
    # BUG FIX: the original assigned fActionRank[ii] on a still-empty
    # list, which always raised IndexError here; append instead.
    for ii in range(nNumAction):
        fActionRank.append((fTemp[ii] / fSum, ii))
    fActionRank.sort(reverse=True)
    sTopN = ""
    for ii in range(nTopN):
        sActionNProb = "{sAction} : {fProb:0.1f} \n".format(sAction=sAction[fActionRank[ii][1]]
                                                           , fProb=fActionRank[ii][0]*100)
        sTopN = sTopN + sActionNProb
    return fActionRank, sTopN
def EAR_Initialization(path):
    """Load the LightCNN body-action network from ``path`` and reset the
    recent-action history buffer.  Returns the network in eval mode.
    """
    global fActionArr, nCheckFrame
    fActionArr = [0] * nCheckFrame
    # Prefer GPU when available; DataParallel matches the checkpoint's
    # state-dict key layout.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint = torch.load(os.path.join(path, 'ETRI_BODYACTION.pth.tar'))
    net = LightCNN_Action_Net(num_classes=14)
    net = torch.nn.DataParallel(net).to(device)
    net.load_state_dict(checkpoint['state_dict'])
    net.eval()
    return net
def EAR_BodyAction_Estimation(EAR_Net, convertedImg):
    """Classify the current body action from the converted action image.

    Returns the predicted class index from the CNN, or the special id 14
    ("bowing") when the newest frame's neck geometry indicates a bowed
    posture.
    """
    global fVelocityX, fVelocityY, vAllX, vAllY
    last = nNumJoint * (nViewFrame - 1)
    # Bowing heuristic on the newest frame: a very short (or inverted)
    # joint0-to-joint1 vertical segment relative to shoulder width.
    fShoulder = math.fabs(vAllX[last + 5] - vAllX[last + 2])
    fNeck = math.fabs(vAllY[last + 0] - vAllY[last + 1])
    if fNeck < fShoulder / 6 or vAllY[last + 0] > vAllY[last + 1]:
        return 14
    # Otherwise run the CNN: BGR->RGB, scale to [0,1], HWC->CHW, add batch dim.
    rgb = cv2.cvtColor(convertedImg, cv2.COLOR_BGR2RGB)
    chw = np.array(np.transpose(rgb / 255., axes=[2, 0, 1]), dtype=np.float32)
    batch = torch.unsqueeze(torch.from_numpy(chw), 0)
    scores = EAR_Net(batch).cpu().detach().numpy().squeeze().tolist()
    return scores.index(max(scores))
def drawJoint(cvImg, vInputJointX, vInputJointY):
    """Draw a small green dot at every (x, y) joint and return the image.

    The image is returned untouched when the two coordinate lists
    disagree in length.
    """
    if len(vInputJointX) != len(vInputJointY):
        return cvImg
    for px, py in zip(vInputJointX, vInputJointY):
        cv2.circle(cvImg, (px, py), 2, (0, 255, 0), -1)
    return cvImg
def euc_dist(pt1, pt2):
    """Euclidean distance between two 2-D points given as (x, y) pairs."""
    dx = pt2[0] - pt1[0]
    dy = pt2[1] - pt1[1]
    return math.sqrt(dx * dx + dy * dy)
def alignSkeleton():
    """Align the stored average neutral pose to each buffered frame.

    For every frame in the window, the neutral skeleton
    (``avgNeutralJointX/Y``) is scaled so its joint0-joint1 distance
    matches the frame's, then translated so joint 1 coincides with the
    frame's joint 1.  Returns the aligned neutral x and y coordinates
    concatenated over all frames (same flat layout as ``vAllX``/``vAllY``).
    """
    global vAllX, vAllY, avgNeutralJointX, avgNeutralJointY, nNumJoint, nViewFrame
    alignedNeutralX = []
    alignedNeutralY = []
    for ff in range(nViewFrame):
        # NOTE(review): avgNeutralJointX/Y appear to be numpy arrays --
        # the broadcast "+ offset" and np.ndarray.tolist below rely on it.
        copyNeutralX = copy.deepcopy(avgNeutralJointX)
        copyNeutralY = copy.deepcopy(avgNeutralJointY)
        # Body joints of frame ff in the flat buffers.
        frameX = vAllX[ff*nNumJoint + 0:ff*nNumJoint + nNumJoint]
        frameY = vAllY[ff*nNumJoint + 0:ff*nNumJoint + nNumJoint]
        # Scale: ratio of the frame's joint0-joint1 length to the neutral one.
        fActionJoint01D = euc_dist((frameX[0],frameY[0]), (frameX[1],frameY[1]))
        fAvgJoint10D = euc_dist((copyNeutralX[0],copyNeutralY[0]), (copyNeutralX[1],copyNeutralY[1]))
        fScale = fActionJoint01D / fAvgJoint10D
        # Only the y coordinates are scaled; x scaling was deliberately
        # left disabled (see commented line below).
        for ii in range(len(copyNeutralX)):
            # copyNeutralX[ii] = copyNeutralX[ii] * fScale
            copyNeutralY[ii] = copyNeutralY[ii] * fScale
        # translation: move neutral joint 1 onto the frame's joint 1.
        fXOffset = frameX[1] - copyNeutralX[1]
        fYOffset = frameY[1] - copyNeutralY[1]
        # for ii in range(len(copyNeutralX)):
        #     copyNeutralX[ii] = copyNeutralX[ii] + fXOffset
        #     copyNeutralY[ii] = copyNeutralY[ii] + fYOffset
        # for ii in range(len(copyNeutralX)):
        copyNeutralX = copyNeutralX + fXOffset
        copyNeutralY = copyNeutralY + fYOffset
        alignedNeutralX = alignedNeutralX + np.ndarray.tolist(copyNeutralX)
        alignedNeutralY = alignedNeutralY + np.ndarray.tolist(copyNeutralY)
    return alignedNeutralX, alignedNeutralY
def getVectorDistance(alignedNeutralX, alignedNeutralY):
    """Sum of x- and y-Euclidean distances between the buffered pose and
    the aligned neutral pose.

    Joints that were not detected (zero in the buffers) are zeroed in
    the aligned vectors too so they contribute nothing.  Returns -1 when
    the vector lengths do not match.
    """
    global vAllX, vAllY, nNumJoint, nViewFrame
    if len(alignedNeutralX) != len(vAllX) or len(alignedNeutralY) != len(vAllY):
        return -1
    # Mask out undetected joints before measuring.
    for ii in range(len(vAllX)):
        if vAllX[ii] == 0:
            alignedNeutralX[ii] = 0
        if vAllY[ii] == 0:
            alignedNeutralY[ii] = 0
    distSum = euclidean(vAllX, alignedNeutralX)
    distSum += euclidean(vAllY, alignedNeutralY)
    return distSum
def getTendencyCategory(fDistance):
    """Map a pose-distance score to a tendency class.

    Returns 1 for distances below 1500, 0 for [1500, 4000),
    and 2 for anything larger.
    """
    if fDistance < 1500:
        return 1
    if fDistance < 4000:
        return 0
    return 2
| [
"os.path.join",
"scipy.spatial.distance.euclidean",
"cv2.cvtColor",
"torch.load",
"numpy.transpose",
"light_cnn.LightCNN_Action_Net",
"numpy.ndarray.tolist",
"openpose.pyopenpose.Datum",
"cv2.resize",
"copy.deepcopy",
"cv2.circle",
"math.sqrt",
"torch.cuda.is_available",
"torch.unsqueeze",... | [((1000, 1018), 'openpose.pyopenpose.WrapperPython', 'op.WrapperPython', ([], {}), '()\n', (1016, 1018), True, 'from openpose import pyopenpose as op\n'), ((3346, 3356), 'openpose.pyopenpose.Datum', 'op.Datum', ([], {}), '()\n', (3354, 3356), True, 'from openpose import pyopenpose as op\n'), ((5969, 6030), 'numpy.zeros', 'np.zeros', (['(nNumJoint + 21 + 21, nViewFrame, 3)'], {'dtype': '"""uint8"""'}), "((nNumJoint + 21 + 21, nViewFrame, 3), dtype='uint8')\n", (5977, 6030), True, 'import numpy as np\n'), ((6167, 6183), 'copy.copy', 'copy.copy', (['vAllX'], {}), '(vAllX)\n', (6176, 6183), False, 'import copy\n'), ((6193, 6209), 'copy.copy', 'copy.copy', (['vAllY'], {}), '(vAllY)\n', (6202, 6209), False, 'import copy\n'), ((6221, 6236), 'copy.copy', 'copy.copy', (['vLHX'], {}), '(vLHX)\n', (6230, 6236), False, 'import copy\n'), ((6248, 6263), 'copy.copy', 'copy.copy', (['vLHY'], {}), '(vLHY)\n', (6257, 6263), False, 'import copy\n'), ((6275, 6290), 'copy.copy', 'copy.copy', (['vRHX'], {}), '(vRHX)\n', (6284, 6290), False, 'import copy\n'), ((6302, 6317), 'copy.copy', 'copy.copy', (['vRHY'], {}), '(vRHY)\n', (6311, 6317), False, 'import copy\n'), ((7712, 7748), 'cv2.resize', 'cv2.resize', (['mTransformed', '(128, 128)'], {}), '(mTransformed, (128, 128))\n', (7722, 7748), False, 'import cv2\n'), ((9531, 9576), 'os.path.join', 'os.path.join', (['path', '"""ETRI_BODYACTION.pth.tar"""'], {}), "(path, 'ETRI_BODYACTION.pth.tar')\n", (9543, 9576), False, 'import os\n'), ((9591, 9626), 'light_cnn.LightCNN_Action_Net', 'LightCNN_Action_Net', ([], {'num_classes': '(14)'}), '(num_classes=14)\n', (9610, 9626), False, 'from light_cnn import LightCNN_Action_Net\n'), ((9922, 10019), 'math.fabs', 'math.fabs', (['(vAllX[nNumJoint * (nViewFrame - 1) + 5] - vAllX[nNumJoint * (nViewFrame - \n 1) + 2])'], {}), '(vAllX[nNumJoint * (nViewFrame - 1) + 5] - vAllX[nNumJoint * (\n nViewFrame - 1) + 2])\n', (9931, 10019), False, 'import math\n'), ((10027, 10124), 
'math.fabs', 'math.fabs', (['(vAllY[nNumJoint * (nViewFrame - 1) + 0] - vAllY[nNumJoint * (nViewFrame - \n 1) + 1])'], {}), '(vAllY[nNumJoint * (nViewFrame - 1) + 0] - vAllY[nNumJoint * (\n nViewFrame - 1) + 1])\n', (10036, 10124), False, 'import math\n'), ((11097, 11190), 'math.sqrt', 'math.sqrt', (['((pt2[0] - pt1[0]) * (pt2[0] - pt1[0]) + (pt2[1] - pt1[1]) * (pt2[1] - pt1[1]))'], {}), '((pt2[0] - pt1[0]) * (pt2[0] - pt1[0]) + (pt2[1] - pt1[1]) * (pt2[\n 1] - pt1[1]))\n', (11106, 11190), False, 'import math\n'), ((13031, 13064), 'scipy.spatial.distance.euclidean', 'euclidean', (['vAllX', 'alignedNeutralX'], {}), '(vAllX, alignedNeutralX)\n', (13040, 13064), False, 'from scipy.spatial.distance import euclidean\n'), ((13105, 13138), 'scipy.spatial.distance.euclidean', 'euclidean', (['vAllY', 'alignedNeutralY'], {}), '(vAllY, alignedNeutralY)\n', (13114, 13138), False, 'from scipy.spatial.distance import euclidean\n'), ((6093, 6129), 'cv2.resize', 'cv2.resize', (['mTransformed', '(128, 128)'], {}), '(mTransformed, (128, 128))\n', (6103, 6129), False, 'import cv2\n'), ((8641, 8666), 'math.exp', 'math.exp', (['fActionProb[ii]'], {}), '(fActionProb[ii])\n', (8649, 8666), False, 'import math\n'), ((10269, 10314), 'cv2.cvtColor', 'cv2.cvtColor', (['convertedImg', 'cv2.COLOR_BGR2RGB'], {}), '(convertedImg, cv2.COLOR_BGR2RGB)\n', (10281, 10314), False, 'import cv2\n'), ((10379, 10420), 'numpy.transpose', 'np.transpose', (['img_trim_in'], {'axes': '[2, 0, 1]'}), '(img_trim_in, axes=[2, 0, 1])\n', (10391, 10420), True, 'import numpy as np\n'), ((10443, 10482), 'numpy.array', 'np.array', (['img_trim_in'], {'dtype': 'np.float32'}), '(img_trim_in, dtype=np.float32)\n', (10451, 10482), True, 'import numpy as np\n'), ((10505, 10534), 'torch.from_numpy', 'torch.from_numpy', (['img_trim_in'], {}), '(img_trim_in)\n', (10521, 10534), False, 'import torch\n'), ((10557, 10588), 'torch.unsqueeze', 'torch.unsqueeze', (['img_trim_in', '(0)'], {}), '(img_trim_in, 0)\n', (10572, 10588), 
False, 'import torch\n'), ((10969, 11044), 'cv2.circle', 'cv2.circle', (['cvImg', '(vInputJointX[ii], vInputJointY[ii])', '(2)', '(0, 255, 0)', '(-1)'], {}), '(cvImg, (vInputJointX[ii], vInputJointY[ii]), 2, (0, 255, 0), -1)\n', (10979, 11044), False, 'import cv2\n'), ((11385, 11416), 'copy.deepcopy', 'copy.deepcopy', (['avgNeutralJointX'], {}), '(avgNeutralJointX)\n', (11398, 11416), False, 'import copy\n'), ((11440, 11471), 'copy.deepcopy', 'copy.deepcopy', (['avgNeutralJointY'], {}), '(avgNeutralJointY)\n', (11453, 11471), False, 'import copy\n'), ((9475, 9500), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9498, 9500), False, 'import torch\n'), ((9641, 9671), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['EAR_Net'], {}), '(EAR_Net)\n', (9662, 9671), False, 'import torch\n'), ((9711, 9733), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (9721, 9733), False, 'import torch\n'), ((12482, 12513), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['copyNeutralX'], {}), '(copyNeutralX)\n', (12499, 12513), True, 'import numpy as np\n'), ((12558, 12589), 'numpy.ndarray.tolist', 'np.ndarray.tolist', (['copyNeutralY'], {}), '(copyNeutralY)\n', (12575, 12589), True, 'import numpy as np\n'), ((3810, 3851), 'math.fabs', 'math.fabs', (['(320.0 - poseKeypoint[ii][0][0])'], {}), '(320.0 - poseKeypoint[ii][0][0])\n', (3819, 3851), False, 'import math\n'), ((3894, 3935), 'math.fabs', 'math.fabs', (['(320.0 - poseKeypoint[ii][0][0])'], {}), '(320.0 - poseKeypoint[ii][0][0])\n', (3903, 3935), False, 'import math\n'), ((5626, 5680), 'math.fabs', 'math.fabs', (['(vLHX[21 * (ii + 1) + 0] - vLHX[21 * ii + 0])'], {}), '(vLHX[21 * (ii + 1) + 0] - vLHX[21 * ii + 0])\n', (5635, 5680), False, 'import math\n'), ((5795, 5849), 'math.fabs', 'math.fabs', (['(vLHY[21 * (ii + 1) + 0] - vLHY[21 * ii + 0])'], {}), '(vLHY[21 * (ii + 1) + 0] - vLHY[21 * ii + 0])\n', (5804, 5849), False, 'import math\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.9.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Example 5: Acceleration due to gravity (GPy application)
# %% [markdown]
# In this example, we will illustrate how to use an external package, `GPy`, within `surmise`s framework.
#
# First, import the main libraries we use for this example:
# %%
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats as sps
from surmise.emulation import emulator
from surmise.calibration import calibrator
# %%
# Read the data
ball = np.loadtxt('ball.csv', delimiter=',')
m = len(ball)
# height
xrep = np.reshape(ball[:, 0], (m, 1))
x = xrep[0:21]
# time
y = np.reshape(ball[:, 1], ((m, 1)))
y = y[0:21]
# %%
# Observe the data
plt.scatter(x, y, color='red')
plt.xlabel("height (meters)")
plt.ylabel("time (seconds)")
plt.show()
# %%
# Computer implementation of the mathematical model
def timedrop(x, theta, hr, gr):
    """Evaluate the free-fall time model t = sqrt(2 h / g).

    Parameters
    ----------
    x : m x 1 array
        Standardised height settings, assumed within (0, 1).
    theta : n x 1 array
        Standardised gravity parameters, assumed within (0, 1).
    hr : array of size 2
        Minimum and maximum height used to unscale ``x``.
    gr : array of size 2
        Minimum and maximum gravity used to unscale ``theta``.

    Returns
    -------
    m x n array
        Fall-time evaluations for every (height, parameter) pair.
    """
    g_lo, g_hi = min(gr), max(gr)
    h_lo, h_hi = min(hr), max(hr)
    out = np.zeros((theta.shape[0], x.shape[0]))
    for row in range(0, theta.shape[0]):
        # Map the standardised inputs back to physical units.
        g = (g_hi - g_lo) * theta[row] + g_lo
        h = (h_hi - h_lo) * x + h_lo
        out[row, :] = np.sqrt(2 * h / g).reshape(x.shape[0])
    return out.T
# %%
# Define prior
class prior_balldrop:
    """Uniform(0, 1) prior on the standardised gravity parameter.

    Provides the ``lpdf`` / ``rnd`` interface expected by surmise
    calibrators.  The methods are declared static (the originals took no
    ``self``), so they now work whether invoked on the class or on an
    instance -- a backward-compatible fix.
    """

    @staticmethod
    def lpdf(theta):
        """Log-density of each row of ``theta`` (n x 1), returned as n x 1."""
        return sps.uniform.logpdf(theta[:, 0], 0, 1).reshape((len(theta), 1))

    @staticmethod
    def rnd(n):
        """Draw ``n`` prior samples as an n x 1 array."""
        return np.vstack(sps.uniform.rvs(0, 1, size=n))
# %%
# Draw 100 random parameters from uniform prior
n = 100
theta = prior_balldrop.rnd(n)
# Range used to unscale theta inside timedrop -- presumably gravity in
# m/s^2, bracketing the true g ~ 9.8.
theta_range = np.array([6, 15])
# Standardize
x_range = np.array([min(x), max(x)])
x_std = (x - min(x))/(max(x) - min(x))
# Obtain computer model output
f = timedrop(x_std, theta, x_range, theta_range)
print(np.shape(theta))
print(np.shape(x_std))
print(np.shape(f))
# %%
# Build an emulator of the computer model using the external GPy backend.
emulator_1 = emulator(x=x_std, theta=theta, f=f, method='GPy')
# %%
# Generate random reasonable theta values
n_test = 1000
theta_test = prior_balldrop.rnd(n_test)
print(np.shape(theta_test))
# Obtain computer model output
f_test = timedrop(x_std, theta_test, x_range, theta_range)
print(np.shape(f_test))
# Predict
p_1 = emulator_1.predict(x_std, theta_test)
p_1_mean, p_1_var = p_1.mean(), p_1.var()
# Emulator accuracy on held-out parameters: sum of squared errors and
# R-squared against the true model output.
print('SSE PCGP = ', np.round(np.sum((p_1_mean - f_test)**2), 2))
print('Rsq PCGP = ', 1 - np.round(np.sum(np.square(p_1_mean - f_test))/np.sum(np.square(f_test.T - np.mean(f_test, axis = 1))), 2))
# %%
def plot_pred(x_std, xrep, y, cal, theta_range):
    """Visualise a fitted calibrator in four panels: parameter trace,
    boxplot, histogram, and the posterior predictive band over the data.

    Parameters
    ----------
    x_std : standardized heights passed to ``cal.predict``.
    xrep : original (unscaled) heights, for the x-axis of the last panel.
    y : observed fall times.
    cal : fitted surmise calibrator.
    theta_range : (min, max) used to unscale the posterior samples.
    """
    fig, axs = plt.subplots(1, 4, figsize=(14, 3))
    # Posterior parameter samples, mapped back to physical units.
    cal_theta = cal.theta.rnd(1000)
    cal_theta = cal_theta*(theta_range[1] - theta_range[0]) + theta_range[0]
    axs[0].plot(cal_theta)
    axs[1].boxplot(cal_theta)
    axs[2].hist(cal_theta)
    # Posterior predictive draws at the standardized inputs.
    post = cal.predict(x_std)
    rndm_m = post.rnd(s = 1000)
    # 95% central band and median of the predictive draws.
    upper = np.percentile(rndm_m, 97.5, axis = 0)
    lower = np.percentile(rndm_m, 2.5, axis = 0)
    median = np.percentile(rndm_m, 50, axis = 0)
    axs[3].plot(xrep[0:21].reshape(21), median, color = 'black')
    axs[3].fill_between(xrep[0:21].reshape(21), lower, upper, color = 'grey')
    axs[3].plot(xrep, y, 'ro', markersize = 5, color='red')
    plt.show()
# %%
# Observation variance: 20% of each measurement, floored at 0.1.
obsvar = np.maximum(0.2*y, 0.1)
# Fit a calibrator with emulator 1 via method='directbayes' using a
# Metropolis-Hastings sampler (normal proposal, step 0.3, 1000 samples).
cal_1 = calibrator(emu=emulator_1,
                   y=y,
                   x=x_std,
                   thetaprior=prior_balldrop,
                   method='directbayes',
                   yvar=obsvar,
                   args={'theta0': np.array([[0.4]]),
                         'numsamp' : 1000,
                         'stepType' : 'normal',
                         'stepParam' : [0.3]})
plot_pred(x_std, x, y, cal_1, theta_range)
| [
"numpy.maximum",
"numpy.sum",
"numpy.shape",
"numpy.mean",
"surmise.emulation.emulator",
"numpy.loadtxt",
"numpy.reshape",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"numpy.square",
"numpy.percentile",
"scipy.stats.uniform.logpdf",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplo... | [((760, 797), 'numpy.loadtxt', 'np.loadtxt', (['"""ball.csv"""'], {'delimiter': '""","""'}), "('ball.csv', delimiter=',')\n", (770, 797), True, 'import numpy as np\n'), ((828, 858), 'numpy.reshape', 'np.reshape', (['ball[:, 0]', '(m, 1)'], {}), '(ball[:, 0], (m, 1))\n', (838, 858), True, 'import numpy as np\n'), ((885, 915), 'numpy.reshape', 'np.reshape', (['ball[:, 1]', '(m, 1)'], {}), '(ball[:, 1], (m, 1))\n', (895, 915), True, 'import numpy as np\n'), ((955, 985), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'y'], {'color': '"""red"""'}), "(x, y, color='red')\n", (966, 985), True, 'from matplotlib import pyplot as plt\n'), ((986, 1015), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""height (meters)"""'], {}), "('height (meters)')\n", (996, 1015), True, 'from matplotlib import pyplot as plt\n'), ((1016, 1044), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""time (seconds)"""'], {}), "('time (seconds)')\n", (1026, 1044), True, 'from matplotlib import pyplot as plt\n'), ((1045, 1055), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1053, 1055), True, 'from matplotlib import pyplot as plt\n'), ((2278, 2295), 'numpy.array', 'np.array', (['[6, 15]'], {}), '([6, 15])\n', (2286, 2295), True, 'import numpy as np\n'), ((2554, 2603), 'surmise.emulation.emulator', 'emulator', ([], {'x': 'x_std', 'theta': 'theta', 'f': 'f', 'method': '"""GPy"""'}), "(x=x_std, theta=theta, f=f, method='GPy')\n", (2562, 2603), False, 'from surmise.emulation import emulator\n'), ((3915, 3939), 'numpy.maximum', 'np.maximum', (['(0.2 * y)', '(0.1)'], {}), '(0.2 * y, 0.1)\n', (3925, 3939), True, 'import numpy as np\n'), ((1661, 1699), 'numpy.zeros', 'np.zeros', (['(theta.shape[0], x.shape[0])'], {}), '((theta.shape[0], x.shape[0]))\n', (1669, 1699), True, 'import numpy as np\n'), ((2476, 2491), 'numpy.shape', 'np.shape', (['theta'], {}), '(theta)\n', (2484, 2491), True, 'import numpy as np\n'), ((2499, 2514), 'numpy.shape', 'np.shape', (['x_std'], {}), 
'(x_std)\n', (2507, 2514), True, 'import numpy as np\n'), ((2522, 2533), 'numpy.shape', 'np.shape', (['f'], {}), '(f)\n', (2530, 2533), True, 'import numpy as np\n'), ((2712, 2732), 'numpy.shape', 'np.shape', (['theta_test'], {}), '(theta_test)\n', (2720, 2732), True, 'import numpy as np\n'), ((2831, 2847), 'numpy.shape', 'np.shape', (['f_test'], {}), '(f_test)\n', (2839, 2847), True, 'import numpy as np\n'), ((3223, 3258), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'figsize': '(14, 3)'}), '(1, 4, figsize=(14, 3))\n', (3235, 3258), True, 'from matplotlib import pyplot as plt\n'), ((3539, 3574), 'numpy.percentile', 'np.percentile', (['rndm_m', '(97.5)'], {'axis': '(0)'}), '(rndm_m, 97.5, axis=0)\n', (3552, 3574), True, 'import numpy as np\n'), ((3589, 3623), 'numpy.percentile', 'np.percentile', (['rndm_m', '(2.5)'], {'axis': '(0)'}), '(rndm_m, 2.5, axis=0)\n', (3602, 3623), True, 'import numpy as np\n'), ((3639, 3672), 'numpy.percentile', 'np.percentile', (['rndm_m', '(50)'], {'axis': '(0)'}), '(rndm_m, 50, axis=0)\n', (3652, 3672), True, 'import numpy as np\n'), ((3888, 3898), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3896, 3898), True, 'from matplotlib import pyplot as plt\n'), ((2978, 3010), 'numpy.sum', 'np.sum', (['((p_1_mean - f_test) ** 2)'], {}), '((p_1_mean - f_test) ** 2)\n', (2984, 3010), True, 'import numpy as np\n'), ((2139, 2168), 'scipy.stats.uniform.rvs', 'sps.uniform.rvs', (['(0)', '(1)'], {'size': 'n'}), '(0, 1, size=n)\n', (2154, 2168), True, 'import scipy.stats as sps\n'), ((4293, 4310), 'numpy.array', 'np.array', (['[[0.4]]'], {}), '([[0.4]])\n', (4301, 4310), True, 'import numpy as np\n'), ((1824, 1842), 'numpy.sqrt', 'np.sqrt', (['(2 * h / g)'], {}), '(2 * h / g)\n', (1831, 1842), True, 'import numpy as np\n'), ((2032, 2069), 'scipy.stats.uniform.logpdf', 'sps.uniform.logpdf', (['theta[:, 0]', '(0)', '(1)'], {}), '(theta[:, 0], 0, 1)\n', (2050, 2069), True, 'import scipy.stats as sps\n'), ((3056, 3084), 
'numpy.square', 'np.square', (['(p_1_mean - f_test)'], {}), '(p_1_mean - f_test)\n', (3065, 3084), True, 'import numpy as np\n'), ((3114, 3137), 'numpy.mean', 'np.mean', (['f_test'], {'axis': '(1)'}), '(f_test, axis=1)\n', (3121, 3137), True, 'import numpy as np\n')] |
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class SequenceMap(Base):
    """Backend test-case exporters for the ONNX ``SequenceMap`` operator.

    Every exporter follows the same recipe: build a small ``seq_map_body``
    subgraph, wrap it in a SequenceMap node, compute the expected outputs
    with numpy, and hand node/inputs/outputs (plus explicit sequence and
    tensor type protos) to ``expect`` to emit the test data.
    """

    @staticmethod
    def export_sequence_map_identity_1_sequence():  # type: () -> None
        """Identity body mapped over a single sequence of 1-D tensors."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Identity', ['in0'], ['out0'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info(
                'in0', onnx.TensorProto.FLOAT, ['N'])],
            [onnx.helper.make_tensor_value_info(
                'out0', onnx.TensorProto.FLOAT, ['M'])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['x'],
            outputs=['y'],
            body=body
        )
        # Identity: the expected output sequence is the input sequence.
        x = [np.random.uniform(0.0, 1.0, 10).astype(np.float32)
             for _ in range(3)]
        y = x
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
        ]
        expect(node, inputs=[x], outputs=[y],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_identity_1_sequence')

    @staticmethod
    def export_sequence_map_identity_2_sequences():  # type: () -> None
        """Two-input identity body mapped over two tensor sequences."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Identity', ['in0'], ['out0']),
             onnx.helper.make_node('Identity', ['in1'], ['out1'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info('in0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info('in1', onnx.TensorProto.FLOAT, ['M'])],
            [onnx.helper.make_tensor_value_info('out0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info('out1', onnx.TensorProto.FLOAT, ['M'])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['x0', 'x1'],
            outputs=['y0', 'y1'],
            body=body
        )
        # Elements have random lengths; identity passes both sequences through.
        x0 = [np.random.uniform(0.0, 1.0, np.random.randint(
            1, 10)).astype(np.float32) for _ in range(3)]
        x1 = [np.random.uniform(0.0, 1.0, np.random.randint(
            1, 10)).astype(np.float32) for _ in range(3)]
        y0 = x0
        y1 = x1
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['M'])),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['M'])),
        ]
        expect(node, inputs=[x0, x1], outputs=[y0, y1],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_identity_2_sequences')

    @staticmethod
    def export_sequence_map_identity_1_sequence_1_tensor():  # type: () -> None
        """Identity body over one sequence plus one tensor broadcast to each item."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Identity', ['in0'], ['out0']),
             onnx.helper.make_node('Identity', ['in1'], ['out1'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info('in0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info('in1', onnx.TensorProto.FLOAT, ['M'])],
            [onnx.helper.make_tensor_value_info(
                'out0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info(
                'out1', onnx.TensorProto.FLOAT, ['M'])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['x0', 'x1'],
            outputs=['y0', 'y1'],
            body=body
        )
        x0 = [np.random.uniform(0.0, 1.0, np.random.randint(
            1, 10)).astype(np.float32) for _ in range(3)]
        x1 = np.random.uniform(0.0, 1.0, np.random.randint(
            1, 10)).astype(np.float32)
        # The lone tensor x1 is replicated for every element of the sequence.
        y0 = x0
        y1 = [x1 for _ in range(3)]
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['M']),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['M'])),
        ]
        expect(node, inputs=[x0, x1], outputs=[y0, y1],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_identity_1_sequence_1_tensor')

    @staticmethod
    def export_sequence_map_add_2_sequences():  # type: () -> None
        """Elementwise Add of two sequences with matching per-item lengths."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Add', ['in0', 'in1'], ['out0'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info('in0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info('in1', onnx.TensorProto.FLOAT, ['N'])],
            [onnx.helper.make_tensor_value_info(
                'out0', onnx.TensorProto.FLOAT, ['N'])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['x0', 'x1'],
            outputs=['y0'],
            body=body
        )
        # Same random length per position so the elementwise Add is valid.
        N = [np.random.randint(1, 10) for _ in range(3)]
        x0 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32)
              for k in range(3)]
        x1 = [np.random.uniform(0.0, 1.0, N[k]).astype(np.float32)
              for k in range(3)]
        y0 = [x0[k] + x1[k] for k in range(3)]
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
        ]
        expect(node, inputs=[x0, x1], outputs=[y0],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_add_2_sequences')

    @staticmethod
    def export_sequence_map_add_1_sequence_1_tensor():  # type: () -> None
        """Add a single broadcast tensor to every element of a sequence."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Add', ['in0', 'in1'], ['out0'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info('in0', onnx.TensorProto.FLOAT, ['N']),
             onnx.helper.make_tensor_value_info('in1', onnx.TensorProto.FLOAT, ['N'])],
            [onnx.helper.make_tensor_value_info(
                'out0', onnx.TensorProto.FLOAT, ['N'])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['x0', 'x1'],
            outputs=['y0'],
            body=body
        )
        x0 = [np.random.uniform(0.0, 1.0, 10).astype(np.float32) for k in range(3)]
        x1 = np.random.uniform(0.0, 1.0, 10).astype(np.float32)
        y0 = [x0[i] + x1 for i in range(3)]
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
            onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N']),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['N'])),
        ]
        expect(node, inputs=[x0, x1], outputs=[y0],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_add_1_sequence_1_tensor')

    @staticmethod
    def export_sequence_map_extract_shapes():  # type: () -> None
        """Map the Shape op over a sequence of HxWxC images."""
        body = onnx.helper.make_graph(
            [onnx.helper.make_node('Shape', ['x'], ['shape'])],
            'seq_map_body',
            [onnx.helper.make_tensor_value_info('x', onnx.TensorProto.FLOAT, ['H', 'W', 'C'])],
            [onnx.helper.make_tensor_value_info('shape', onnx.TensorProto.INT64, [3])]
        )
        node = onnx.helper.make_node(
            'SequenceMap',
            inputs=['in_seq'],
            outputs=['shapes'],
            body=body
        )
        # Expected output: the shape of each zero image, as int64 triples.
        shapes = [
            np.array([40, 30, 3], dtype=np.int64),
            np.array([20, 10, 3], dtype=np.int64),
            np.array([10, 5, 3], dtype=np.int64),
        ]
        x0 = [np.zeros(shape, dtype=np.float32) for shape in shapes]
        input_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.FLOAT, ['H', 'W', 'C'])),
        ]
        output_type_protos = [
            onnx.helper.make_sequence_type_proto(
                onnx.helper.make_tensor_type_proto(onnx.TensorProto.INT64, [3])),
        ]
        expect(node, inputs=[x0], outputs=[shapes],
               input_type_protos=input_type_protos,
               output_type_protos=output_type_protos,
               name='test_sequence_map_extract_shapes')
| [
"onnx.helper.make_tensor_type_proto",
"onnx.helper.make_node",
"numpy.random.uniform",
"onnx.helper.make_tensor_value_info",
"numpy.zeros",
"numpy.random.randint",
"numpy.array"
] | [((769, 845), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['x']", 'outputs': "['y']", 'body': 'body'}), "('SequenceMap', inputs=['x'], outputs=['y'], body=body)\n", (790, 845), False, 'import onnx\n'), ((2247, 2341), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['x0', 'x1']", 'outputs': "['y0', 'y1']", 'body': 'body'}), "('SequenceMap', inputs=['x0', 'x1'], outputs=['y0',\n 'y1'], body=body)\n", (2268, 2341), False, 'import onnx\n'), ((4220, 4314), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['x0', 'x1']", 'outputs': "['y0', 'y1']", 'body': 'body'}), "('SequenceMap', inputs=['x0', 'x1'], outputs=['y0',\n 'y1'], body=body)\n", (4241, 4314), False, 'import onnx\n'), ((5963, 6051), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['x0', 'x1']", 'outputs': "['y0']", 'body': 'body'}), "('SequenceMap', inputs=['x0', 'x1'], outputs=['y0'],\n body=body)\n", (5984, 6051), False, 'import onnx\n'), ((7646, 7734), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['x0', 'x1']", 'outputs': "['y0']", 'body': 'body'}), "('SequenceMap', inputs=['x0', 'x1'], outputs=['y0'],\n body=body)\n", (7667, 7734), False, 'import onnx\n'), ((9058, 9148), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""SequenceMap"""'], {'inputs': "['in_seq']", 'outputs': "['shapes']", 'body': 'body'}), "('SequenceMap', inputs=['in_seq'], outputs=['shapes'],\n body=body)\n", (9079, 9148), False, 'import onnx\n'), ((4816, 4881), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['M']"], {}), "(onnx.TensorProto.FLOAT, ['M'])\n", (4850, 4881), False, 'import onnx\n'), ((6120, 6144), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (6137, 6144), True, 'import numpy as np\n'), ((8158, 8223), 
'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (8192, 8223), False, 'import onnx\n'), ((9235, 9272), 'numpy.array', 'np.array', (['[40, 30, 3]'], {'dtype': 'np.int64'}), '([40, 30, 3], dtype=np.int64)\n', (9243, 9272), True, 'import numpy as np\n'), ((9286, 9323), 'numpy.array', 'np.array', (['[20, 10, 3]'], {'dtype': 'np.int64'}), '([20, 10, 3], dtype=np.int64)\n', (9294, 9323), True, 'import numpy as np\n'), ((9337, 9373), 'numpy.array', 'np.array', (['[10, 5, 3]'], {'dtype': 'np.int64'}), '([10, 5, 3], dtype=np.int64)\n', (9345, 9373), True, 'import numpy as np\n'), ((9399, 9432), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (9407, 9432), True, 'import numpy as np\n'), ((450, 502), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['in0']", "['out0']"], {}), "('Identity', ['in0'], ['out0'])\n", (471, 502), False, 'import onnx\n'), ((546, 618), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in0', onnx.TensorProto.FLOAT, ['N'])\n", (580, 618), False, 'import onnx\n'), ((651, 724), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out0"""', 'onnx.TensorProto.FLOAT', "['M']"], {}), "('out0', onnx.TensorProto.FLOAT, ['M'])\n", (685, 724), False, 'import onnx\n'), ((1111, 1176), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (1145, 1176), False, 'import onnx\n'), ((1286, 1351), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (1320, 1351), False, 'import onnx\n'), ((1720, 1772), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['in0']", 
"['out0']"], {}), "('Identity', ['in0'], ['out0'])\n", (1741, 1772), False, 'import onnx\n'), ((1787, 1839), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['in1']", "['out1']"], {}), "('Identity', ['in1'], ['out1'])\n", (1808, 1839), False, 'import onnx\n'), ((1883, 1955), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in0', onnx.TensorProto.FLOAT, ['N'])\n", (1917, 1955), False, 'import onnx\n'), ((1970, 2042), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in1"""', 'onnx.TensorProto.FLOAT', "['M']"], {}), "('in1', onnx.TensorProto.FLOAT, ['M'])\n", (2004, 2042), False, 'import onnx\n'), ((2058, 2131), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('out0', onnx.TensorProto.FLOAT, ['N'])\n", (2092, 2131), False, 'import onnx\n'), ((2146, 2219), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out1"""', 'onnx.TensorProto.FLOAT', "['M']"], {}), "('out1', onnx.TensorProto.FLOAT, ['M'])\n", (2180, 2219), False, 'import onnx\n'), ((2763, 2828), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (2797, 2828), False, 'import onnx\n'), ((2897, 2962), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['M']"], {}), "(onnx.TensorProto.FLOAT, ['M'])\n", (2931, 2962), False, 'import onnx\n'), ((3072, 3137), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (3106, 3137), False, 'import onnx\n'), ((3206, 3271), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['M']"], {}), "(onnx.TensorProto.FLOAT, 
['M'])\n", (3240, 3271), False, 'import onnx\n'), ((3659, 3711), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['in0']", "['out0']"], {}), "('Identity', ['in0'], ['out0'])\n", (3680, 3711), False, 'import onnx\n'), ((3726, 3778), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Identity"""', "['in1']", "['out1']"], {}), "('Identity', ['in1'], ['out1'])\n", (3747, 3778), False, 'import onnx\n'), ((3822, 3894), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in0', onnx.TensorProto.FLOAT, ['N'])\n", (3856, 3894), False, 'import onnx\n'), ((3909, 3981), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in1"""', 'onnx.TensorProto.FLOAT', "['M']"], {}), "('in1', onnx.TensorProto.FLOAT, ['M'])\n", (3943, 3981), False, 'import onnx\n'), ((3997, 4070), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('out0', onnx.TensorProto.FLOAT, ['N'])\n", (4031, 4070), False, 'import onnx\n'), ((4102, 4175), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out1"""', 'onnx.TensorProto.FLOAT', "['M']"], {}), "('out1', onnx.TensorProto.FLOAT, ['M'])\n", (4136, 4175), False, 'import onnx\n'), ((4736, 4801), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (4770, 4801), False, 'import onnx\n'), ((4990, 5055), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (5024, 5055), False, 'import onnx\n'), ((5124, 5189), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['M']"], {}), "(onnx.TensorProto.FLOAT, ['M'])\n", (5158, 5189), False, 'import onnx\n'), ((5572, 5626), 
'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""', "['in0', 'in1']", "['out0']"], {}), "('Add', ['in0', 'in1'], ['out0'])\n", (5593, 5626), False, 'import onnx\n'), ((5670, 5742), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in0', onnx.TensorProto.FLOAT, ['N'])\n", (5704, 5742), False, 'import onnx\n'), ((5757, 5829), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in1"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in1', onnx.TensorProto.FLOAT, ['N'])\n", (5791, 5829), False, 'import onnx\n'), ((5845, 5918), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('out0', onnx.TensorProto.FLOAT, ['N'])\n", (5879, 5918), False, 'import onnx\n'), ((6507, 6572), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (6541, 6572), False, 'import onnx\n'), ((6641, 6706), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (6675, 6706), False, 'import onnx\n'), ((6816, 6881), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (6850, 6881), False, 'import onnx\n'), ((7255, 7309), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Add"""', "['in0', 'in1']", "['out0']"], {}), "('Add', ['in0', 'in1'], ['out0'])\n", (7276, 7309), False, 'import onnx\n'), ((7353, 7425), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""in0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in0', onnx.TensorProto.FLOAT, ['N'])\n", (7387, 7425), False, 'import onnx\n'), ((7440, 7512), 'onnx.helper.make_tensor_value_info', 
'onnx.helper.make_tensor_value_info', (['"""in1"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('in1', onnx.TensorProto.FLOAT, ['N'])\n", (7474, 7512), False, 'import onnx\n'), ((7528, 7601), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""out0"""', 'onnx.TensorProto.FLOAT', "['N']"], {}), "('out0', onnx.TensorProto.FLOAT, ['N'])\n", (7562, 7601), False, 'import onnx\n'), ((7887, 7918), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(10)'], {}), '(0.0, 1.0, 10)\n', (7904, 7918), True, 'import numpy as np\n'), ((8078, 8143), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (8112, 8143), False, 'import onnx\n'), ((8332, 8397), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['N']"], {}), "(onnx.TensorProto.FLOAT, ['N'])\n", (8366, 8397), False, 'import onnx\n'), ((8770, 8818), 'onnx.helper.make_node', 'onnx.helper.make_node', (['"""Shape"""', "['x']", "['shape']"], {}), "('Shape', ['x'], ['shape'])\n", (8791, 8818), False, 'import onnx\n'), ((8862, 8947), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""x"""', 'onnx.TensorProto.FLOAT', "['H', 'W', 'C']"], {}), "('x', onnx.TensorProto.FLOAT, ['H', 'W', 'C']\n )\n", (8896, 8947), False, 'import onnx\n'), ((8958, 9030), 'onnx.helper.make_tensor_value_info', 'onnx.helper.make_tensor_value_info', (['"""shape"""', 'onnx.TensorProto.INT64', '[3]'], {}), "('shape', onnx.TensorProto.INT64, [3])\n", (8992, 9030), False, 'import onnx\n'), ((9550, 9625), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.FLOAT', "['H', 'W', 'C']"], {}), "(onnx.TensorProto.FLOAT, ['H', 'W', 'C'])\n", (9584, 9625), False, 'import onnx\n'), ((9735, 9798), 'onnx.helper.make_tensor_type_proto', 'onnx.helper.make_tensor_type_proto', (['onnx.TensorProto.INT64', 
'[3]'], {}), '(onnx.TensorProto.INT64, [3])\n', (9769, 9798), False, 'import onnx\n'), ((918, 949), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(10)'], {}), '(0.0, 1.0, 10)\n', (935, 949), True, 'import numpy as np\n'), ((4530, 4554), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4547, 4554), True, 'import numpy as np\n'), ((6178, 6211), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'N[k]'], {}), '(0.0, 1.0, N[k])\n', (6195, 6211), True, 'import numpy as np\n'), ((6278, 6311), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', 'N[k]'], {}), '(0.0, 1.0, N[k])\n', (6295, 6311), True, 'import numpy as np\n'), ((7804, 7835), 'numpy.random.uniform', 'np.random.uniform', (['(0.0)', '(1.0)', '(10)'], {}), '(0.0, 1.0, 10)\n', (7821, 7835), True, 'import numpy as np\n'), ((2439, 2463), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2456, 2463), True, 'import numpy as np\n'), ((2558, 2582), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (2575, 2582), True, 'import numpy as np\n'), ((4412, 4436), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (4429, 4436), True, 'import numpy as np\n')] |
'''
Created on 30-08-2011
@author: Ankhazam
Based on find_obj.py OpenCV2 sample
'''
import numpy as np
import cv2
from common import anorm
class ORBFlannMatcher(object):
    '''
    Main recognition and training module.

    Detects FAST/ORB features on a test image, matches them against a
    database of trained object orientations via a FLANN (LSH) matcher and
    estimates a homography for the best matching orientation of each object.
    Uses the legacy OpenCV 2.x feature API.
    '''
    # Feature detection / description pipeline (legacy OpenCV 2.x API).
    detector = cv2.FastFeatureDetector(16, True)
    detector = cv2.GridAdaptedFeatureDetector(detector)
    extractor = cv2.DescriptorExtractor_create('ORB')
    FLANN_INDEX_KDTREE = 1
    FLANN_INDEX_LSH = 6
    flann_params = dict(algorithm = FLANN_INDEX_LSH,
                        table_number = 12,      # 12
                        key_size = 20,          # 20
                        multi_probe_level = 2)  # 2
    matcher = cv2.FlannBasedMatcher(flann_params, {})  # bug : need to pass empty dict (#1329)

    def __init__(self, trainedObjects):
        '''
        Constructor
        @param trainedObjects: List of @see: TrainedObject used as recognition DB
        '''
        self.trainedObjects = trainedObjects

    def addTrainedObject(self, trainedObject):
        '''
        Extends the loaded database with a new @see: TrainedObject
        '''
        self.trainedObjects.append(trainedObject)

    def __matchWithGivenflann(self, desc1, flannIndex, r_threshold=0.4):
        '''
        Internal flann descriptors matcher in order to find the best match.
        @param desc1: SURF features descriptors of currently processed object orientation and the test image.
        @param flannIndex: PreGenerated FlannIndex to be used for searching
        @param r_threshold: Tunnable threshold for kNN normalized distance inside the descriptors space.
        @return: Array of matched points.
        '''
        idx2, dist = flannIndex.knnSearch(desc1, 2, params={})  # bug: need to provide empty dict
        # Lowe's ratio test on the two nearest neighbours.
        mask = dist[:, 0] / dist[:, 1] < r_threshold
        idx1 = np.arange(len(desc1))
        # list() makes this work on Python 3, where zip returns an iterator.
        pairs = np.int32(list(zip(idx1, idx2[:, 0])))
        return pairs[mask]

    def matchObject(self, image, useRansac = 1):
        '''
        Finds best match for each object in the database.
        @param image: Image with object(s) to be found.
        @param useRansac: 1/0 defining the optional use of RANSAC in homography matrix search.
        @return: List of tuples (TrainedObject, bestMatchOrientationIndex,
                                 homographyStatus, homographyMatrix,
                                 (matchedPointsInTrained, matchedPointsInTest) )
        '''
        # training the matcher for current test image
        self.matcher.clear()
        ref_kp = self.detector.detect(image)
        ref_kp, ref_desc = self.extractor.compute(image, ref_kp)
        self.matcher.add([ref_desc])
        # list of (TrainedObject, bestMatchOrientationIndex, homographyStatus, homographyMatrix)
        bestMatches = list()
        # simple searching for best matched orientation
        for trainedObject in self.trainedObjects:
            # (TrainedObject, bestMatchOrientationIndex, homographyStatus, homographyMatrix)
            bestMatchObject = None
            ind = 0
            for orientation in trainedObject.orientations:
                raw_matches = self.matcher.knnMatch(orientation[2], 2)
                # Lowe's ratio test to drop ambiguous matches.
                matches = []
                for m in raw_matches:
                    if len(m) == 2:
                        m1, m2 = m
                        if m1.distance < m2.distance * 0.7:
                            matches.append((m1.trainIdx, m1.queryIdx))
                if len(matches) > 10:
                    matched_p1 = np.float32([ref_kp[i].pt for i, j in matches])
                    matched_p2 = np.float32([orientation[1][j].pt for i, j in matches])
                    H, status = cv2.findHomography(matched_p1, matched_p2,
                                                   (0, cv2.RANSAC)[useRansac], 10.0)
                    inliers = np.sum(status)
                    # Fix: the original condition evaluated
                    # np.sum(bestMatchObject[2]) even when bestMatchObject was
                    # still None (short-circuit fell through to the second
                    # clause), crashing on the first orientation with zero
                    # inliers. Keep the original intent: a candidate replaces
                    # no match only with at least one inlier, and replaces an
                    # existing match on more inliers, or equal inliers but
                    # more total matches.
                    if bestMatchObject is None:
                        better = inliers > 0
                    else:
                        best_inliers = np.sum(bestMatchObject[2])
                        better = (inliers > best_inliers
                                  or (inliers == best_inliers
                                      and len(status) > len(bestMatchObject[2])))
                    if better:
                        bestMatchObject = (trainedObject, ind, status, H,
                                           (matched_p1, matched_p2))
                ind += 1
            # appends to the results the best match for each TrainedObject
            if bestMatchObject is not None:
                bestMatches.append(bestMatchObject)
        return bestMatches
| [
"cv2.DescriptorExtractor_create",
"numpy.sum",
"numpy.float32",
"cv2.FlannBasedMatcher",
"cv2.GridAdaptedFeatureDetector",
"cv2.FastFeatureDetector",
"cv2.findHomography"
] | [((248, 281), 'cv2.FastFeatureDetector', 'cv2.FastFeatureDetector', (['(16)', '(True)'], {}), '(16, True)\n', (271, 281), False, 'import cv2\n'), ((297, 337), 'cv2.GridAdaptedFeatureDetector', 'cv2.GridAdaptedFeatureDetector', (['detector'], {}), '(detector)\n', (327, 337), False, 'import cv2\n'), ((354, 391), 'cv2.DescriptorExtractor_create', 'cv2.DescriptorExtractor_create', (['"""ORB"""'], {}), "('ORB')\n", (384, 391), False, 'import cv2\n'), ((644, 683), 'cv2.FlannBasedMatcher', 'cv2.FlannBasedMatcher', (['flann_params', '{}'], {}), '(flann_params, {})\n', (665, 683), False, 'import cv2\n'), ((3604, 3650), 'numpy.float32', 'np.float32', (['[ref_kp[i].pt for i, j in matches]'], {}), '([ref_kp[i].pt for i, j in matches])\n', (3614, 3650), True, 'import numpy as np\n'), ((3686, 3740), 'numpy.float32', 'np.float32', (['[orientation[1][j].pt for i, j in matches]'], {}), '([orientation[1][j].pt for i, j in matches])\n', (3696, 3740), True, 'import numpy as np\n'), ((3836, 3912), 'cv2.findHomography', 'cv2.findHomography', (['matched_p1', 'matched_p2', '(0, cv2.RANSAC)[useRansac]', '(10.0)'], {}), '(matched_p1, matched_p2, (0, cv2.RANSAC)[useRansac], 10.0)\n', (3854, 3912), False, 'import cv2\n'), ((4122, 4136), 'numpy.sum', 'np.sum', (['status'], {}), '(status)\n', (4128, 4136), True, 'import numpy as np\n'), ((4170, 4184), 'numpy.sum', 'np.sum', (['status'], {}), '(status)\n', (4176, 4184), True, 'import numpy as np\n'), ((4187, 4213), 'numpy.sum', 'np.sum', (['bestMatchObject[2]'], {}), '(bestMatchObject[2])\n', (4193, 4213), True, 'import numpy as np\n'), ((4242, 4256), 'numpy.sum', 'np.sum', (['status'], {}), '(status)\n', (4248, 4256), True, 'import numpy as np\n'), ((4260, 4286), 'numpy.sum', 'np.sum', (['bestMatchObject[2]'], {}), '(bestMatchObject[2])\n', (4266, 4286), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2018 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the GAIRL framework"""
import collections
import os
import random
import time
import sys
from dopamine.agents.abstract_agent import AbstractAgent
from dopamine.agents.dqn import dqn_agent
from dopamine.agents.implicit_quantile import implicit_quantile_agent
from dopamine.agents.rainbow import rainbow_agent
from dopamine.discrete_domains import atari_lib
from dopamine.generators import dummy_generator
from dopamine.generators.gan import gan
from dopamine.generators.regressor import regressor
from dopamine.generators.wgan import wgan
from dopamine.generators.wgan_gp import wgan_gp
from dopamine.replay_memory import circular_replay_buffer
import numpy as np
import tensorflow as tf
import gin.tf
# Suffixes distinguishing the three GAIRL submodules in saved artifacts
# (presumably checkpoint name appendices — not referenced in this chunk).
AGENT_APPENDIX = '@a'
OBSERV_APPENDIX = '@o'
REWTERM_APPENDIX = '@r'
# Subdirectory names for each submodule and for the two replay memories
# (presumably used for checkpointing — not referenced in this chunk).
AGENT_SUBDIR = 'agent'
OBSERV_SUBDIR = 'observ'
REWTERM_SUBDIR = 'rewterm'
TRAIN_MEM_SUBDIR = 'train_mem'
TEST_MEM_SUBDIR = 'test_mem'
def dict_to_str(d):
  """Render a mapping as a single 'key: value, key: value' string."""
  pieces = (f'{key}: {value}' for key, value in d.items())
  return ', '.join(pieces)
def _calculate_classification_statistics(output, target):
  """Compute (f1, precision, recall) treating inputs as binary labels.

  Both arrays are clipped to [0, 1] and rounded first, so real-valued
  predictions are binarised before counting. Returns all zeros when there
  are no true positives (which also avoids division by zero).
  """
  predicted = np.round(np.clip(output, 0, 1))
  actual = np.round(np.clip(target, 0, 1))
  tp = np.sum(predicted * actual)
  if tp == 0:
    return 0., 0., 0.
  precision = tp / np.sum(predicted)
  recall = tp / np.sum(actual)
  return (2 * precision * recall) / (precision + recall), precision, recall
@gin.configurable
def create_agent(sess, agent_name, num_actions,
                 observation_shape=atari_lib.NATURE_DQN_OBSERVATION_SHAPE,
                 observation_dtype=atari_lib.NATURE_DQN_DTYPE,
                 stack_size=atari_lib.NATURE_DQN_STACK_SIZE,
                 summary_writer=None):
  """Creates an agent.

  Args:
    sess: A `tf.Session` object for running associated ops.
    agent_name: str, name of the agent to create; one of 'dqn', 'rainbow'
      or 'implicit_quantile'.
    num_actions: int, number of actions the agent can take at any state.
    observation_shape: tuple of ints describing the observation shape.
    observation_dtype: tf.DType, specifies the type of the observations. Note
      that if your inputs are continuous, you should set this to tf.float32.
    stack_size: int, number of frames to use in state stack.
    summary_writer: A Tensorflow summary writer to pass to the agent
      for in-agent training statistics in Tensorboard.

  Returns:
    agent: An RL agent.

  Raises:
    ValueError: If `agent_name` is not in the supported list.
  """
  # implicit_quantile ignores the observation configuration entirely.
  if agent_name == 'implicit_quantile':
    return implicit_quantile_agent.ImplicitQuantileAgent(
        sess, num_actions, summary_writer=summary_writer
    )
  if agent_name == 'dqn':
    agent_cls = dqn_agent.DQNAgent
  elif agent_name == 'rainbow':
    agent_cls = rainbow_agent.RainbowAgent
  else:
    raise ValueError('Unknown agent: {}'.format(agent_name))
  return agent_cls(
      sess, num_actions, observation_shape=observation_shape,
      observation_dtype=observation_dtype, stack_size=stack_size,
      summary_writer=summary_writer
  )
@gin.configurable
def create_generator(sess, generator_name, output_shape,
                     input_shapes=None, summary_writer=None):
  """Creates a generator.

  Args:
    sess: A `tf.Session` object for running associated ops.
    generator_name: str, name of the generator to create; one of 'dummy',
      'regressor', 'vgan', 'wgan' or 'wgan_gp'.
    output_shape: tuple of ints describing the output shape.
    input_shapes: tuple of tuples of ints describing input shapes (there may
      be more than one input).
    summary_writer: A Tensorflow summary writer to pass to the agent
      for in-agent training statistics in Tensorboard.

  Returns:
    generator: A generator.

  Raises:
    ValueError: If `generator_name` is not in the supported list.
  """
  assert generator_name is not None
  # The dummy and regressor generators have their own call signatures.
  if generator_name == 'dummy':
    return dummy_generator.DummyGenerator(output_shape)
  if generator_name == 'regressor':
    return regressor.Regressor(sess, input_shapes, output_shape,
                               summary_writer=summary_writer)
  # All GAN flavours share one constructor signature.
  gan_classes = {
      'vgan': gan.VanillaGAN,
      'wgan': wgan.WassersteinGAN,
      'wgan_gp': wgan_gp.WassersteinGANGP,
  }
  if generator_name not in gan_classes:
    raise ValueError('Unknown generator: {}'.format(generator_name))
  return gan_classes[generator_name](
      sess, output_shape, conditional_input_shapes=input_shapes,
      summary_writer=summary_writer)
@gin.configurable
class GAIRLAgent(AbstractAgent):
"""An implementation of the GAIRL agent."""
  def __init__(self,
               sess,
               num_actions,
               rl_agent_name='dqn',
               observ_gen_name='wgan_gp',
               rewterm_gen_name='regressor',
               observation_shape=atari_lib.NATURE_DQN_OBSERVATION_SHAPE,
               observation_dtype=atari_lib.NATURE_DQN_DTYPE,
               stack_size=atari_lib.NATURE_DQN_STACK_SIZE,
               model_free_length=10000,
               model_learning_length=50000,
               model_learning_logging_frequency=100,
               model_based_max_steps_per_episode=10000,
               model_based_length=50000,
               model_based_logging_frequency=10000,
               terminals_upsampling_coeff=None,
               train_memory_capacity=40000,
               test_memory_capacity=10000,
               memory_batch_size=256,
               summary_writer=None,
               eval_mode=False):
    """Initializes the agent by combining generative models with the
    reinforcement learning agent.
    Args:
      sess: `tf.Session`, for executing ops.
      num_actions: int, number of actions the agent can take at any state.
      rl_agent_name: agent, the main decision making agent behind the GAIRL
        framework.
      observ_gen_name: generative model, generative model that will be used
        to learn and simulate (state, action) -> observation transition.
      rewterm_gen_name: generative model, generative model that will be used
        to learn and simulate (state, action) -> (reward, is_terminal)
        transition.
      observation_shape: tuple of ints describing the observation shape.
      stack_size: int, number of frames to use in state stack.
      model_free_length: int, how many model-free steps are performed in
        a single GAIRL iteration.
      model_learning_length: int, how many model learning iterations are
        performed in a single GAIRL iteration.
      model_learning_logging_frequency: int, frequency with which data will be
        logged during the model learning phase. Lower values will result in
        slower training.
      model_based_max_steps_per_episode: int, maximum number of model based
        steps after which an episode terminates.
      model_based_length: int, how many reinforcement learning steps will be
        performed in a simulated environment in a single GAIRL iteration.
      model_based_logging_frequency: int, frequency with which data will be
        logged during the model based phase. Lower values will result in
        slower training.
      terminals_upsampling_coeff: float, specifies what's the expected
        terminals/non_terminals ratio in the memory. If terminals don't
        naturally reach that ratio, they will be upsampled accordingly.
        None if upsampling shouldn't occur.
      train_memory_capacity: int, capacity of the memory used for training
        the generative models.
      test_memory_capacity: int, capacity of the memory used for testing
        the generative models.
      memory_batch_size: int, batch size used when replaying transitions from
        the memories.
      summary_writer: SummaryWriter object for outputting training statistics.
        Summary writing disabled if set to None.
      eval_mode: bool, True for evaluation and False for training.
    """
    # Log the full configuration so experiment runs are self-describing.
    tf.logging.info('Creating %s agent with the following parameters:',
                    self.__class__.__name__)
    tf.logging.info('\t Model free agent: %s', rl_agent_name)
    tf.logging.info('\t Observation generator: %s', observ_gen_name)
    tf.logging.info('\t Rewterm generator: %s', rewterm_gen_name)
    tf.logging.info('\t Model free length: %d', model_free_length)
    tf.logging.info('\t Model learning length: %d', model_learning_length)
    tf.logging.info('\t Model based max steps per episode: %d',
                    model_based_max_steps_per_episode)
    # NOTE(review): %d with the default terminals_upsampling_coeff=None would
    # fail at logging time — confirm callers always pass a number, or use %s.
    tf.logging.info('\t Terminals upsampling coefficient: %d',
                    terminals_upsampling_coeff)
    tf.logging.info('\t Model based length: %d', model_based_length)
    tf.logging.info('\t Train memory capacity: %d', train_memory_capacity)
    tf.logging.info('\t Test memory capacity: %d', test_memory_capacity)
    tf.logging.info('\t Memory batch size: %d', memory_batch_size)
    AbstractAgent.__init__(self,
                           num_actions,
                           observation_shape=observation_shape,
                           stack_size=stack_size)
    self.observation_dtype = observation_dtype
    # Step counters and phase lengths for the three GAIRL phases
    # (model-free, model learning, model-based).
    self.model_free_steps = 0
    self.model_free_steps_since_phase_start = 0
    self.model_free_length = model_free_length
    self.model_learning_steps = 0
    self.model_learning_length = model_learning_length
    self.model_learning_logging_frequency = model_learning_logging_frequency
    self.model_based_steps = 0
    self.model_based_steps_since_last_log = 0
    self.model_based_steps_since_phase_start = 0
    self.model_based_max_steps_per_episode = model_based_max_steps_per_episode
    self.model_based_length = model_based_length
    self.model_based_logging_frequency = model_based_logging_frequency
    # Running terminal/non-terminal counts drive terminal upsampling in
    # _store_transition.
    self.terminals_so_far = 0
    self.non_terminals_so_far = 0
    self.terminals_upsampling_coeff = terminals_upsampling_coeff
    self.eval_mode = eval_mode
    self.summary_writer = summary_writer
    # Identity matrix whose rows are one-hot action encodings; dtype matches
    # the observation dtype, presumably so actions can be fed to the
    # generators alongside states — TODO confirm.
    self.action_onehot_template = np.eye(num_actions,
                                         dtype=observation_dtype.as_numpy_dtype)
    # Initialising submodels
    state_shape = self.observation_shape + (stack_size,)
    input_shapes = (state_shape, (num_actions,))
    # Each submodule gets its own variable and gin-config scope so their
    # parameters and configuration do not collide.
    with tf.variable_scope('agent'), gin.config_scope('agent'):
      self.rl_agent = create_agent(sess, rl_agent_name, num_actions,
                                   observation_shape=observation_shape,
                                   observation_dtype=observation_dtype,
                                   stack_size=stack_size,
                                   summary_writer=summary_writer)
    with tf.variable_scope('observ_gen'), gin.config_scope('observ_gen'):
      self.observ_gen = create_generator(sess, observ_gen_name,
                                         self.observation_shape,
                                         input_shapes=input_shapes,
                                         summary_writer=summary_writer)
    with tf.variable_scope('rewterm_gen'), gin.config_scope('rewterm_gen'):
      self.rewterm_gen = create_generator(sess, rewterm_gen_name, (2,),
                                          input_shapes=input_shapes,
                                          summary_writer=summary_writer)
    # Each episode goes either to train or to test memory
    total_memory = (train_memory_capacity + test_memory_capacity)
    self._test_episode_prob = test_memory_capacity / total_memory
    self._train_memory = self._build_memory(train_memory_capacity,
                                            memory_batch_size)
    self._test_memory = self._build_memory(test_memory_capacity,
                                           memory_batch_size)
    # Variables to be initialized by the agent once it interacts with the
    # environment.
    self._is_test_episode = False
    self._train_observation = None
    self._last_train_observation = None
def _build_memory(self, capacity, batch_size):
"""Creates the replay buffer used by the generators.
Args:
capacity: int, maximum capacity of the memory unit.
batch_size int, batch size of the batch produced during memory replay.
Returns:
A OutOfGraphReplayBuffer object.
"""
return circular_replay_buffer.OutOfGraphReplayBuffer(
self.observation_shape,
self.stack_size,
capacity,
batch_size,
observation_dtype=self.observation_dtype.as_numpy_dtype,
)
def begin_episode(self, observation):
"""Returns the agent's first action for this episode.
Args:
observation: numpy array, the environment's initial observation.
Returns:
int, the selected action.
"""
self._is_test_episode = random.random() < self._test_episode_prob
if not self.eval_mode:
self._train_observation = np.reshape(observation, self.observation_shape)
self.model_free_steps += 1
self.model_free_steps_since_phase_start += 1
self.rl_agent.eval_mode = self.eval_mode
self.action = self.rl_agent.begin_episode(observation)
return self.action
def step(self, reward, observation):
"""Records the most recent transition and returns the agent's next action.
We store the observation of the last time step since we want to store it
with the reward.
Args:
reward: float, the reward received from the agent's most recent action.
observation: numpy array, the most recent observation.
Returns:
int, the selected action.
"""
if not self.eval_mode:
self._last_train_observation = self._train_observation
self._train_observation = np.reshape(observation, self.observation_shape)
self._store_transition(self._last_train_observation, self.action,
reward, False)
self.model_free_steps += 1
self.model_free_steps_since_phase_start += 1
self.rl_agent.eval_mode = self.eval_mode
self.action = self.rl_agent.step(reward, observation)
return self.action
def end_episode(self, reward):
"""Signals the end of the episode to the agent.
We store the observation of the current time step, which is the last
observation of the episode.
Args:
reward: float, the last reward from the environment.
"""
if not self.eval_mode:
self._store_transition(self._train_observation, self.action, reward, True)
if self.model_free_steps_since_phase_start > self.model_free_length:
self._train_generators()
self._train_model_based()
self.model_free_steps_since_phase_start = 0
self.rl_agent.eval_mode = self.eval_mode
self.rl_agent.end_episode(reward)
def _store_transition(self, last_observation, action, reward, is_terminal):
"""Stores an experienced transition consisting of the tuple
(last_observation, action, reward, is_terminal) in an appropriate
memory unit (train or test).
Pedantically speaking, this does not actually store an entire transition
since the next state is recorded on the following time step.
Args:
last_observation: numpy array, last observation.
action: int, the action taken.
reward: float, the reward.
is_terminal: bool, indicating if the current state is a terminal state.
"""
mem = self._test_memory if self._is_test_episode else self._train_memory
if is_terminal:
self.terminals_so_far += 1
else:
self.non_terminals_so_far += 1
upsampling_ratio = 1
if is_terminal and self.terminals_upsampling_coeff is not None:
nonterm_term_ratio = self.non_terminals_so_far / self.terminals_so_far
upsampling_ratio = nonterm_term_ratio * self.terminals_upsampling_coeff
# At least one (no upsampling) if naturally ratio exceeds expected ratio
upsampling_ratio = np.maximum(1, round(upsampling_ratio))
for i in range(upsampling_ratio):
mem.add(last_observation, action, reward, is_terminal)
  def _train_generators(self):
    """Run the model learning phase: train both generative models.

    Samples batches from the train memory and trains the observation and
    rewterm generators, periodically logging mean per-step statistics and
    writing train/test summaries, until model_learning_length steps have
    been performed in this phase.
    """
    tf.logging.info('***Starting model learning phase.***')
    start_time = time.time()
    mean_statistics = collections.defaultdict(int)
    while True:
      # Prepare data
      batch_data = self._train_memory.sample_transition_batch()
      batch_inputs, batch_next_observ, batch_rewterm = \
          self._prepare_transitions_batch(batch_data)
      # Train models
      observ_statistics = self.observ_gen.train(batch_inputs, batch_next_observ)
      rewterm_statistics = self.rewterm_gen.train(batch_inputs, batch_rewterm)
      # Accumulate running means; each contribution is pre-divided by the
      # logging frequency so the sums are means over one logging window.
      for k, v in observ_statistics.items():
        weighted_value = v / self.model_learning_logging_frequency
        mean_statistics[f'mean_observ_{k}'] += weighted_value
      for k, v in rewterm_statistics.items():
        weighted_value = v / self.model_learning_logging_frequency
        mean_statistics[f'mean_rewterm_{k}'] += weighted_value
      self.model_learning_steps += 1
      # Log
      if self.model_learning_steps % self.model_learning_logging_frequency == 0:
        time_delta = time.time() - start_time
        tf.logging.info('Step: %d', self.model_learning_steps)
        tf.logging.info('Average statistics per training: %s',
                        dict_to_str(mean_statistics))
        tf.logging.info('Average training steps per second: %.2f',
                        self.model_learning_logging_frequency / time_delta)
        start_time = time.time()
        mean_statistics = collections.defaultdict(int)
        self._save_model_learning_summaries()
      # Stop after model_learning_length steps in this phase. Note this
      # tests the cumulative step counter modulo the phase length, so it
      # assumes each phase starts at a multiple of model_learning_length.
      if self.model_learning_steps % self.model_learning_length == 0:
        break
    tf.logging.info('***Finished model learning phase.***')
def _save_model_learning_summaries(self):
train_data = self._train_memory.sample_transition_batch()
train_summs = self._prepare_model_learning_summaries(train_data, 'Train')
test_data = self._test_memory.sample_transition_batch()
test_summs = self._prepare_model_learning_summaries(test_data, 'Test')
summaries = train_summs + test_summs
self.summary_writer.add_summary(tf.Summary(value=summaries),
self.model_learning_steps)
  def _prepare_model_learning_summaries(self, batch_data, test_or_train):
    """Build TF summary values comparing generator outputs to real data.

    Args:
      batch_data: tuple of numpy arrays, transitions sampled from a memory.
      test_or_train: str, label (e.g. 'Train' or 'Test') embedded in the
        summary tags.

    Returns:
      list of tf.Summary.Value: L1 loss of the observation generator, L1/L2
      losses of the reward/terminal generator, and precision/recall/F1 of
      the terminal-flag prediction.
    """
    batch_inputs, batch_next_observ, batch_rewterm = \
        self._prepare_transitions_batch(batch_data)
    # Column 0 holds rewards, column 1 holds terminal flags.
    batch_reward = batch_rewterm[:, 0]
    batch_terminal = batch_rewterm[:, 1]
    gen_next_observ = self.observ_gen.generate(batch_inputs)
    observ_l1 = np.mean(np.abs(gen_next_observ - batch_next_observ))
    gen_rewterm = self.rewterm_gen.generate(batch_inputs)
    gen_reward = gen_rewterm[:, 0]
    gen_terminal = gen_rewterm[:, 1]
    rewterm_l1 = np.mean(np.abs(gen_rewterm - batch_rewterm))
    reward_l2 = np.mean(np.square(gen_reward - batch_reward))
    # Terminal prediction is scored as a binary classification problem.
    term_f1, term_precision, term_recall = \
        _calculate_classification_statistics(gen_terminal, batch_terminal)
    return [
        tf.Summary.Value(tag=f'Observ/{test_or_train}L1Loss',
                         simple_value=observ_l1),
        tf.Summary.Value(tag=f'Rewterm/{test_or_train}L1Loss',
                         simple_value=rewterm_l1),
        tf.Summary.Value(tag=f'Rewterm/{test_or_train}RewardL2Loss',
                         simple_value=reward_l2),
        tf.Summary.Value(tag=f'Rewterm/{test_or_train}TerminalPrecision',
                         simple_value=term_precision),
        tf.Summary.Value(tag=f'Rewterm/{test_or_train}TerminalRecall',
                         simple_value=term_recall),
        tf.Summary.Value(tag=f'Rewterm/{test_or_train}TerminalF1',
                         simple_value=term_f1)
    ]
def _prepare_transitions_batch(self, batch_data):
"""Transforms batch data from memory into separate batches usable by
generative models.
Args:
batch_data: tuple of numpy arrays, tuple returned by the memory
consisting of all important information about sampled transitions.
Returns:
tuple of numpy arrays, (batch_inputs, batch_next_observ, batch_rewterm),
all necessary and prepared pieces of transition data.
"""
batch_states = batch_data[0]
batch_actions_onehot = self.action_onehot_template[batch_data[1]]
batch_inputs = (batch_states, batch_actions_onehot)
batch_next_observ = batch_data[3][..., -1]
batch_rewterm = np.column_stack((batch_data[2], batch_data[6]))
return batch_inputs, batch_next_observ, batch_rewterm
  def _train_model_based(self):
    """Run model based phase - train the RL agent inside the learned model.

    Rolls out simulated episodes (driven by the generative models) until
    `model_based_length` environment steps have been executed, logging the
    average episode return and throughput roughly every
    `model_based_logging_frequency` steps.
    """
    tf.logging.info('***Starting model based phase.***')
    self.model_based_steps_since_phase_start = 0
    # Training mode: the agent may explore / update its networks.
    self.rl_agent.eval_mode = False
    num_episodes = 0
    sum_returns = 0
    start_time = time.time()
    while self.model_based_steps_since_phase_start < self.model_based_length:
      length, reward = self._run_model_based_episode()
      self.model_based_steps += length
      self.model_based_steps_since_last_log += length
      self.model_based_steps_since_phase_start += length
      num_episodes += 1
      sum_returns += reward
      # We use sys.stdout.write instead of tf.logging so as to flush frequently
      # without generating a line break.
      sys.stdout.write(f'Steps executed so far: '
                       f'{self.model_based_steps_since_last_log} ' +
                       f'Episode length: {length} ' +
                       f'Return: {reward}\r')
      sys.stdout.flush()
      # Log
      if self.model_based_steps_since_last_log > self.model_based_logging_frequency:
        time_delta = time.time() - start_time
        # Guard against division by zero when no episode finished yet.
        average_return = sum_returns / num_episodes if num_episodes > 0 else 0.
        tf.logging.info('Average return per training episode: %.2f',
                        average_return)
        tf.logging.info('Average training steps per second: %.2f',
                        self.model_based_steps_since_last_log / time_delta)
        # Reset the logging window.
        start_time = time.time()
        self._save_model_based_summaries()
        num_episodes = 0
        sum_returns = 0
        self.model_based_steps_since_last_log = 0
    tf.logging.info('***Finished model based phase.***')
  def _run_model_based_episode(self):
    """Executes a full trajectory of the agent interacting with the
    environment simulation created by the generative models.

    The simulation starts based on a sample non-terminal observation from the
    train memory and is then roll-out using GAIRL's generative models.

    Returns:
      The number of steps taken and the total reward.
    """
    step_number = 0
    total_reward = 0.
    # Frame-stacked state buffer; the newest frame occupies the last index
    # of the final axis (see _update_state).
    state = np.zeros((1,) + self.observation_shape + (self.stack_size,))
    # Initialize episode
    observation = self._get_initial_model_based_observation()
    action = self.rl_agent.begin_episode(observation)
    # Keep interacting until we reach a terminal observation.
    while True:
      state = self._update_state(state, observation)
      action_onehot = self.action_onehot_template[[action]]
      # Simulate transition
      observation = self.observ_gen.generate((state, action_onehot))[0]
      reward, is_terminal = self.rewterm_gen.generate((state, action_onehot))[0]
      total_reward += reward
      step_number += 1
      # Perform outputs postprocessing.
      reward = np.clip(reward, -1, 1)
      # The generated terminal flag is continuous; round and clamp to {0, 1}.
      is_terminal = round(is_terminal)
      is_terminal = np.clip(is_terminal, 0, 1)
      if is_terminal or step_number >= self.model_based_max_steps_per_episode:
        break
      action = self.rl_agent.step(reward, observation)
    self.rl_agent.end_episode(reward)
    return step_number, total_reward
  def _get_initial_model_based_observation(self):
    """Getting a sample observation to initialize the episode.

    Returns:
      numpy array, the initial non-terminal observation sampled from the train
      memory.
    """
    state = None
    is_terminal = 1
    # Keep resampling until the drawn transition is non-terminal.
    while is_terminal:
      transition = self._train_memory.sample_transition_batch(batch_size=1)
      state = transition[0][0]
      is_terminal = transition[6][0]
    # NOTE(review): index -1 is the *most* recent frame of the stack
    # (_update_state writes the new frame to [..., -1]); the previous
    # comment saying "least recent" was misleading.
    return state[..., -1]
def _update_state(self, state, observation):
"""Records an observation and updates state.
Extracts a frame from the observation vector and overwrites the oldest
frame in the state buffer.
Args:
state: numpy array, stack of observations.
observation: numpy array, an observation from the environment.
Returns:
Updated state.
"""
# Set current observation. We do the reshaping to handle environments
# without frame stacking.
observation = np.reshape(observation, self.observation_shape)
# Swap out the oldest frame with the current frame.
state = np.roll(state, -1, axis=-1)
state[0, ..., -1] = observation
return state
def _save_model_based_summaries(self):
# TODO
pass
  def bundle_and_checkpoint(self, checkpoint_dir, iteration_number):
    """Returns a self-contained bundle of the agent's state.

    This is used for checkpointing. It will return a dictionary containing all
    non-TensorFlow objects (to be saved into a file by the caller), and it saves
    all TensorFlow objects into a checkpoint file.

    Args:
      checkpoint_dir: str, directory where TensorFlow objects will be saved.
      iteration_number: int, iteration number to use for naming the checkpoint
        file.

    Returns:
      A dict containing additional Python objects to be checkpointed by the
      experiment. If the checkpoint directory does not exist, returns None.
    """
    if not tf.gfile.Exists(checkpoint_dir):
      return None
    # Sub-agent and generators each checkpoint into their own subdirectory;
    # their bundle keys are suffixed with a 2-character appendix so they can
    # be told apart again in unbundle().
    agent_path = os.path.join(checkpoint_dir, AGENT_SUBDIR)
    if not os.path.exists(agent_path):
      os.mkdir(agent_path)
    agent_bundle = self.rl_agent.bundle_and_checkpoint(agent_path,
                                                       iteration_number)
    agent_bundle = {k + AGENT_APPENDIX: v
                    for (k, v) in agent_bundle.items()}
    observ_path = os.path.join(checkpoint_dir, OBSERV_SUBDIR)
    if not os.path.exists(observ_path):
      os.mkdir(observ_path)
    observ_bundle = self.observ_gen.bundle_and_checkpoint(observ_path,
                                                          iteration_number)
    observ_bundle = {k + OBSERV_APPENDIX: v
                     for (k, v) in observ_bundle.items()}
    rewterm_path = os.path.join(checkpoint_dir, REWTERM_SUBDIR)
    if not os.path.exists(rewterm_path):
      os.mkdir(rewterm_path)
    rewterm_bundle = self.rewterm_gen.bundle_and_checkpoint(rewterm_path,
                                                            iteration_number)
    rewterm_bundle = {k + REWTERM_APPENDIX: v
                      for (k, v) in rewterm_bundle.items()}
    # Replay memories persist themselves; they contribute nothing to the
    # returned bundle dict.
    train_mem_path = os.path.join(checkpoint_dir, TRAIN_MEM_SUBDIR)
    if not os.path.exists(train_mem_path):
      os.mkdir(train_mem_path)
    self._train_memory.save(train_mem_path, iteration_number)
    test_mem_path = os.path.join(checkpoint_dir, TEST_MEM_SUBDIR)
    if not os.path.exists(test_mem_path):
      os.mkdir(test_mem_path)
    self._test_memory.save(test_mem_path, iteration_number)
    # GAIRL's own counters are stored under their plain attribute names so
    # unbundle() can restore them directly into __dict__.
    gairl_bundle = {
        'model_free_steps': self.model_free_steps,
        'model_free_steps_since_phase_start':
            self.model_free_steps_since_phase_start,
        'model_learning_steps': self.model_learning_steps,
        'model_based_steps': self.model_based_steps,
        'model_based_steps_since_last_log':
            self.model_based_steps_since_last_log,
        'model_based_steps_since_phase_start':
            self.model_based_steps_since_phase_start,
        'terminals_so_far': self.terminals_so_far,
        'non_terminals_so_far': self.non_terminals_so_far
    }
    return {**agent_bundle, **observ_bundle, **rewterm_bundle, **gairl_bundle}
  def unbundle(self, checkpoint_dir, iteration_number, bundle_dictionary):
    """Restores the agent from a checkpoint.

    Restores the agent's Python objects to those specified in bundle_dictionary,
    and restores the TensorFlow objects to those specified in the
    checkpoint_dir. If the checkpoint_dir does not exist, will not reset the
    agent's state.

    Args:
      checkpoint_dir: str, path to the checkpoint saved by tf.Save.
      iteration_number: int, checkpoint version, used when restoring the replay
        buffer.
      bundle_dictionary: dict, containing additional Python objects owned by
        the agent.

    Returns:
      bool, True if unbundling was successful.
    """
    # Each sub-component's entries are recognised by their 2-character
    # appendix (k[-2:]) and handed over with the appendix stripped (k[:-2]),
    # mirroring how bundle_and_checkpoint() tagged them.
    agent_path = os.path.join(checkpoint_dir, AGENT_SUBDIR)
    agent_bundle = {k[:-2]: v for k, v in bundle_dictionary.items()
                    if k[-2:] == AGENT_APPENDIX}
    if not self.rl_agent.unbundle(agent_path, iteration_number,
                                  agent_bundle):
      return False
    observ_path = os.path.join(checkpoint_dir, OBSERV_SUBDIR)
    observ_bundle = {k[:-2]: v for k, v in bundle_dictionary.items()
                     if k[-2:] == OBSERV_APPENDIX}
    if not self.observ_gen.unbundle(observ_path, iteration_number,
                                    observ_bundle):
      return False
    rewterm_path = os.path.join(checkpoint_dir, REWTERM_SUBDIR)
    rewterm_bundle = {k[:-2]: v for k, v in bundle_dictionary.items()
                      if k[-2:] == REWTERM_APPENDIX}
    if not self.rewterm_gen.unbundle(rewterm_path, iteration_number,
                                     rewterm_bundle):
      return False
    train_mem_path = os.path.join(checkpoint_dir, TRAIN_MEM_SUBDIR)
    self._train_memory.load(train_mem_path, iteration_number)
    test_mem_path = os.path.join(checkpoint_dir, TEST_MEM_SUBDIR)
    self._test_memory.load(test_mem_path, iteration_number)
    # Restore GAIRL's own counters: any bundle key matching an attribute
    # name is written straight into __dict__.
    for key in self.__dict__:
      if key in bundle_dictionary:
        self.__dict__[key] = bundle_dictionary[key]
    return True
| [
"sys.stdout.write",
"os.mkdir",
"tensorflow.gfile.Exists",
"dopamine.agents.abstract_agent.AbstractAgent.__init__",
"numpy.sum",
"numpy.abs",
"tensorflow.logging.info",
"dopamine.generators.wgan.wgan.WassersteinGAN",
"dopamine.generators.wgan_gp.wgan_gp.WassersteinGANGP",
"numpy.clip",
"collecti... | [((1764, 1787), 'numpy.sum', 'np.sum', (['(output * target)'], {}), '(output * target)\n', (1770, 1787), True, 'import numpy as np\n'), ((1678, 1699), 'numpy.clip', 'np.clip', (['output', '(0)', '(1)'], {}), '(output, 0, 1)\n', (1685, 1699), True, 'import numpy as np\n'), ((1721, 1742), 'numpy.clip', 'np.clip', (['target', '(0)', '(1)'], {}), '(target, 0, 1)\n', (1728, 1742), True, 'import numpy as np\n'), ((1868, 1882), 'numpy.sum', 'np.sum', (['output'], {}), '(output)\n', (1874, 1882), True, 'import numpy as np\n'), ((1911, 1925), 'numpy.sum', 'np.sum', (['target'], {}), '(target)\n', (1917, 1925), True, 'import numpy as np\n'), ((3185, 3358), 'dopamine.agents.dqn.dqn_agent.DQNAgent', 'dqn_agent.DQNAgent', (['sess', 'num_actions'], {'observation_shape': 'observation_shape', 'observation_dtype': 'observation_dtype', 'stack_size': 'stack_size', 'summary_writer': 'summary_writer'}), '(sess, num_actions, observation_shape=observation_shape,\n observation_dtype=observation_dtype, stack_size=stack_size,\n summary_writer=summary_writer)\n', (3203, 3358), False, 'from dopamine.agents.dqn import dqn_agent\n'), ((4629, 4673), 'dopamine.generators.dummy_generator.DummyGenerator', 'dummy_generator.DummyGenerator', (['output_shape'], {}), '(output_shape)\n', (4659, 4673), False, 'from dopamine.generators import dummy_generator\n'), ((8996, 9093), 'tensorflow.logging.info', 'tf.logging.info', (['"""Creating %s agent with the following parameters:"""', 'self.__class__.__name__'], {}), "('Creating %s agent with the following parameters:', self.\n __class__.__name__)\n", (9011, 9093), True, 'import tensorflow as tf\n'), ((9113, 9170), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Model free agent: %s"""', 'rl_agent_name'], {}), "('\\t Model free agent: %s', rl_agent_name)\n", (9128, 9170), True, 'import tensorflow as tf\n'), ((9175, 9239), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Observation generator: %s"""', 'observ_gen_name'], {}), "('\\t 
Observation generator: %s', observ_gen_name)\n", (9190, 9239), True, 'import tensorflow as tf\n'), ((9244, 9305), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Rewterm generator: %s"""', 'rewterm_gen_name'], {}), "('\\t Rewterm generator: %s', rewterm_gen_name)\n", (9259, 9305), True, 'import tensorflow as tf\n'), ((9310, 9372), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Model free length: %d"""', 'model_free_length'], {}), "('\\t Model free length: %d', model_free_length)\n", (9325, 9372), True, 'import tensorflow as tf\n'), ((9377, 9447), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Model learning length: %d"""', 'model_learning_length'], {}), "('\\t Model learning length: %d', model_learning_length)\n", (9392, 9447), True, 'import tensorflow as tf\n'), ((9452, 9550), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Model based max steps per episode: %d"""', 'model_based_max_steps_per_episode'], {}), "('\\t Model based max steps per episode: %d',\n model_based_max_steps_per_episode)\n", (9467, 9550), True, 'import tensorflow as tf\n'), ((9571, 9661), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Terminals upsampling coefficient: %d"""', 'terminals_upsampling_coeff'], {}), "('\\t Terminals upsampling coefficient: %d',\n terminals_upsampling_coeff)\n", (9586, 9661), True, 'import tensorflow as tf\n'), ((9682, 9746), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Model based length: %d"""', 'model_based_length'], {}), "('\\t Model based length: %d', model_based_length)\n", (9697, 9746), True, 'import tensorflow as tf\n'), ((9751, 9821), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Train memory capacity: %d"""', 'train_memory_capacity'], {}), "('\\t Train memory capacity: %d', train_memory_capacity)\n", (9766, 9821), True, 'import tensorflow as tf\n'), ((9826, 9894), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Test memory capacity: %d"""', 'test_memory_capacity'], {}), "('\\t Test memory 
capacity: %d', test_memory_capacity)\n", (9841, 9894), True, 'import tensorflow as tf\n'), ((9899, 9961), 'tensorflow.logging.info', 'tf.logging.info', (['"""\t Memory batch size: %d"""', 'memory_batch_size'], {}), "('\\t Memory batch size: %d', memory_batch_size)\n", (9914, 9961), True, 'import tensorflow as tf\n'), ((9967, 10073), 'dopamine.agents.abstract_agent.AbstractAgent.__init__', 'AbstractAgent.__init__', (['self', 'num_actions'], {'observation_shape': 'observation_shape', 'stack_size': 'stack_size'}), '(self, num_actions, observation_shape=\n observation_shape, stack_size=stack_size)\n', (9989, 10073), False, 'from dopamine.agents.abstract_agent import AbstractAgent\n'), ((11048, 11107), 'numpy.eye', 'np.eye', (['num_actions'], {'dtype': 'observation_dtype.as_numpy_dtype'}), '(num_actions, dtype=observation_dtype.as_numpy_dtype)\n', (11054, 11107), True, 'import numpy as np\n'), ((13295, 13470), 'dopamine.replay_memory.circular_replay_buffer.OutOfGraphReplayBuffer', 'circular_replay_buffer.OutOfGraphReplayBuffer', (['self.observation_shape', 'self.stack_size', 'capacity', 'batch_size'], {'observation_dtype': 'self.observation_dtype.as_numpy_dtype'}), '(self.observation_shape, self.\n stack_size, capacity, batch_size, observation_dtype=self.\n observation_dtype.as_numpy_dtype)\n', (13340, 13470), False, 'from dopamine.replay_memory import circular_replay_buffer\n'), ((17073, 17128), 'tensorflow.logging.info', 'tf.logging.info', (['"""***Starting model learning phase.***"""'], {}), "('***Starting model learning phase.***')\n", (17088, 17128), True, 'import tensorflow as tf\n'), ((17146, 17157), 'time.time', 'time.time', ([], {}), '()\n', (17155, 17157), False, 'import time\n'), ((17180, 17208), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (17203, 17208), False, 'import collections\n'), ((18713, 18768), 'tensorflow.logging.info', 'tf.logging.info', (['"""***Finished model learning phase.***"""'], {}), "('***Finished model 
learning phase.***')\n", (18728, 18768), True, 'import tensorflow as tf\n'), ((21422, 21469), 'numpy.column_stack', 'np.column_stack', (['(batch_data[2], batch_data[6])'], {}), '((batch_data[2], batch_data[6]))\n', (21437, 21469), True, 'import numpy as np\n'), ((21565, 21617), 'tensorflow.logging.info', 'tf.logging.info', (['"""***Starting model based phase.***"""'], {}), "('***Starting model based phase.***')\n", (21580, 21617), True, 'import tensorflow as tf\n'), ((21761, 21772), 'time.time', 'time.time', ([], {}), '()\n', (21770, 21772), False, 'import time\n'), ((23131, 23183), 'tensorflow.logging.info', 'tf.logging.info', (['"""***Finished model based phase.***"""'], {}), "('***Finished model based phase.***')\n", (23146, 23183), True, 'import tensorflow as tf\n'), ((23632, 23692), 'numpy.zeros', 'np.zeros', (['((1,) + self.observation_shape + (self.stack_size,))'], {}), '((1,) + self.observation_shape + (self.stack_size,))\n', (23640, 23692), True, 'import numpy as np\n'), ((25660, 25707), 'numpy.reshape', 'np.reshape', (['observation', 'self.observation_shape'], {}), '(observation, self.observation_shape)\n', (25670, 25707), True, 'import numpy as np\n'), ((25776, 25803), 'numpy.roll', 'np.roll', (['state', '(-1)'], {'axis': '(-1)'}), '(state, -1, axis=-1)\n', (25783, 25803), True, 'import numpy as np\n'), ((26695, 26737), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'AGENT_SUBDIR'], {}), '(checkpoint_dir, AGENT_SUBDIR)\n', (26707, 26737), False, 'import os\n'), ((27061, 27104), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'OBSERV_SUBDIR'], {}), '(checkpoint_dir, OBSERV_SUBDIR)\n', (27073, 27104), False, 'import os\n'), ((27442, 27486), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'REWTERM_SUBDIR'], {}), '(checkpoint_dir, REWTERM_SUBDIR)\n', (27454, 27486), False, 'import os\n'), ((27837, 27883), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'TRAIN_MEM_SUBDIR'], {}), '(checkpoint_dir, TRAIN_MEM_SUBDIR)\n', (27849, 27883), False, 
'import os\n'), ((28041, 28086), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'TEST_MEM_SUBDIR'], {}), '(checkpoint_dir, TEST_MEM_SUBDIR)\n', (28053, 28086), False, 'import os\n'), ((29590, 29632), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'AGENT_SUBDIR'], {}), '(checkpoint_dir, AGENT_SUBDIR)\n', (29602, 29632), False, 'import os\n'), ((29901, 29944), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'OBSERV_SUBDIR'], {}), '(checkpoint_dir, OBSERV_SUBDIR)\n', (29913, 29944), False, 'import os\n'), ((30223, 30267), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'REWTERM_SUBDIR'], {}), '(checkpoint_dir, REWTERM_SUBDIR)\n', (30235, 30267), False, 'import os\n'), ((30555, 30601), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'TRAIN_MEM_SUBDIR'], {}), '(checkpoint_dir, TRAIN_MEM_SUBDIR)\n', (30567, 30601), False, 'import os\n'), ((30685, 30730), 'os.path.join', 'os.path.join', (['checkpoint_dir', 'TEST_MEM_SUBDIR'], {}), '(checkpoint_dir, TEST_MEM_SUBDIR)\n', (30697, 30730), False, 'import os\n'), ((3418, 3601), 'dopamine.agents.rainbow.rainbow_agent.RainbowAgent', 'rainbow_agent.RainbowAgent', (['sess', 'num_actions'], {'observation_shape': 'observation_shape', 'observation_dtype': 'observation_dtype', 'stack_size': 'stack_size', 'summary_writer': 'summary_writer'}), '(sess, num_actions, observation_shape=\n observation_shape, observation_dtype=observation_dtype, stack_size=\n stack_size, summary_writer=summary_writer)\n', (3444, 3601), False, 'from dopamine.agents.rainbow import rainbow_agent\n'), ((4723, 4812), 'dopamine.generators.regressor.regressor.Regressor', 'regressor.Regressor', (['sess', 'input_shapes', 'output_shape'], {'summary_writer': 'summary_writer'}), '(sess, input_shapes, output_shape, summary_writer=\n summary_writer)\n', (4742, 4812), False, 'from dopamine.generators.regressor import regressor\n'), ((11294, 11320), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""agent"""'], {}), "('agent')\n", (11311, 11320), 
True, 'import tensorflow as tf\n'), ((11695, 11726), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""observ_gen"""'], {}), "('observ_gen')\n", (11712, 11726), True, 'import tensorflow as tf\n'), ((12038, 12070), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""rewterm_gen"""'], {}), "('rewterm_gen')\n", (12055, 12070), True, 'import tensorflow as tf\n'), ((13761, 13776), 'random.random', 'random.random', ([], {}), '()\n', (13774, 13776), False, 'import random\n'), ((13863, 13910), 'numpy.reshape', 'np.reshape', (['observation', 'self.observation_shape'], {}), '(observation, self.observation_shape)\n', (13873, 13910), True, 'import numpy as np\n'), ((14665, 14712), 'numpy.reshape', 'np.reshape', (['observation', 'self.observation_shape'], {}), '(observation, self.observation_shape)\n', (14675, 14712), True, 'import numpy as np\n'), ((19166, 19193), 'tensorflow.Summary', 'tf.Summary', ([], {'value': 'summaries'}), '(value=summaries)\n', (19176, 19193), True, 'import tensorflow as tf\n'), ((19604, 19647), 'numpy.abs', 'np.abs', (['(gen_next_observ - batch_next_observ)'], {}), '(gen_next_observ - batch_next_observ)\n', (19610, 19647), True, 'import numpy as np\n'), ((19805, 19840), 'numpy.abs', 'np.abs', (['(gen_rewterm - batch_rewterm)'], {}), '(gen_rewterm - batch_rewterm)\n', (19811, 19840), True, 'import numpy as np\n'), ((19866, 19902), 'numpy.square', 'np.square', (['(gen_reward - batch_reward)'], {}), '(gen_reward - batch_reward)\n', (19875, 19902), True, 'import numpy as np\n'), ((20041, 20118), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Observ/{test_or_train}L1Loss"""', 'simple_value': 'observ_l1'}), "(tag=f'Observ/{test_or_train}L1Loss', simple_value=observ_l1)\n", (20057, 20118), True, 'import tensorflow as tf\n'), ((20149, 20228), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Rewterm/{test_or_train}L1Loss"""', 'simple_value': 'rewterm_l1'}), "(tag=f'Rewterm/{test_or_train}L1Loss', 
simple_value=rewterm_l1)\n", (20165, 20228), True, 'import tensorflow as tf\n'), ((20259, 20348), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Rewterm/{test_or_train}RewardL2Loss"""', 'simple_value': 'reward_l2'}), "(tag=f'Rewterm/{test_or_train}RewardL2Loss', simple_value=\n reward_l2)\n", (20275, 20348), True, 'import tensorflow as tf\n'), ((20374, 20472), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Rewterm/{test_or_train}TerminalPrecision"""', 'simple_value': 'term_precision'}), "(tag=f'Rewterm/{test_or_train}TerminalPrecision',\n simple_value=term_precision)\n", (20390, 20472), True, 'import tensorflow as tf\n'), ((20499, 20592), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Rewterm/{test_or_train}TerminalRecall"""', 'simple_value': 'term_recall'}), "(tag=f'Rewterm/{test_or_train}TerminalRecall', simple_value\n =term_recall)\n", (20515, 20592), True, 'import tensorflow as tf\n'), ((20618, 20703), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': 'f"""Rewterm/{test_or_train}TerminalF1"""', 'simple_value': 'term_f1'}), "(tag=f'Rewterm/{test_or_train}TerminalF1', simple_value=term_f1\n )\n", (20634, 20703), True, 'import tensorflow as tf\n'), ((22237, 22385), 'sys.stdout.write', 'sys.stdout.write', (["(f'Steps executed so far: {self.model_based_steps_since_last_log} ' +\n f'Episode length: {length} ' + f'Return: {reward}\\r')"], {}), "(\n f'Steps executed so far: {self.model_based_steps_since_last_log} ' +\n f'Episode length: {length} ' + f'Return: {reward}\\r')\n", (22253, 22385), False, 'import sys\n'), ((22456, 22474), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (22472, 22474), False, 'import sys\n'), ((24318, 24340), 'numpy.clip', 'np.clip', (['reward', '(-1)', '(1)'], {}), '(reward, -1, 1)\n', (24325, 24340), True, 'import numpy as np\n'), ((24400, 24426), 'numpy.clip', 'np.clip', (['is_terminal', '(0)', '(1)'], {}), '(is_terminal, 0, 1)\n', (24407, 24426), True, 'import 
numpy as np\n'), ((26626, 26657), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (26641, 26657), True, 'import tensorflow as tf\n'), ((26749, 26775), 'os.path.exists', 'os.path.exists', (['agent_path'], {}), '(agent_path)\n', (26763, 26775), False, 'import os\n'), ((26783, 26803), 'os.mkdir', 'os.mkdir', (['agent_path'], {}), '(agent_path)\n', (26791, 26803), False, 'import os\n'), ((27116, 27143), 'os.path.exists', 'os.path.exists', (['observ_path'], {}), '(observ_path)\n', (27130, 27143), False, 'import os\n'), ((27151, 27172), 'os.mkdir', 'os.mkdir', (['observ_path'], {}), '(observ_path)\n', (27159, 27172), False, 'import os\n'), ((27498, 27526), 'os.path.exists', 'os.path.exists', (['rewterm_path'], {}), '(rewterm_path)\n', (27512, 27526), False, 'import os\n'), ((27534, 27556), 'os.mkdir', 'os.mkdir', (['rewterm_path'], {}), '(rewterm_path)\n', (27542, 27556), False, 'import os\n'), ((27895, 27925), 'os.path.exists', 'os.path.exists', (['train_mem_path'], {}), '(train_mem_path)\n', (27909, 27925), False, 'import os\n'), ((27933, 27957), 'os.mkdir', 'os.mkdir', (['train_mem_path'], {}), '(train_mem_path)\n', (27941, 27957), False, 'import os\n'), ((28098, 28127), 'os.path.exists', 'os.path.exists', (['test_mem_path'], {}), '(test_mem_path)\n', (28112, 28127), False, 'import os\n'), ((28135, 28158), 'os.mkdir', 'os.mkdir', (['test_mem_path'], {}), '(test_mem_path)\n', (28143, 28158), False, 'import os\n'), ((3669, 3768), 'dopamine.agents.implicit_quantile.implicit_quantile_agent.ImplicitQuantileAgent', 'implicit_quantile_agent.ImplicitQuantileAgent', (['sess', 'num_actions'], {'summary_writer': 'summary_writer'}), '(sess, num_actions,\n summary_writer=summary_writer)\n', (3714, 3768), False, 'from dopamine.agents.implicit_quantile import implicit_quantile_agent\n'), ((4883, 4991), 'dopamine.generators.gan.gan.VanillaGAN', 'gan.VanillaGAN', (['sess', 'output_shape'], {'conditional_input_shapes': 'input_shapes', 
'summary_writer': 'summary_writer'}), '(sess, output_shape, conditional_input_shapes=input_shapes,\n summary_writer=summary_writer)\n', (4897, 4991), False, 'from dopamine.generators.gan import gan\n'), ((18136, 18190), 'tensorflow.logging.info', 'tf.logging.info', (['"""Step: %d"""', 'self.model_learning_steps'], {}), "('Step: %d', self.model_learning_steps)\n", (18151, 18190), True, 'import tensorflow as tf\n'), ((18316, 18431), 'tensorflow.logging.info', 'tf.logging.info', (['"""Average training steps per second: %.2f"""', '(self.model_learning_logging_frequency / time_delta)'], {}), "('Average training steps per second: %.2f', self.\n model_learning_logging_frequency / time_delta)\n", (18331, 18431), True, 'import tensorflow as tf\n'), ((18472, 18483), 'time.time', 'time.time', ([], {}), '()\n', (18481, 18483), False, 'import time\n'), ((18510, 18538), 'collections.defaultdict', 'collections.defaultdict', (['int'], {}), '(int)\n', (18533, 18538), False, 'import collections\n'), ((22707, 22783), 'tensorflow.logging.info', 'tf.logging.info', (['"""Average return per training episode: %.2f"""', 'average_return'], {}), "('Average return per training episode: %.2f', average_return)\n", (22722, 22783), True, 'import tensorflow as tf\n'), ((22816, 22931), 'tensorflow.logging.info', 'tf.logging.info', (['"""Average training steps per second: %.2f"""', '(self.model_based_steps_since_last_log / time_delta)'], {}), "('Average training steps per second: %.2f', self.\n model_based_steps_since_last_log / time_delta)\n", (22831, 22931), True, 'import tensorflow as tf\n'), ((22972, 22983), 'time.time', 'time.time', ([], {}), '()\n', (22981, 22983), False, 'import time\n'), ((5084, 5198), 'dopamine.generators.wgan.wgan.WassersteinGAN', 'wgan.WassersteinGAN', (['sess', 'output_shape'], {'conditional_input_shapes': 'input_shapes', 'summary_writer': 'summary_writer'}), '(sess, output_shape, conditional_input_shapes=\n input_shapes, summary_writer=summary_writer)\n', (5103, 5198), 
False, 'from dopamine.generators.wgan import wgan\n'), ((18103, 18114), 'time.time', 'time.time', ([], {}), '()\n', (18112, 18114), False, 'import time\n'), ((22594, 22605), 'time.time', 'time.time', ([], {}), '()\n', (22603, 22605), False, 'import time\n'), ((5303, 5422), 'dopamine.generators.wgan_gp.wgan_gp.WassersteinGANGP', 'wgan_gp.WassersteinGANGP', (['sess', 'output_shape'], {'conditional_input_shapes': 'input_shapes', 'summary_writer': 'summary_writer'}), '(sess, output_shape, conditional_input_shapes=\n input_shapes, summary_writer=summary_writer)\n', (5327, 5422), False, 'from dopamine.generators.wgan_gp import wgan_gp\n')] |
import numbers
import os
import queue as Queue
import threading
import mxnet as mx
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
from insightface.app import MaskAugmentation
class BackgroundGenerator(threading.Thread):
    """Daemon thread that prefetches items from a generator into a queue.

    The bounded queue (``max_prefetch`` slots) decouples the producer
    (the wrapped generator) from the consumer. A ``None`` sentinel put on
    the queue marks exhaustion and triggers ``StopIteration``.
    """

    def __init__(self, generator, local_rank, max_prefetch=6):
        super(BackgroundGenerator, self).__init__()
        self.queue = Queue.Queue(max_prefetch)
        self.generator = generator
        self.local_rank = local_rank
        # Daemon thread: does not block interpreter shutdown.
        self.daemon = True
        self.start()

    def run(self):
        # Bind this producer thread to the same CUDA device as the consumer.
        torch.cuda.set_device(self.local_rank)
        for produced in self.generator:
            self.queue.put(produced)
        # Sentinel marking the end of the generator.
        self.queue.put(None)

    def next(self):
        fetched = self.queue.get()
        if fetched is None:
            raise StopIteration
        return fetched

    # Python 3 iterator protocol delegates to next().
    __next__ = next

    def __iter__(self):
        return self
class DataLoaderX(DataLoader):
    # DataLoader variant that prefetches batches on a background thread
    # (BackgroundGenerator) and copies each batch to the GPU of
    # `local_rank` on a dedicated CUDA stream.
    def __init__(self, local_rank, **kwargs):
        super(DataLoaderX, self).__init__(**kwargs)
        # Side stream used for the asynchronous host-to-device copies.
        self.stream = torch.cuda.Stream(local_rank)
        self.local_rank = local_rank
    def __iter__(self):
        # Wrap the regular DataLoader iterator with background prefetching
        # and eagerly stage the first batch.
        self.iter = super(DataLoaderX, self).__iter__()
        self.iter = BackgroundGenerator(self.iter, self.local_rank)
        self.preload()
        return self
    def preload(self):
        # Fetch the next prefetched batch; None signals end of the epoch.
        self.batch = next(self.iter, None)
        if self.batch is None:
            return None
        # Issue non-blocking copies of every element on the side stream.
        with torch.cuda.stream(self.stream):
            for k in range(len(self.batch)):
                self.batch[k] = self.batch[k].to(device=self.local_rank,
                                                 non_blocking=True)
    def __next__(self):
        # Block the current stream until the async copies have finished
        # before handing the batch out, then stage the next one.
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.batch
        if batch is None:
            raise StopIteration
        self.preload()
        return batch
class MXFaceDataset(Dataset):
    """Face dataset backed by MXNet indexed RecordIO files.

    Reads ``train.rec``/``train.idx`` from ``root_dir``, decodes each record
    to an RGB image and applies an albumentations pipeline assembled from
    ``aug_modes`` — a '+'-separated list of ``name`` or ``name=prob``
    entries; supported names are 'brightness', 'blur' and 'mask'.
    """
    def __init__(self, root_dir, local_rank, aug_modes="brightness=0.1+mask=0.1"):
        super(MXFaceDataset, self).__init__()
        # Probabilities used when an augmentation is named without '=prob'.
        default_aug_probs = {
            'brightness' : 0.2,
            'blur': 0.1,
            'mask': 0.1,
        }
        # Parse "name=prob+name=prob" into {name: prob}.
        aug_mode_list = aug_modes.lower().split('+')
        aug_mode_map = {}
        for aug_mode_str in aug_mode_list:
            _aug = aug_mode_str.split('=')
            aug_key = _aug[0]
            if len(_aug)>1:
                aug_prob = float(_aug[1])
            else:
                aug_prob = default_aug_probs[aug_key]
            aug_mode_map[aug_key] = aug_prob
        transform_list = []
        self.mask_aug = False
        self.mask_prob = 0.0
        key = 'mask'
        if key in aug_mode_map:
            self.mask_aug = True
            self.mask_prob = aug_mode_map[key]
            # Synthetic face-mask overlay (insightface); consumes the
            # per-sample 'hlabel' argument passed in __getitem__.
            transform_list.append(
                MaskAugmentation(mask_names=['mask_white', 'mask_blue', 'mask_black', 'mask_green'], mask_probs=[0.4, 0.4, 0.1, 0.1], h_low=0.33, h_high=0.4, p=self.mask_prob)
                )
        if local_rank==0:
            print('data_transform_list:', transform_list)
            print('mask:', self.mask_aug, self.mask_prob)
        key = 'brightness'
        if key in aug_mode_map:
            prob = aug_mode_map[key]
            transform_list.append(
                A.RandomBrightnessContrast(brightness_limit=0.125, contrast_limit=0.05, p=prob)
                )
        key = 'blur'
        if key in aug_mode_map:
            prob = aug_mode_map[key]
            # 'blur' enables a group of image-degradation transforms
            # (JPEG compression artifacts plus two blur kinds).
            transform_list.append(
                A.ImageCompression(quality_lower=30, quality_upper=80, p=prob)
                )
            transform_list.append(
                A.MedianBlur(blur_limit=(1,7), p=prob)
                )
            transform_list.append(
                A.MotionBlur(blur_limit=(5,12), p=prob)
                )
        # Always applied: flip, [-1, 1] normalization, tensor conversion.
        transform_list += \
            [
                A.HorizontalFlip(p=0.5),
                A.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
                ToTensorV2(),
            ]
        #here, the input for A transform is rgb cv2 img
        self.transform = A.Compose(
            transform_list
        )
        self.root_dir = root_dir
        self.local_rank = local_rank
        path_imgrec = os.path.join(root_dir, 'train.rec')
        path_imgidx = os.path.join(root_dir, 'train.idx')
        self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
        s = self.imgrec.read_idx(0)
        header, _ = mx.recordio.unpack(s)
        #print(header)
        #print(len(self.imgrec.keys))
        if header.flag > 0:
            # Record 0 appears to carry metadata: with a 2-element label,
            # label[0] bounds the image ids, so valid ids are 1..label[0]-1.
            if len(header.label)==2:
                self.imgidx = np.array(range(1, int(header.label[0])))
            else:
                self.imgidx = np.array(list(self.imgrec.keys))
        else:
            self.imgidx = np.array(list(self.imgrec.keys))
        #print('imgidx len:', len(self.imgidx))
    def __getitem__(self, index):
        # Map the dataset index to a RecordIO id and unpack the raw record.
        idx = self.imgidx[index]
        s = self.imgrec.read_idx(idx)
        header, img = mx.recordio.unpack(s)
        hlabel = header.label
        #print('hlabel:', hlabel.__class__)
        sample = mx.image.imdecode(img).asnumpy()
        # The label may be a scalar or an array whose first entry is the id.
        if not isinstance(hlabel, numbers.Number):
            idlabel = hlabel[0]
        else:
            idlabel = hlabel
        label = torch.tensor(idlabel, dtype=torch.long)
        if self.transform is not None:
            # hlabel is forwarded so label-aware transforms (MaskAugmentation)
            # can use it.
            sample = self.transform(image=sample, hlabel=hlabel)['image']
        return sample, label
    def __len__(self):
        return len(self.imgidx)
if __name__ == "__main__":
    # Visual smoke test: tile augmented samples into one PNG grid.
    import argparse, cv2, copy
    parser = argparse.ArgumentParser(description='dataset test')
    parser.add_argument('--dataset', type=str, help='dataset path')
    parser.add_argument('--samples', type=int, default=256, help='')
    parser.add_argument('--cols', type=int, default=16, help='')
    args = parser.parse_args()
    assert args.samples%args.cols==0
    assert args.cols%2==0
    samples = args.samples
    cols = args.cols
    rows = args.samples // args.cols
    # Force mask augmentation on every sample so its effect is visible.
    dataset = MXFaceDataset(root_dir=args.dataset, local_rank=0, aug_modes='mask=1.0')
    # Strip normalization/tensor conversion so samples stay displayable uint8 images.
    dataset.transform = A.Compose([t for t in dataset.transform if not isinstance(t, (A.Normalize, ToTensorV2))])
    dataset_0 = copy.deepcopy(dataset)
    #dataset_0.transform = None
    dataset_1 = copy.deepcopy(dataset)
    #dataset_1.transform = A.Compose(
    #  [
    #    A.RandomBrightnessContrast(brightness_limit=0.125, contrast_limit=0.05, p=1.0),
    #    A.ImageCompression(quality_lower=30, quality_upper=80, p=1.0),
    #    A.MedianBlur(blur_limit=(1,7), p=1.0),
    #    A.MotionBlur(blur_limit=(5,12), p=1.0),
    #    A.Affine(scale=(0.92, 1.08), translate_percent=(-0.06, 0.06), rotate=(-6, 6), shear=None, interpolation=cv2.INTER_LINEAR, p=1.0),
    #  ]
    #)
    # Each sample tile is 112x112 RGB.
    fig = np.zeros( (112*rows, 112*cols, 3), dtype=np.uint8 )
    for idx in range(samples):
        # Alternate between the two dataset copies (currently identical).
        if idx%2==0:
            image, _ = dataset_0[idx//2]
        else:
            image, _ = dataset_1[idx//2]
        row_idx = idx // cols
        col_idx = idx % cols
        fig[row_idx*112:(row_idx+1)*112, col_idx*112:(col_idx+1)*112,:] = image[:,:,::-1] # to bgr
    cv2.imwrite("./datasets.png", fig)
| [
"mxnet.recordio.MXIndexedRecordIO",
"argparse.ArgumentParser",
"mxnet.image.imdecode",
"albumentations.Normalize",
"albumentations.HorizontalFlip",
"albumentations.ImageCompression",
"os.path.join",
"albumentations.MotionBlur",
"insightface.app.MaskAugmentation",
"cv2.imwrite",
"torch.cuda.set_d... | [((5808, 5859), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""dataset test"""'}), "(description='dataset test')\n", (5831, 5859), False, 'import argparse, cv2, copy\n'), ((6459, 6481), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (6472, 6481), False, 'import argparse, cv2, copy\n'), ((6530, 6552), 'copy.deepcopy', 'copy.deepcopy', (['dataset'], {}), '(dataset)\n', (6543, 6552), False, 'import argparse, cv2, copy\n'), ((7048, 7101), 'numpy.zeros', 'np.zeros', (['(112 * rows, 112 * cols, 3)'], {'dtype': 'np.uint8'}), '((112 * rows, 112 * cols, 3), dtype=np.uint8)\n', (7056, 7101), True, 'import numpy as np\n'), ((7410, 7444), 'cv2.imwrite', 'cv2.imwrite', (['"""./datasets.png"""', 'fig'], {}), "('./datasets.png', fig)\n", (7421, 7444), False, 'import argparse, cv2, copy\n'), ((512, 537), 'queue.Queue', 'Queue.Queue', (['max_prefetch'], {}), '(max_prefetch)\n', (523, 537), True, 'import queue as Queue\n'), ((686, 724), 'torch.cuda.set_device', 'torch.cuda.set_device', (['self.local_rank'], {}), '(self.local_rank)\n', (707, 724), False, 'import torch\n'), ((1218, 1247), 'torch.cuda.Stream', 'torch.cuda.Stream', (['local_rank'], {}), '(local_rank)\n', (1235, 1247), False, 'import torch\n'), ((4287, 4312), 'albumentations.Compose', 'A.Compose', (['transform_list'], {}), '(transform_list)\n', (4296, 4312), True, 'import albumentations as A\n'), ((4428, 4463), 'os.path.join', 'os.path.join', (['root_dir', '"""train.rec"""'], {}), "(root_dir, 'train.rec')\n", (4440, 4463), False, 'import os\n'), ((4486, 4521), 'os.path.join', 'os.path.join', (['root_dir', '"""train.idx"""'], {}), "(root_dir, 'train.idx')\n", (4498, 4521), False, 'import os\n'), ((4544, 4604), 'mxnet.recordio.MXIndexedRecordIO', 'mx.recordio.MXIndexedRecordIO', (['path_imgidx', 'path_imgrec', '"""r"""'], {}), "(path_imgidx, path_imgrec, 'r')\n", (4573, 4604), True, 'import mxnet as mx\n'), ((4661, 4682), 'mxnet.recordio.unpack', 
'mx.recordio.unpack', (['s'], {}), '(s)\n', (4679, 4682), True, 'import mxnet as mx\n'), ((5210, 5231), 'mxnet.recordio.unpack', 'mx.recordio.unpack', (['s'], {}), '(s)\n', (5228, 5231), True, 'import mxnet as mx\n'), ((5498, 5537), 'torch.tensor', 'torch.tensor', (['idlabel'], {'dtype': 'torch.long'}), '(idlabel, dtype=torch.long)\n', (5510, 5537), False, 'import torch\n'), ((1612, 1642), 'torch.cuda.stream', 'torch.cuda.stream', (['self.stream'], {}), '(self.stream)\n', (1629, 1642), False, 'import torch\n'), ((4065, 4088), 'albumentations.HorizontalFlip', 'A.HorizontalFlip', ([], {'p': '(0.5)'}), '(p=0.5)\n', (4081, 4088), True, 'import albumentations as A\n'), ((4106, 4160), 'albumentations.Normalize', 'A.Normalize', ([], {'mean': '[0.5, 0.5, 0.5]', 'std': '[0.5, 0.5, 0.5]'}), '(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])\n', (4117, 4160), True, 'import albumentations as A\n'), ((4178, 4190), 'albumentations.pytorch.ToTensorV2', 'ToTensorV2', ([], {}), '()\n', (4188, 4190), False, 'from albumentations.pytorch import ToTensorV2\n'), ((1863, 1890), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (1888, 1890), False, 'import torch\n'), ((3003, 3170), 'insightface.app.MaskAugmentation', 'MaskAugmentation', ([], {'mask_names': "['mask_white', 'mask_blue', 'mask_black', 'mask_green']", 'mask_probs': '[0.4, 0.4, 0.1, 0.1]', 'h_low': '(0.33)', 'h_high': '(0.4)', 'p': 'self.mask_prob'}), "(mask_names=['mask_white', 'mask_blue', 'mask_black',\n 'mask_green'], mask_probs=[0.4, 0.4, 0.1, 0.1], h_low=0.33, h_high=0.4,\n p=self.mask_prob)\n", (3019, 3170), False, 'from insightface.app import MaskAugmentation\n'), ((3470, 3549), 'albumentations.RandomBrightnessContrast', 'A.RandomBrightnessContrast', ([], {'brightness_limit': '(0.125)', 'contrast_limit': '(0.05)', 'p': 'prob'}), '(brightness_limit=0.125, contrast_limit=0.05, p=prob)\n', (3496, 3549), True, 'import albumentations as A\n'), ((3709, 3771), 'albumentations.ImageCompression', 
'A.ImageCompression', ([], {'quality_lower': '(30)', 'quality_upper': '(80)', 'p': 'prob'}), '(quality_lower=30, quality_upper=80, p=prob)\n', (3727, 3771), True, 'import albumentations as A\n'), ((3841, 3880), 'albumentations.MedianBlur', 'A.MedianBlur', ([], {'blur_limit': '(1, 7)', 'p': 'prob'}), '(blur_limit=(1, 7), p=prob)\n', (3853, 3880), True, 'import albumentations as A\n'), ((3949, 3989), 'albumentations.MotionBlur', 'A.MotionBlur', ([], {'blur_limit': '(5, 12)', 'p': 'prob'}), '(blur_limit=(5, 12), p=prob)\n', (3961, 3989), True, 'import albumentations as A\n'), ((5323, 5345), 'mxnet.image.imdecode', 'mx.image.imdecode', (['img'], {}), '(img)\n', (5340, 5345), True, 'import mxnet as mx\n')] |
"""Hops flask middleware example"""
from flask import Flask
import ghhops_server as hs
#from numpy.lib.polynomial import poly1d
import rhino3dm
import numpy as np
# #-------------------------------------------------------
# import os
# import sys
# path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
# path = path+'\ArchitecturalGeometry'
# sys.path.append(path)###==C:\Users\WANGH0M\Documents\GitHub\ArchitecturalGeometry
from gridshell import Gridshell
from gridshell_agnet import Gridshell_AGNet
from hops_agnet import AGNet
#-------------------------------------------------------
#
# register hops app as middleware
# Register the Hops middleware on a Flask app so Grasshopper can call
# the components below over HTTP.
app = Flask(__name__)
hops: hs.HopsFlask = hs.Hops(app)
#hops = hs.Hops(app,debug=True)
#-------------------------------------------------------
#==================================================================
#
# OPTIMIZATIONS
#
#==================================================================
def _reading_mesh(mesh, opt=False, **kwargs):
    """Convert a rhino3dm mesh into a project Gridshell object.

    Parameters
    ----------
    mesh : rhino3dm.Mesh
        Input mesh whose vertices and faces are extracted.
    opt : bool, optional
        If True build a ``Gridshell`` (guided-projection optimization path,
        meshpy.py --> quadrings.py --> gridshell.py); otherwise build a
        ``Gridshell_AGNet`` (meshpy.py --> quadrings.py --> gridshell_agnet.py).
    **kwargs
        Extra settings forwarded to ``Gridshell_AGNet``.

    Returns
    -------
    Gridshell or Gridshell_AGNet
        The object initialized via ``make_mesh`` (cf. gui_basic.py/open_obj_file()).
    """
    # Extract vertex coordinates and face index lists as numpy arrays.
    verts = mesh.Vertices
    varr = np.array([[verts[i].X, verts[i].Y, verts[i].Z] for i in range(len(verts))])
    farr = np.array([list(mesh.Faces[i]) for i in range(mesh.Faces.Count)])
    if opt:
        M = Gridshell()
    else:
        M = Gridshell_AGNet(**kwargs)
    M.make_mesh(varr, farr)
    return M
### MAIN OPTIMIZATION:
@hops.component(
    "/read_aaa_mesh",
    name="processMesh",
    nickname="pM",
    description="Process a mesh.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The mesh to process."),
        hs.HopsString("web","web","constraint of net or web"),
        hs.HopsNumber("direction","direction","0,1 for asy./geo. direction of AGG/GAA web.",default=0),
        hs.HopsInteger("iteration","iter","num of iteration.",default=10),
        hs.HopsNumber("fairness","w1(fair)","weight of fairness.",default=0.000),
        hs.HopsNumber("closness","w2(closeness)","weight of self-closness.",default=0.01),
        hs.HopsNumber("glide","w3(glide)","weight of gliding boundaries",default=0),
        hs.HopsInteger("glideBdry","index(bdry)","index of glided boundary(s)",access=hs.HopsParamAccess.LIST,default=0),
        hs.HopsNumber("fix","w4(fix)","weight of fixed vertices",default=0),
        hs.HopsInteger("fixVertices","index(fix)","index of fixed vertices",access=hs.HopsParamAccess.LIST,default=0),
        hs.HopsBoolean("Restart","Restart","Restart the optimization",default=False),
    ],
    outputs=[hs.HopsMesh("mesh", "mesh", "The new mesh."),
             hs.HopsPoint("an","Vertices","all vertices"),
             hs.HopsPoint("vn","Normal","normals at V")]
)
def main(mesh:rhino3dm.Mesh,web,d=0,n=10,w1=0.0000,w2=0.01,w3=0,ind3=0,w4=0,ind4=0,restart=True):
    """Run the AGNet web/net optimization on the input mesh.

    Returns (mesh, anchors, normals): the optimized Rhino mesh, its vertex
    points, and the per-vertex normals from AGNet.get_agweb_an_n_on().
    """
    #assert isinstance(mesh, rhino3dm.Mesh)
    #m=rhino3dm.Mesh()
    ###-------------------------------------------
    M = _reading_mesh(mesh,opt=True)
    ###-------------------------------------------
    # Keys mirror the settings expected by hops_agnet.AGNet; note that the
    # `web` string itself is used as a flag key (e.g. {'AGG': True}).
    constraints = {
        web : True,
        'direction_poly' : d,
        'num_itera' : n,
        'weight_fairness' : w1,
        'weight_closeness' : w2,
        'weight_gliding' : w3,
        'iGlideBdry' : ind3,
        'weight_fix' : w4,
        'ifixV' : ind4,
        'Restart' : restart,
        }
    ###-------------------------------------------
    ### OPTIMIZATION:
    AG = AGNet(**constraints)
    AG.mesh = M
    AG.optimize_mesh()
    ###-------------------------------------------
    ###-------------------------------------------
    ### MAKE RHINO MESH:
    # Rebuild a rhino3dm mesh from the optimized vertex/face lists.
    Mout=rhino3dm.Mesh()
    for v in AG.vertexlist:
        Mout.Vertices.Add(v[0],v[1],v[2])
    for f in AG.facelist:
        Mout.Faces.AddFace(f[0],f[1],f[2],f[3])
    ###-------------------------------------------
    ### OUTPUT VERTICES + NORMALS:
    an,vn = AG.get_agweb_an_n_on()
    anchor = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in an]
    normal = [rhino3dm.Point3d(n[0],n[1],n[2]) for n in vn]
    return Mout,anchor,normal
#==================================================================
#==================================================================
#
# VISUALIZATIONS
#
#==================================================================
### PLOTING CHECKER-VERTICES:
@hops.component(
    "/checker_tianvertex",
    name="checkerVertex",
    nickname="ckv",
    description="Get checker vertices.",
    inputs=[hs.HopsMesh("mesh", "Mesh", "The optimized mesh.")],
    outputs=[
        hs.HopsPoint("P1","P1","checker vertices P1"),
        hs.HopsPoint("P2","P2","checker vertices P2"),
    ]
)
def checker_tian_vertex(mesh:rhino3dm.Mesh):
    """Return the two checkerboard vertex groups of the mesh as point lists."""
    gridshell = _reading_mesh(mesh)
    group_a, group_b = gridshell.plot_checker_group_tian_select_vertices()

    def as_points(coords):
        return [rhino3dm.Point3d(c[0], c[1], c[2]) for c in coords]

    return as_points(group_a), as_points(group_b)
#==================================================================
@hops.component(
    "/select_vertices_to_fix",
    name="selectVertex",
    nickname="Vi",
    description="Get indices of vertices.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh."),
        hs.HopsNumber("x","x","get x of points",access=hs.HopsParamAccess.LIST),
        hs.HopsNumber("y","y","get y of points",access=hs.HopsParamAccess.LIST),
        hs.HopsNumber("z","z","get z of points",access=hs.HopsParamAccess.LIST),
        ],
    outputs=[
        hs.HopsInteger("index","index","index of selected vertices",access=hs.HopsParamAccess.LIST),
        hs.HopsPoint("Vs","Vs","selected vertex"),
    ]
)
def select_vertex(mesh:rhino3dm.Mesh,x,y,z):
    """Select the mesh vertices matching the given query points.

    x, y, z are parallel coordinate lists; returns the selected vertex
    indices and the corresponding mesh points.
    """
    M = _reading_mesh(mesh)
    # Stack the parallel coordinate lists into an (n, 3) array of points.
    pts = np.c_[x, y, z]
    v, Vc = M.plot_selected_vertices(pts)
    Pc = [rhino3dm.Point3d(p[0], p[1], p[2]) for p in Vc]
    return v, Pc
@hops.component(
    "/corner_vertex",
    name="cornerVertex",
    nickname="corner",
    description="Get corner vertices.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh.")],
    outputs=[
        hs.HopsInteger("index_corner","index","index of corner vertices",access=hs.HopsParamAccess.LIST),
        hs.HopsPoint("Pc","Pc","corner vertex"),
    ]
)
def corner_vertex(mesh:rhino3dm.Mesh):
    """Return the indices and points of the mesh corner vertices."""
    M = _reading_mesh(mesh)
    v, Vc = M.plot_corner_vertices()
    Pc = [rhino3dm.Point3d(p[0], p[1], p[2]) for p in Vc]
    return v, Pc
@hops.component(
    "/polyline_4_directions",
    name="polylines",
    nickname="pl",
    description="Get 4 familes of polylines.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh.")],
    outputs=[
        hs.HopsCurve("polyline1", "pl1", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline2", "pl2", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline3", "pl3", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline4", "pl4", access=hs.HopsParamAccess.LIST),
    ]
)
def polyline(mesh:rhino3dm.Mesh):
    """Return the four families of mesh polylines as lists of line segments.

    Output order matches the index pairs from ``plot_4family_polylines``.
    """
    M = _reading_mesh(mesh)
    V = M.vertexlist

    def _segments(idx_a, idx_b):
        # One rhino3dm.LineCurve per paired (start, end) vertex index.
        starts, ends = V[idx_a], V[idx_b]
        return [
            rhino3dm.LineCurve(
                rhino3dm.Point3d(starts[i][0], starts[i][1], starts[i][2]),
                rhino3dm.Point3d(ends[i][0], ends[i][1], ends[i][2]),
            )
            for i in range(len(idx_a))
        ]

    (ipl11, ipl12, ipl21, ipl22,
     ipl31, ipl32, ipl41, ipl42) = M.plot_4family_polylines()
    pl1 = _segments(ipl11, ipl12)
    pl2 = _segments(ipl21, ipl22)
    pl3 = _segments(ipl31, ipl32)
    pl4 = _segments(ipl41, ipl42)
    return pl1, pl2, pl3, pl4
#==================================================================
@hops.component(
    "/patch_net",
    name="patch_crvnetwork",
    nickname="network",
    description="Get 2 familes of polylines.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh.")],
    outputs=[
        hs.HopsCurve("polyline1", "pl1", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline2", "pl2", access=hs.HopsParamAccess.LIST),
    ]
)
def patch(mesh:rhino3dm.Mesh):
    """Return the two polyline families of a patch-type mesh as line segments."""
    M = _reading_mesh(mesh)
    V = M.vertexlist

    def _segments(idx_a, idx_b):
        # One rhino3dm.LineCurve per paired (start, end) vertex index.
        starts, ends = V[idx_a], V[idx_b]
        return [
            rhino3dm.LineCurve(
                rhino3dm.Point3d(starts[i][0], starts[i][1], starts[i][2]),
                rhino3dm.Point3d(ends[i][0], ends[i][1], ends[i][2]),
            )
            for i in range(len(idx_a))
        ]

    ipl11, ipl12, ipl21, ipl22 = M.plot_2family_polylines(rot=False)
    return _segments(ipl11, ipl12), _segments(ipl21, ipl22)
#==================================================================
@hops.component(
    "/rotational_net",
    name="rot_crvnetwork",
    nickname="network",
    description="Get 2 familes of polylines.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh.")],
    outputs=[
        hs.HopsCurve("polyline1", "pl1", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline2", "pl2", access=hs.HopsParamAccess.LIST),
    ]
)
def rotational(mesh:rhino3dm.Mesh):
    """Return the two polyline families of a rotational mesh as line segments."""
    M = _reading_mesh(mesh)
    V = M.vertexlist

    def _segments(idx_a, idx_b):
        # One rhino3dm.LineCurve per paired (start, end) vertex index.
        starts, ends = V[idx_a], V[idx_b]
        return [
            rhino3dm.LineCurve(
                rhino3dm.Point3d(starts[i][0], starts[i][1], starts[i][2]),
                rhino3dm.Point3d(ends[i][0], ends[i][1], ends[i][2]),
            )
            for i in range(len(idx_a))
        ]

    ipl11, ipl12, ipl21, ipl22 = M.plot_2family_polylines(rot=True)
    return _segments(ipl11, ipl12), _segments(ipl21, ipl22)
#==================================================================
@hops.component(
    "/boundary",
    name="boundaries",
    nickname="bdry",
    description="Get 4 or 2 boundary polylines.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "Input mesh.")],
    outputs=[
        hs.HopsCurve("polyline1", "pl1", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline2", "pl2", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline3", "pl3", access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("polyline4", "pl4", access=hs.HopsParamAccess.LIST),
    ]
)
def boundaries(mesh:rhino3dm.Mesh):
    """Return the boundary polylines of the mesh as four segment lists.

    Boundary k (k = 0..3) comes from ``plot_boundary_polylines(k)``.
    """
    M = _reading_mesh(mesh)
    V = M.vertexlist

    def _segments(idx_a, idx_b):
        # One rhino3dm.LineCurve per paired (start, end) vertex index.
        starts, ends = V[idx_a], V[idx_b]
        return [
            rhino3dm.LineCurve(
                rhino3dm.Point3d(starts[i][0], starts[i][1], starts[i][2]),
                rhino3dm.Point3d(ends[i][0], ends[i][1], ends[i][2]),
            )
            for i in range(len(idx_a))
        ]

    pl1 = _segments(*M.plot_boundary_polylines(0))
    pl2 = _segments(*M.plot_boundary_polylines(1))
    pl3 = _segments(*M.plot_boundary_polylines(2))
    pl4 = _segments(*M.plot_boundary_polylines(3))
    return pl1, pl2, pl3, pl4
#==================================================================
@hops.component(
    "/Bezier_Spline_strips",
    name="bezier",
    nickname="bz",
    description="Get Bezier splines.",
    inputs=[
        hs.HopsMesh("mesh", "Mesh", "The optimized mesh."),
        hs.HopsString("web","web","constraint of net or web"),
        hs.HopsVector("VN","VN","vertex normals",access=hs.HopsParamAccess.LIST),
        hs.HopsInteger("i-th","ipoly","which polyline"),
        hs.HopsNumber("weight1","w1(CtrlPoint)","fairness of Bezier ctrl-points",default=0.005),
        hs.HopsBoolean("checker","ck/all","switch if at checker-vertices",default=True),
        hs.HopsNumber("numChecker","numChecker","number of checker selection",default=4),
        hs.HopsBoolean("rectify by E3","optRuling/cmptRuling","switch if optimized or directly computed rulings",default=False),
        hs.HopsNumber("weight2","w2(Strip)","fairness of (unrolled) developable strip",default=0.005),
        hs.HopsBoolean("denser","dense/sparse","switch if sparse or denser rulings",default=False),
        hs.HopsInteger("numDenser","numDenser","number of denser rulings",default=20),
        hs.HopsNumber("width","width","width of developable strip",default=0.5),
        hs.HopsNumber("distInterval","interval","interval distance of unrolling strips",default=1.5),
        ],
    outputs=[
        hs.HopsPoint("ctrlP","P","all ctrl points"),
        hs.HopsInteger("indices","ilist","list of control points P",access=hs.HopsParamAccess.LIST),
        hs.HopsCurve("Bezier","Bs","list of quintic Bezier splines",access=hs.HopsParamAccess.LIST),
        hs.HopsPoint("an","V","anchor points"),
        hs.HopsPoint("e1","e1","unit tangent vector"),
        hs.HopsPoint("e2","e2","unit principal normal vector"),
        hs.HopsPoint("e3","e3","unit binormal vector"),
        hs.HopsPoint("r","r","unit ruling vector"),
        hs.HopsMesh("Strip", "Strip", "3D mesh of developable strips"),
        hs.HopsMesh("unrollStrip", "unrollment", "2D mesh of unrolled developable strips"),
    ]
)
def bezier(mesh:rhino3dm.Mesh,web,vn:rhino3dm.Vector3d,i,w1,is_ck,num_ck,is_optruling,w2,is_dense,num_dense,width,dist):
    """Build quintic Bezier splines and developable strips from the mesh.

    Returns (ctrl points, per-polyline ctrl-point counts, Bezier curves,
    anchor points, frame vectors e1/e2/e3, rulings, 3D strip mesh,
    unrolled 2D strip mesh).  All geometry comes from
    ``Gridshell_AGNet.set_quintic_bezier_splines``.
    """
    # Convert the list of Rhino vectors into an (n, 3) numpy array.
    x = [n.X for n in vn]
    y = [n.Y for n in vn]
    z = [n.Z for n in vn]
    VN = np.c_[x,y,z]
    # Settings forwarded to Gridshell_AGNet via _reading_mesh; the `web`
    # string itself is used as a flag key.
    setting = {
        web : True,
        'VN' : VN,
        'set_Poly' : i,
        'weight_CtrlP' : w1,
        'weight_SmoothVertex' : w2,
        'num_DenserRuling' : num_dense,
        'set_ScaleOffset' : width,
        'set_DistInterval' : dist,
        'is_Checker' : is_ck,
        'num_Checker' : num_ck,
        'is_DenserRuling' : is_dense,
        'is_RulingRectify' : is_optruling,
        }
    M = _reading_mesh(mesh,**setting)
    ctrl_pts,nmlist,an,e1,e2,e3,r,sm,unm = M.set_quintic_bezier_splines()
    # Each polyline with n anchor points has (n-1)*5+1 control points.
    ilist = [(i-1)*5+1 for i in nmlist] # ctrl-points ## for regular patch
    ### GET QUINTIC BEZIER SPLINES:
    # Walk ctrl_pts polyline by polyline (offset k); each row of `mm` picks
    # the 6 control points of one quintic Bezier segment (indices
    # [5j .. 5j+5], i.e. consecutive segments share their endpoint).
    bz = []
    k=0
    for n in nmlist:
        m = np.arange((n-1)*5).reshape(-1,5) ##num of a row of ctrl-points
        mm = np.c_[m,(1+np.arange(n-1))*5] + k
        pt = []
        for i in range(mm.shape[0]):
            "for each Bezier CRV WITH 6 CTRL-POINTS"
            pt = [rhino3dm.Point3d(ctrl_pts[j][0],ctrl_pts[j][1],ctrl_pts[j][2]) for j in mm[i]]
            c = rhino3dm.NurbsCurve.Create(False,5,pt)
            bz.append(c)
        k += (n-1)*5+1
    ### GET MULTI-ROW WHOLE NURBS-CURVE:
    # bz = []
    # k=0
    # for n in nmlist:
    #     m = (n-1)*5+1
    #     pt = []
    #     for i in range(m):
    #         pt.append(rhino3dm.Point3d(ctrl_pts[k+i][0],ctrl_pts[k+i][1],ctrl_pts[k+i][2]))
    #     c = rhino3dm.NurbsCurve.Create(False,5,pt)
    #     bz.append(c)
    #     k += m
    ### GET LIST:
    # ilist = []
    # k=0
    # for n in nmlist:
    #     ilist.append([k+i for i in range(n)])
    #     #ilist.append([rhino3dm.Point3d(k+i,0,0) for i in range(n)])
    #     k += n
    #ilist=[i for i in range(nmlist[0])] ### work to get list of numbers
    #print(ilist)
    # Convert all coordinate arrays to Rhino points for the Hops outputs.
    P = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in ctrl_pts]
    AN = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in an]
    E1 = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in e1]
    E2 = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in e2]
    E3 = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in e3]
    R = [rhino3dm.Point3d(a[0],a[1],a[2]) for a in r]
    ### MAKE RHINO MESH:
    # Rebuild Rhino meshes from the strip and unrolled-strip mesh objects.
    strip=rhino3dm.Mesh()
    for v in sm.vertices:
        strip.Vertices.Add(v[0],v[1],v[2])
    for f in sm.faces_list():
        strip.Faces.AddFace(f[0],f[1],f[2],f[3])
    unroll=rhino3dm.Mesh()
    for v in unm.vertices:
        unroll.Vertices.Add(v[0],v[1],v[2])
    for f in unm.faces_list():
        unroll.Faces.AddFace(f[0],f[1],f[2],f[3])
    return P,ilist,bz,AN,E1,E2,E3,R,strip,unroll
#==================================================================
if __name__ == "__main__":
    # Serve the Hops components; the host is a VPN address reachable by Rhino.
    #app.run(host="10.8.0.10",debug=True)
    app.run(host="10.8.0.12",debug=True) ##Hui's VPN
    #app.run(debug=True)
| [
"rhino3dm.Mesh",
"ghhops_server.HopsPoint",
"ghhops_server.HopsMesh",
"numpy.arange",
"ghhops_server.HopsInteger",
"hops_agnet.AGNet",
"ghhops_server.Hops",
"gridshell_agnet.Gridshell_AGNet",
"ghhops_server.HopsNumber",
"ghhops_server.HopsCurve",
"ghhops_server.HopsBoolean",
"ghhops_server.Hop... | [((698, 713), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (703, 713), False, 'from flask import Flask\n'), ((735, 747), 'ghhops_server.Hops', 'hs.Hops', (['app'], {}), '(app)\n', (742, 747), True, 'import ghhops_server as hs\n'), ((1350, 1365), 'numpy.array', 'np.array', (['vlist'], {}), '(vlist)\n', (1358, 1365), True, 'import numpy as np\n'), ((1377, 1392), 'numpy.array', 'np.array', (['flist'], {}), '(flist)\n', (1385, 1392), True, 'import numpy as np\n'), ((3700, 3720), 'hops_agnet.AGNet', 'AGNet', ([], {}), '(**constraints)\n', (3705, 3720), False, 'from hops_agnet import AGNet\n'), ((3896, 3911), 'rhino3dm.Mesh', 'rhino3dm.Mesh', ([], {}), '()\n', (3909, 3911), False, 'import rhino3dm\n'), ((17118, 17133), 'rhino3dm.Mesh', 'rhino3dm.Mesh', ([], {}), '()\n', (17131, 17133), False, 'import rhino3dm\n'), ((17294, 17309), 'rhino3dm.Mesh', 'rhino3dm.Mesh', ([], {}), '()\n', (17307, 17309), False, 'import rhino3dm\n'), ((1510, 1521), 'gridshell.Gridshell', 'Gridshell', ([], {}), '()\n', (1519, 1521), False, 'from gridshell import Gridshell\n'), ((1602, 1627), 'gridshell_agnet.Gridshell_AGNet', 'Gridshell_AGNet', ([], {}), '(**kwargs)\n', (1617, 1627), False, 'from gridshell_agnet import Gridshell_AGNet\n'), ((4198, 4232), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (4214, 4232), False, 'import rhino3dm\n'), ((4258, 4292), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['n[0]', 'n[1]', 'n[2]'], {}), '(n[0], n[1], n[2])\n', (4274, 4292), False, 'import rhino3dm\n'), ((5092, 5126), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['r[0]', 'r[1]', 'r[2]'], {}), '(r[0], r[1], r[2])\n', (5108, 5126), False, 'import rhino3dm\n'), ((5148, 5182), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['b[0]', 'b[1]', 'b[2]'], {}), '(b[0], b[1], b[2])\n', (5164, 5182), False, 'import rhino3dm\n'), ((6279, 6313), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['r[0]', 'r[1]', 'r[2]'], {}), '(r[0], r[1], r[2])\n', 
(6295, 6313), False, 'import rhino3dm\n'), ((6849, 6883), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['r[0]', 'r[1]', 'r[2]'], {}), '(r[0], r[1], r[2])\n', (6865, 6883), False, 'import rhino3dm\n'), ((7718, 7782), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl11][i][0]', 'V[ipl11][i][1]', 'V[ipl11][i][2]'], {}), '(V[ipl11][i][0], V[ipl11][i][1], V[ipl11][i][2])\n', (7734, 7782), False, 'import rhino3dm\n'), ((7793, 7857), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl12][i][0]', 'V[ipl12][i][1]', 'V[ipl12][i][2]'], {}), '(V[ipl12][i][0], V[ipl12][i][1], V[ipl12][i][2])\n', (7809, 7857), False, 'import rhino3dm\n'), ((7944, 8008), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl21][i][0]', 'V[ipl21][i][1]', 'V[ipl21][i][2]'], {}), '(V[ipl21][i][0], V[ipl21][i][1], V[ipl21][i][2])\n', (7960, 8008), False, 'import rhino3dm\n'), ((8019, 8083), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl22][i][0]', 'V[ipl22][i][1]', 'V[ipl22][i][2]'], {}), '(V[ipl22][i][0], V[ipl22][i][1], V[ipl22][i][2])\n', (8035, 8083), False, 'import rhino3dm\n'), ((8170, 8234), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl31][i][0]', 'V[ipl31][i][1]', 'V[ipl31][i][2]'], {}), '(V[ipl31][i][0], V[ipl31][i][1], V[ipl31][i][2])\n', (8186, 8234), False, 'import rhino3dm\n'), ((8245, 8309), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl32][i][0]', 'V[ipl32][i][1]', 'V[ipl32][i][2]'], {}), '(V[ipl32][i][0], V[ipl32][i][1], V[ipl32][i][2])\n', (8261, 8309), False, 'import rhino3dm\n'), ((8396, 8460), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl41][i][0]', 'V[ipl41][i][1]', 'V[ipl41][i][2]'], {}), '(V[ipl41][i][0], V[ipl41][i][1], V[ipl41][i][2])\n', (8412, 8460), False, 'import rhino3dm\n'), ((8471, 8535), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl42][i][0]', 'V[ipl42][i][1]', 'V[ipl42][i][2]'], {}), '(V[ipl42][i][0], V[ipl42][i][1], V[ipl42][i][2])\n', (8487, 8535), False, 'import rhino3dm\n'), ((9281, 9345), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl11][i][0]', 
'V[ipl11][i][1]', 'V[ipl11][i][2]'], {}), '(V[ipl11][i][0], V[ipl11][i][1], V[ipl11][i][2])\n', (9297, 9345), False, 'import rhino3dm\n'), ((9356, 9420), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl12][i][0]', 'V[ipl12][i][1]', 'V[ipl12][i][2]'], {}), '(V[ipl12][i][0], V[ipl12][i][1], V[ipl12][i][2])\n', (9372, 9420), False, 'import rhino3dm\n'), ((9507, 9571), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl21][i][0]', 'V[ipl21][i][1]', 'V[ipl21][i][2]'], {}), '(V[ipl21][i][0], V[ipl21][i][1], V[ipl21][i][2])\n', (9523, 9571), False, 'import rhino3dm\n'), ((9582, 9646), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl22][i][0]', 'V[ipl22][i][1]', 'V[ipl22][i][2]'], {}), '(V[ipl22][i][0], V[ipl22][i][1], V[ipl22][i][2])\n', (9598, 9646), False, 'import rhino3dm\n'), ((10389, 10453), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl11][i][0]', 'V[ipl11][i][1]', 'V[ipl11][i][2]'], {}), '(V[ipl11][i][0], V[ipl11][i][1], V[ipl11][i][2])\n', (10405, 10453), False, 'import rhino3dm\n'), ((10464, 10528), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl12][i][0]', 'V[ipl12][i][1]', 'V[ipl12][i][2]'], {}), '(V[ipl12][i][0], V[ipl12][i][1], V[ipl12][i][2])\n', (10480, 10528), False, 'import rhino3dm\n'), ((10615, 10679), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl21][i][0]', 'V[ipl21][i][1]', 'V[ipl21][i][2]'], {}), '(V[ipl21][i][0], V[ipl21][i][1], V[ipl21][i][2])\n', (10631, 10679), False, 'import rhino3dm\n'), ((10690, 10754), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl22][i][0]', 'V[ipl22][i][1]', 'V[ipl22][i][2]'], {}), '(V[ipl22][i][0], V[ipl22][i][1], V[ipl22][i][2])\n', (10706, 10754), False, 'import rhino3dm\n'), ((11772, 11836), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl11][i][0]', 'V[ipl11][i][1]', 'V[ipl11][i][2]'], {}), '(V[ipl11][i][0], V[ipl11][i][1], V[ipl11][i][2])\n', (11788, 11836), False, 'import rhino3dm\n'), ((11847, 11911), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl12][i][0]', 'V[ipl12][i][1]', 'V[ipl12][i][2]'], {}), 
'(V[ipl12][i][0], V[ipl12][i][1], V[ipl12][i][2])\n', (11863, 11911), False, 'import rhino3dm\n'), ((11998, 12062), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl21][i][0]', 'V[ipl21][i][1]', 'V[ipl21][i][2]'], {}), '(V[ipl21][i][0], V[ipl21][i][1], V[ipl21][i][2])\n', (12014, 12062), False, 'import rhino3dm\n'), ((12073, 12137), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl22][i][0]', 'V[ipl22][i][1]', 'V[ipl22][i][2]'], {}), '(V[ipl22][i][0], V[ipl22][i][1], V[ipl22][i][2])\n', (12089, 12137), False, 'import rhino3dm\n'), ((12224, 12288), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl31][i][0]', 'V[ipl31][i][1]', 'V[ipl31][i][2]'], {}), '(V[ipl31][i][0], V[ipl31][i][1], V[ipl31][i][2])\n', (12240, 12288), False, 'import rhino3dm\n'), ((12299, 12363), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl32][i][0]', 'V[ipl32][i][1]', 'V[ipl32][i][2]'], {}), '(V[ipl32][i][0], V[ipl32][i][1], V[ipl32][i][2])\n', (12315, 12363), False, 'import rhino3dm\n'), ((12450, 12514), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl41][i][0]', 'V[ipl41][i][1]', 'V[ipl41][i][2]'], {}), '(V[ipl41][i][0], V[ipl41][i][1], V[ipl41][i][2])\n', (12466, 12514), False, 'import rhino3dm\n'), ((12525, 12589), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['V[ipl42][i][0]', 'V[ipl42][i][1]', 'V[ipl42][i][2]'], {}), '(V[ipl42][i][0], V[ipl42][i][1], V[ipl42][i][2])\n', (12541, 12589), False, 'import rhino3dm\n'), ((16752, 16786), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (16768, 16786), False, 'import rhino3dm\n'), ((16814, 16848), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (16830, 16848), False, 'import rhino3dm\n'), ((16870, 16904), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (16886, 16904), False, 'import rhino3dm\n'), ((16926, 16960), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', 
(16942, 16960), False, 'import rhino3dm\n'), ((16982, 17016), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (16998, 17016), False, 'import rhino3dm\n'), ((17037, 17071), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['a[0]', 'a[1]', 'a[2]'], {}), '(a[0], a[1], a[2])\n', (17053, 17071), False, 'import rhino3dm\n'), ((1869, 1920), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The mesh to process."""'], {}), "('mesh', 'Mesh', 'The mesh to process.')\n", (1880, 1920), True, 'import ghhops_server as hs\n'), ((1930, 1985), 'ghhops_server.HopsString', 'hs.HopsString', (['"""web"""', '"""web"""', '"""constraint of net or web"""'], {}), "('web', 'web', 'constraint of net or web')\n", (1943, 1985), True, 'import ghhops_server as hs\n'), ((1993, 2094), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""direction"""', '"""direction"""', '"""0,1 for asy./geo. direction of AGG/GAA web."""'], {'default': '(0)'}), "('direction', 'direction',\n '0,1 for asy./geo. 
direction of AGG/GAA web.', default=0)\n", (2006, 2094), True, 'import ghhops_server as hs\n'), ((2097, 2165), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""iteration"""', '"""iter"""', '"""num of iteration."""'], {'default': '(10)'}), "('iteration', 'iter', 'num of iteration.', default=10)\n", (2111, 2165), True, 'import ghhops_server as hs\n'), ((2172, 2245), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""fairness"""', '"""w1(fair)"""', '"""weight of fairness."""'], {'default': '(0.0)'}), "('fairness', 'w1(fair)', 'weight of fairness.', default=0.0)\n", (2185, 2245), True, 'import ghhops_server as hs\n'), ((2254, 2342), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""closness"""', '"""w2(closeness)"""', '"""weight of self-closness."""'], {'default': '(0.01)'}), "('closness', 'w2(closeness)', 'weight of self-closness.',\n default=0.01)\n", (2267, 2342), True, 'import ghhops_server as hs\n'), ((2345, 2423), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""glide"""', '"""w3(glide)"""', '"""weight of gliding boundaries"""'], {'default': '(0)'}), "('glide', 'w3(glide)', 'weight of gliding boundaries', default=0)\n", (2358, 2423), True, 'import ghhops_server as hs\n'), ((2430, 2550), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""glideBdry"""', '"""index(bdry)"""', '"""index of glided boundary(s)"""'], {'access': 'hs.HopsParamAccess.LIST', 'default': '(0)'}), "('glideBdry', 'index(bdry)', 'index of glided boundary(s)',\n access=hs.HopsParamAccess.LIST, default=0)\n", (2444, 2550), True, 'import ghhops_server as hs\n'), ((2552, 2622), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""fix"""', '"""w4(fix)"""', '"""weight of fixed vertices"""'], {'default': '(0)'}), "('fix', 'w4(fix)', 'weight of fixed vertices', default=0)\n", (2565, 2622), True, 'import ghhops_server as hs\n'), ((2629, 2746), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""fixVertices"""', '"""index(fix)"""', '"""index of fixed vertices"""'], {'access': 
'hs.HopsParamAccess.LIST', 'default': '(0)'}), "('fixVertices', 'index(fix)', 'index of fixed vertices',\n access=hs.HopsParamAccess.LIST, default=0)\n", (2643, 2746), True, 'import ghhops_server as hs\n'), ((2748, 2827), 'ghhops_server.HopsBoolean', 'hs.HopsBoolean', (['"""Restart"""', '"""Restart"""', '"""Restart the optimization"""'], {'default': '(False)'}), "('Restart', 'Restart', 'Restart the optimization', default=False)\n", (2762, 2827), True, 'import ghhops_server as hs\n'), ((2846, 2890), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""mesh"""', '"""The new mesh."""'], {}), "('mesh', 'mesh', 'The new mesh.')\n", (2857, 2890), True, 'import ghhops_server as hs\n'), ((2904, 2950), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""an"""', '"""Vertices"""', '"""all vertices"""'], {}), "('an', 'Vertices', 'all vertices')\n", (2916, 2950), True, 'import ghhops_server as hs\n'), ((2962, 3006), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""vn"""', '"""Normal"""', '"""normals at V"""'], {}), "('vn', 'Normal', 'normals at V')\n", (2974, 3006), True, 'import ghhops_server as hs\n'), ((4765, 4815), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (4776, 4815), True, 'import ghhops_server as hs\n'), ((4831, 4878), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""P1"""', '"""P1"""', '"""checker vertices P1"""'], {}), "('P1', 'P1', 'checker vertices P1')\n", (4843, 4878), True, 'import ghhops_server as hs\n'), ((4890, 4937), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""P2"""', '"""P2"""', '"""checker vertices P2"""'], {}), "('P2', 'P2', 'checker vertices P2')\n", (4902, 4937), True, 'import ghhops_server as hs\n'), ((5437, 5487), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (5448, 5487), True, 'import ghhops_server as hs\n'), ((5497, 5571), 
'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""x"""', '"""x"""', '"""get x of points"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('x', 'x', 'get x of points', access=hs.HopsParamAccess.LIST)\n", (5510, 5571), True, 'import ghhops_server as hs\n'), ((5578, 5652), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""y"""', '"""y"""', '"""get y of points"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('y', 'y', 'get y of points', access=hs.HopsParamAccess.LIST)\n", (5591, 5652), True, 'import ghhops_server as hs\n'), ((5659, 5733), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""z"""', '"""z"""', '"""get z of points"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('z', 'z', 'get z of points', access=hs.HopsParamAccess.LIST)\n", (5672, 5733), True, 'import ghhops_server as hs\n'), ((5769, 5868), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""index"""', '"""index"""', '"""index of selected vertices"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('index', 'index', 'index of selected vertices', access=hs.\n HopsParamAccess.LIST)\n", (5783, 5868), True, 'import ghhops_server as hs\n'), ((5874, 5917), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""Vs"""', '"""Vs"""', '"""selected vertex"""'], {}), "('Vs', 'Vs', 'selected vertex')\n", (5886, 5917), True, 'import ghhops_server as hs\n'), ((6490, 6540), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (6501, 6540), True, 'import ghhops_server as hs\n'), ((6569, 6673), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""index_corner"""', '"""index"""', '"""index of corner vertices"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('index_corner', 'index', 'index of corner vertices', access=\n hs.HopsParamAccess.LIST)\n", (6583, 6673), True, 'import ghhops_server as hs\n'), ((6679, 6720), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""Pc"""', '"""Pc"""', '"""corner vertex"""'], {}), "('Pc', 'Pc', 'corner 
vertex')\n", (6691, 6720), True, 'import ghhops_server as hs\n'), ((7875, 7899), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (7893, 7899), False, 'import rhino3dm\n'), ((8101, 8125), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (8119, 8125), False, 'import rhino3dm\n'), ((8327, 8351), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (8345, 8351), False, 'import rhino3dm\n'), ((8553, 8577), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (8571, 8577), False, 'import rhino3dm\n'), ((7081, 7131), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (7092, 7131), True, 'import ghhops_server as hs\n'), ((7160, 7224), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline1"""', '"""pl1"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline1', 'pl1', access=hs.HopsParamAccess.LIST)\n", (7172, 7224), True, 'import ghhops_server as hs\n'), ((7238, 7302), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline2"""', '"""pl2"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline2', 'pl2', access=hs.HopsParamAccess.LIST)\n", (7250, 7302), True, 'import ghhops_server as hs\n'), ((7316, 7380), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline3"""', '"""pl3"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline3', 'pl3', access=hs.HopsParamAccess.LIST)\n", (7328, 7380), True, 'import ghhops_server as hs\n'), ((7394, 7458), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline4"""', '"""pl4"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline4', 'pl4', access=hs.HopsParamAccess.LIST)\n", (7406, 7458), True, 'import ghhops_server as hs\n'), ((9438, 9462), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (9456, 9462), False, 'import rhino3dm\n'), ((9664, 9688), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), 
'(a, b)\n', (9682, 9688), False, 'import rhino3dm\n'), ((8831, 8881), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (8842, 8881), True, 'import ghhops_server as hs\n'), ((8910, 8974), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline1"""', '"""pl1"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline1', 'pl1', access=hs.HopsParamAccess.LIST)\n", (8922, 8974), True, 'import ghhops_server as hs\n'), ((8988, 9052), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline2"""', '"""pl2"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline2', 'pl2', access=hs.HopsParamAccess.LIST)\n", (9000, 9052), True, 'import ghhops_server as hs\n'), ((10546, 10570), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (10564, 10570), False, 'import rhino3dm\n'), ((10772, 10796), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (10790, 10796), False, 'import rhino3dm\n'), ((9935, 9985), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (9946, 9985), True, 'import ghhops_server as hs\n'), ((10014, 10078), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline1"""', '"""pl1"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline1', 'pl1', access=hs.HopsParamAccess.LIST)\n", (10026, 10078), True, 'import ghhops_server as hs\n'), ((10092, 10156), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline2"""', '"""pl2"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline2', 'pl2', access=hs.HopsParamAccess.LIST)\n", (10104, 10156), True, 'import ghhops_server as hs\n'), ((11929, 11953), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (11947, 11953), False, 'import rhino3dm\n'), ((12155, 12179), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (12173, 12179), False, 
'import rhino3dm\n'), ((12381, 12405), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (12399, 12405), False, 'import rhino3dm\n'), ((12607, 12631), 'rhino3dm.LineCurve', 'rhino3dm.LineCurve', (['a', 'b'], {}), '(a, b)\n', (12625, 12631), False, 'import rhino3dm\n'), ((11034, 11076), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""Input mesh."""'], {}), "('mesh', 'Mesh', 'Input mesh.')\n", (11045, 11076), True, 'import ghhops_server as hs\n'), ((11105, 11169), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline1"""', '"""pl1"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline1', 'pl1', access=hs.HopsParamAccess.LIST)\n", (11117, 11169), True, 'import ghhops_server as hs\n'), ((11183, 11247), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline2"""', '"""pl2"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline2', 'pl2', access=hs.HopsParamAccess.LIST)\n", (11195, 11247), True, 'import ghhops_server as hs\n'), ((11261, 11325), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline3"""', '"""pl3"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline3', 'pl3', access=hs.HopsParamAccess.LIST)\n", (11273, 11325), True, 'import ghhops_server as hs\n'), ((11339, 11403), 'ghhops_server.HopsCurve', 'hs.HopsCurve', (['"""polyline4"""', '"""pl4"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('polyline4', 'pl4', access=hs.HopsParamAccess.LIST)\n", (11351, 11403), True, 'import ghhops_server as hs\n'), ((16012, 16052), 'rhino3dm.NurbsCurve.Create', 'rhino3dm.NurbsCurve.Create', (['(False)', '(5)', 'pt'], {}), '(False, 5, pt)\n', (16038, 16052), False, 'import rhino3dm\n'), ((12872, 12922), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""mesh"""', '"""Mesh"""', '"""The optimized mesh."""'], {}), "('mesh', 'Mesh', 'The optimized mesh.')\n", (12883, 12922), True, 'import ghhops_server as hs\n'), ((12932, 12987), 'ghhops_server.HopsString', 'hs.HopsString', (['"""web"""', '"""web"""', '"""constraint of 
net or web"""'], {}), "('web', 'web', 'constraint of net or web')\n", (12945, 12987), True, 'import ghhops_server as hs\n'), ((12995, 13070), 'ghhops_server.HopsVector', 'hs.HopsVector', (['"""VN"""', '"""VN"""', '"""vertex normals"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('VN', 'VN', 'vertex normals', access=hs.HopsParamAccess.LIST)\n", (13008, 13070), True, 'import ghhops_server as hs\n'), ((13077, 13126), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""i-th"""', '"""ipoly"""', '"""which polyline"""'], {}), "('i-th', 'ipoly', 'which polyline')\n", (13091, 13126), True, 'import ghhops_server as hs\n'), ((13134, 13228), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""weight1"""', '"""w1(CtrlPoint)"""', '"""fairness of Bezier ctrl-points"""'], {'default': '(0.005)'}), "('weight1', 'w1(CtrlPoint)', 'fairness of Bezier ctrl-points',\n default=0.005)\n", (13147, 13228), True, 'import ghhops_server as hs\n'), ((13231, 13317), 'ghhops_server.HopsBoolean', 'hs.HopsBoolean', (['"""checker"""', '"""ck/all"""', '"""switch if at checker-vertices"""'], {'default': '(True)'}), "('checker', 'ck/all', 'switch if at checker-vertices',\n default=True)\n", (13245, 13317), True, 'import ghhops_server as hs\n'), ((13320, 13407), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""numChecker"""', '"""numChecker"""', '"""number of checker selection"""'], {'default': '(4)'}), "('numChecker', 'numChecker', 'number of checker selection',\n default=4)\n", (13333, 13407), True, 'import ghhops_server as hs\n'), ((13410, 13536), 'ghhops_server.HopsBoolean', 'hs.HopsBoolean', (['"""rectify by E3"""', '"""optRuling/cmptRuling"""', '"""switch if optimized or directly computed rulings"""'], {'default': '(False)'}), "('rectify by E3', 'optRuling/cmptRuling',\n 'switch if optimized or directly computed rulings', default=False)\n", (13424, 13536), True, 'import ghhops_server as hs\n'), ((13539, 13639), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""weight2"""', '"""w2(Strip)"""', 
'"""fairness of (unrolled) developable strip"""'], {'default': '(0.005)'}), "('weight2', 'w2(Strip)',\n 'fairness of (unrolled) developable strip', default=0.005)\n", (13552, 13639), True, 'import ghhops_server as hs\n'), ((13642, 13739), 'ghhops_server.HopsBoolean', 'hs.HopsBoolean', (['"""denser"""', '"""dense/sparse"""', '"""switch if sparse or denser rulings"""'], {'default': '(False)'}), "('denser', 'dense/sparse',\n 'switch if sparse or denser rulings', default=False)\n", (13656, 13739), True, 'import ghhops_server as hs\n'), ((13742, 13827), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""numDenser"""', '"""numDenser"""', '"""number of denser rulings"""'], {'default': '(20)'}), "('numDenser', 'numDenser', 'number of denser rulings', default=20\n )\n", (13756, 13827), True, 'import ghhops_server as hs\n'), ((13829, 13903), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""width"""', '"""width"""', '"""width of developable strip"""'], {'default': '(0.5)'}), "('width', 'width', 'width of developable strip', default=0.5)\n", (13842, 13903), True, 'import ghhops_server as hs\n'), ((13910, 14009), 'ghhops_server.HopsNumber', 'hs.HopsNumber', (['"""distInterval"""', '"""interval"""', '"""interval distance of unrolling strips"""'], {'default': '(1.5)'}), "('distInterval', 'interval',\n 'interval distance of unrolling strips', default=1.5)\n", (13923, 14009), True, 'import ghhops_server as hs\n'), ((14041, 14086), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""ctrlP"""', '"""P"""', '"""all ctrl points"""'], {}), "('ctrlP', 'P', 'all ctrl points')\n", (14053, 14086), True, 'import ghhops_server as hs\n'), ((14098, 14197), 'ghhops_server.HopsInteger', 'hs.HopsInteger', (['"""indices"""', '"""ilist"""', '"""list of control points P"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('indices', 'ilist', 'list of control points P', access=hs.\n HopsParamAccess.LIST)\n", (14112, 14197), True, 'import ghhops_server as hs\n'), ((14203, 14302), 'ghhops_server.HopsCurve', 
'hs.HopsCurve', (['"""Bezier"""', '"""Bs"""', '"""list of quintic Bezier splines"""'], {'access': 'hs.HopsParamAccess.LIST'}), "('Bezier', 'Bs', 'list of quintic Bezier splines', access=hs.\n HopsParamAccess.LIST)\n", (14215, 14302), True, 'import ghhops_server as hs\n'), ((14308, 14348), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""an"""', '"""V"""', '"""anchor points"""'], {}), "('an', 'V', 'anchor points')\n", (14320, 14348), True, 'import ghhops_server as hs\n'), ((14360, 14407), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""e1"""', '"""e1"""', '"""unit tangent vector"""'], {}), "('e1', 'e1', 'unit tangent vector')\n", (14372, 14407), True, 'import ghhops_server as hs\n'), ((14419, 14475), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""e2"""', '"""e2"""', '"""unit principal normal vector"""'], {}), "('e2', 'e2', 'unit principal normal vector')\n", (14431, 14475), True, 'import ghhops_server as hs\n'), ((14487, 14535), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""e3"""', '"""e3"""', '"""unit binormal vector"""'], {}), "('e3', 'e3', 'unit binormal vector')\n", (14499, 14535), True, 'import ghhops_server as hs\n'), ((14547, 14591), 'ghhops_server.HopsPoint', 'hs.HopsPoint', (['"""r"""', '"""r"""', '"""unit ruling vector"""'], {}), "('r', 'r', 'unit ruling vector')\n", (14559, 14591), True, 'import ghhops_server as hs\n'), ((14603, 14665), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""Strip"""', '"""Strip"""', '"""3D mesh of developable strips"""'], {}), "('Strip', 'Strip', '3D mesh of developable strips')\n", (14614, 14665), True, 'import ghhops_server as hs\n'), ((14679, 14765), 'ghhops_server.HopsMesh', 'hs.HopsMesh', (['"""unrollStrip"""', '"""unrollment"""', '"""2D mesh of unrolled developable strips"""'], {}), "('unrollStrip', 'unrollment',\n '2D mesh of unrolled developable strips')\n", (14690, 14765), True, 'import ghhops_server as hs\n'), ((15682, 15704), 'numpy.arange', 'np.arange', (['((n - 1) * 5)'], {}), '((n - 1) * 5)\n', (15691, 15704), 
True, 'import numpy as np\n'), ((15917, 15981), 'rhino3dm.Point3d', 'rhino3dm.Point3d', (['ctrl_pts[j][0]', 'ctrl_pts[j][1]', 'ctrl_pts[j][2]'], {}), '(ctrl_pts[j][0], ctrl_pts[j][1], ctrl_pts[j][2])\n', (15933, 15981), False, 'import rhino3dm\n'), ((15769, 15785), 'numpy.arange', 'np.arange', (['(n - 1)'], {}), '(n - 1)\n', (15778, 15785), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import os
def get_pairs(labels):
    """Convert a dot-bracket secondary-structure string into base-pair indices.

    Supports nested/pseudoknotted annotations using the bracket alphabets
    (), <>, {}, [], Aa, Bb, Cc.  '.' and '-' denote unpaired positions.
    Any other symbol is printed and ignored (kept from the original).

    Args:
        labels: iterable of single-character structure symbols.

    Returns:
        List of [i, j] index pairs (i < j), one per matched bracket pair,
        in closing-bracket order.
    """
    # One LIFO stack per bracket family; map each closer to its opener.
    closer_to_opener = {')': '(', '>': '<', '}': '{', ']': '[',
                        'a': 'A', 'b': 'B', 'c': 'C'}
    stacks = {opener: [] for opener in closer_to_opener.values()}
    pairs = []
    for i, symbol in enumerate(labels):
        if symbol in stacks:
            # Opening bracket: remember its position.
            stacks[symbol].append(i)
        elif symbol in closer_to_opener:
            # Closing bracket: pair with the most recent matching opener.
            opener_idx = stacks[closer_to_opener[symbol]].pop()
            pairs.append(sorted([opener_idx, i]))
        elif symbol in ('.', '-'):
            # Unpaired position.
            continue
        else:
            print(symbol)
    return pairs
########--------------------- parse GREMLIN output ---------------------------- #########################
def GREMLIN_prob(base_path, id, seq):
    """Parse GREMLIN DCA output into an (L, L) score matrix.

    A missing or unreadable .dca file yields an all-zero matrix so that
    downstream code can proceed (best-effort, as in the original).  The
    original multiplied short-range (|i-j| < 4) scores by 1, which is a
    no-op and is dropped here.

    Args:
        base_path: root directory containing 'predictions/GREMLIN/'.
        id: identifier (file stem of the .dca file).
        seq: sequence; only its length is used for the matrix size.

    Returns:
        numpy array of shape (len(seq), len(seq)) with DCA scores at
        the 0-indexed positions given in the file.
    """
    dca = np.zeros((len(seq), len(seq)))
    try:
        with open(base_path + '/predictions/GREMLIN/' + id + '.dca') as f:
            scores = pd.read_csv(f, comment='#', sep=r'\s+',
                                 header=None, skiprows=[0], usecols=[0, 1, 2]).values
    except (OSError, ValueError, pd.errors.ParserError):
        # File missing or malformed: fall back to the zero matrix.
        # (Original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return dca
    for row in scores:
        dca[int(row[0]), int(row[1])] = row[2]
    return dca
########--------------------- parse plmc output output ---------------------------- #########################
def plmc_prob(base_path, id, seq):
    """Read plmc DCA scores (1-indexed pairs, columns 0/2/5) into an (L, L) matrix."""
    n = len(seq)
    dca = np.zeros((n, n))
    with open(base_path + '/predictions/PLMC/' + id + '.dca') as handle:
        rows = pd.read_csv(handle, comment='#', sep=r'\s+',
                          header=None, usecols=[0, 2, 5]).values
    for row in rows:
        # Positions are 1-indexed in the file; the original's factor of 1
        # for short-range (|i-j| < 4) entries is a no-op.
        dca[int(row[0]) - 1, int(row[1]) - 1] = row[2]
    return dca
########--------------------- parse mfdca output output ---------------------------- #########################
def mfdca_prob(base_path, id, seq):
    """Read mfDCA APC-corrected scores (1-indexed pairs) into an (L, L) matrix."""
    n = len(seq)
    dca = np.zeros((n, n))
    fname = base_path + '/predictions/mfDCA/' + 'MFDCA_apc_fn_scores_' + id + '.txt'
    with open(fname) as handle:
        # The first 11 lines are header material and are skipped.
        rows = pd.read_csv(handle, comment='#', sep=r'\s+', header=None,
                          skiprows=list(range(11)), usecols=[0, 1, 2]).values
    for row in rows:
        # 1-indexed positions; short-range factor of 1 in the original is a no-op.
        dca[int(row[0]) - 1, int(row[1]) - 1] = row[2]
    return dca
########--------------------- parse plmdca output output ---------------------------- #########################
def plmdca_prob(base_path, id, seq):
    """Read plmDCA APC-corrected scores (1-indexed pairs) into an (L, L) matrix."""
    n = len(seq)
    dca = np.zeros((n, n))
    fname = base_path + '/predictions/plmDCA/PLMDCA_apc_fn_scores_' + id + '.txt'
    with open(fname) as handle:
        # The first 12 lines are header material and are skipped.
        rows = pd.read_csv(handle, comment='#', sep=r'\s+', header=None,
                          skiprows=list(range(12)), usecols=[0, 1, 2]).values
    for row in rows:
        # 1-indexed positions; short-range factor of 1 in the original is a no-op.
        dca[int(row[0]) - 1, int(row[1]) - 1] = row[2]
    return dca
######## --------------------- parse base-pair probability RNAfold output ---------------------------- #########################
def RNAfold_bp_prob(base_path, id, seq):
    """Read RNAfold base-pair probabilities; stored entries are the squared probability."""
    n = len(seq)
    probs = np.zeros((n, n))
    with open(base_path + '/predictions/RNAfold/' + str(id) + '.prob') as handle:
        # Each record is a single space-separated string "i j p".
        records = pd.read_csv(handle, comment='#', header=None).values
    for record in records[:, 0]:
        fields = record.split(' ')
        i, j = int(fields[0]) - 1, int(fields[1]) - 1
        # Probability squared; short-range factor of 1 in the original is a no-op.
        probs[i, j] = float(fields[2]) ** 2
    return probs
######## --------------------- parse base-pair probability SPOT-RNA output ---------------------------- #########################
def spotrna(id, seq, base_dir=None):
    """Load the SPOT-RNA base-pair probability matrix for *id*.

    Unlike every sibling parser, the original took no base path and relied
    on a module-level ``base_path`` global.  ``base_dir`` makes the root
    explicit while staying backward-compatible: when it is ``None`` the
    global ``base_path`` is used exactly as before.

    Args:
        id: identifier (file stem of the .prob file).
        seq: unused; kept for interface parity with the sibling parsers.
        base_dir: optional root directory containing 'predictions/SPOT-RNA/'.

    Returns:
        numpy array loaded from the .prob file.
    """
    root = base_path if base_dir is None else base_dir
    return np.loadtxt(root + '/predictions/SPOT-RNA/' + str(id) + '.prob')
######## --------------------- parse base-pair probability SPOT-RNA2 output ---------------------------- #########################
def spotrna2(base_path, id, seq):
    """Load the SPOT-RNA2 base-pair probability matrix for *id*."""
    prob_file = '{}/predictions/SPOT-RNA2/{}.prob'.format(base_path, id)
    return np.loadtxt(prob_file)
######## --------------------- parse LinearPartition output ---------------------------- #########################
def LinearPartition(base_path, id, seq):
    """Read LinearPartition pair probabilities (1-indexed) into an (L, L) matrix."""
    n = len(seq)
    y_pred = np.zeros((n, n))
    with open(base_path + '/predictions/LinearPartition/' + id + '.prob', 'r') as handle:
        records = pd.read_csv(handle, sep=r'\s+', header=None).values
    for rec in records:
        y_pred[int(rec[0]) - 1, int(rec[1]) - 1] = rec[2]
    return y_pred
######## --------------------- parse base-pair probability SPOT-RNA-2D-Single output ---------------------------- #########################
def spotrna_2d_single(base_path, id, seq):
    """Load the SPOT-RNA-2D-Single base-pair probability matrix for *id*."""
    prob_file = '{}/predictions/SPOT-RNA-2D-Single/{}.prob'.format(base_path, id)
    return np.loadtxt(prob_file)
######## --------------------- parse base-pair probability SPOT-RNA-2D output ---------------------------- #########################
def spotrna_2d(base_path, id, seq):
    """Load the SPOT-RNA-2D base-pair probability matrix for *id*."""
    prob_file = '{}/predictions/SPOT-RNA-2D/{}.prob'.format(base_path, id)
    return np.loadtxt(prob_file)
######## --------------------- parse RNAfold output ---------------------------- #########################
def RNAfold_bps(base_path, id, seq):
    """Parse the RNAfold .dbn file and return its base pairs via get_pairs.

    Asserts that the predicted sequence has the same length as *seq*.
    """
    dbn_path = base_path + '/predictions/RNAfold/' + str(id) + '.dbn'
    with open(dbn_path) as handle:
        # Row 0 is skipped along with trailing rows; row 0 of the result is
        # the sequence, row 1 the dot-bracket structure string.
        temp = pd.read_csv(handle, comment='#', sep=r'\s+', header=None,
                        usecols=[0], skiprows=[0, 3, 4, 5]).values
    seq_pred = list(temp[0, 0])
    labels = list(temp[1, 0])
    assert len(seq) == len(seq_pred) == len(labels)
    return get_pairs(labels)
######## --------------------- parse LinearPartition output ---------------------------- #########################
def LinearPartition_bps(base_path, id, seq):
    """Threshold LinearPartition probabilities (>= 0.198) into base pairs.

    Returns a list of [i, j] index pairs (0-indexed, upper triangle order).
    """
    n = len(seq)
    y_pred = np.zeros((n, n))
    with open(base_path + '/predictions/LinearPartition/' + id + '.prob', 'r') as handle:
        records = pd.read_csv(handle, sep=r'\s+', header=None).values
    for rec in records:
        y_pred[int(rec[0]) - 1, int(rec[1]) - 1] = rec[2]
    rows, cols = np.triu_indices(n, k=1)
    # Pair labels (seq[i] + seq[j]) are built as in the original, which also
    # implicitly validates the indices against seq.
    candidates = [[i, j, seq[i] + seq[j]] for i, j in zip(rows, cols)]
    return [[i, j] for i, j, _label in candidates if y_pred[i, j] >= 0.198]
############ load base-pair prob form SPOT-RNA ##############################
def spot_rna_bps(base_path, id, seq):
    """Threshold SPOT-RNA pair probabilities (>= 0.335) into [i, j] base pairs."""
    y_pred = np.loadtxt(base_path + '/predictions/SPOT-RNA/' + str(id) + '.prob')
    rows, cols = np.triu_indices(y_pred.shape[0], k=1)
    # Pair labels (seq[i] + seq[j]) are built as in the original, which also
    # implicitly validates the indices against seq.
    candidates = [[i, j, seq[i] + seq[j]] for i, j in zip(rows, cols)]
    return [[i, j] for i, j, _label in candidates if y_pred[i, j] >= 0.335]
############ load base-pair prob form SPOT-RNA ##############################
def spot_rna2_bps(base_path, id, seq):
    """Threshold SPOT-RNA2 pair probabilities (>= 0.795) into [i, j] base pairs."""
    y_pred = np.loadtxt(base_path + '/predictions/SPOT-RNA2/' + str(id) + '.prob')
    rows, cols = np.triu_indices(y_pred.shape[0], k=1)
    # Pair labels (seq[i] + seq[j]) are built as in the original, which also
    # implicitly validates the indices against seq.
    candidates = [[i, j, seq[i] + seq[j]] for i, j in zip(rows, cols)]
    return [[i, j] for i, j, _label in candidates if y_pred[i, j] >= 0.795]
| [
"pandas.read_csv",
"numpy.greater_equal",
"numpy.triu_indices"
] | [((7022, 7059), 'numpy.triu_indices', 'np.triu_indices', (['y_pred.shape[0]'], {'k': '(1)'}), '(y_pred.shape[0], k=1)\n', (7037, 7059), True, 'import numpy as np\n'), ((7282, 7314), 'numpy.greater_equal', 'np.greater_equal', (['outputs', '(0.198)'], {}), '(outputs, 0.198)\n', (7298, 7314), True, 'import numpy as np\n'), ((7660, 7697), 'numpy.triu_indices', 'np.triu_indices', (['y_pred.shape[0]'], {'k': '(1)'}), '(y_pred.shape[0], k=1)\n', (7675, 7697), True, 'import numpy as np\n'), ((7938, 7970), 'numpy.greater_equal', 'np.greater_equal', (['outputs', '(0.335)'], {}), '(outputs, 0.335)\n', (7954, 7970), True, 'import numpy as np\n'), ((8327, 8364), 'numpy.triu_indices', 'np.triu_indices', (['y_pred.shape[0]'], {'k': '(1)'}), '(y_pred.shape[0], k=1)\n', (8342, 8364), True, 'import numpy as np\n'), ((8605, 8637), 'numpy.greater_equal', 'np.greater_equal', (['outputs', '(0.795)'], {}), '(outputs, 0.795)\n', (8621, 8637), True, 'import numpy as np\n'), ((2377, 2463), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'delim_whitespace': '(True)', 'header': 'None', 'usecols': '[0, 2, 5]'}), "(f, comment='#', delim_whitespace=True, header=None, usecols=[0,\n 2, 5])\n", (2388, 2463), True, 'import pandas as pd\n'), ((2919, 3050), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'delim_whitespace': '(True)', 'header': 'None', 'skiprows': '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]', 'usecols': '[0, 1, 2]'}), "(f, comment='#', delim_whitespace=True, header=None, skiprows=[0,\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], usecols=[0, 1, 2])\n", (2930, 3050), True, 'import pandas as pd\n'), ((3489, 3624), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'delim_whitespace': '(True)', 'header': 'None', 'skiprows': '[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]', 'usecols': '[0, 1, 2]'}), "(f, comment='#', delim_whitespace=True, header=None, skiprows=[0,\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], usecols=[0, 1, 2])\n", (3500, 3624), True, 'import pandas as 
pd\n'), ((4074, 4114), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'header': 'None'}), "(f, comment='#', header=None)\n", (4085, 4114), True, 'import pandas as pd\n'), ((5238, 5304), 'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': 'None', 'delim_whitespace': '(True)', 'header': 'None'}), '(f, delimiter=None, delim_whitespace=True, header=None)\n', (5249, 5304), True, 'import pandas as pd\n'), ((6280, 6383), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'delim_whitespace': '(True)', 'header': 'None', 'usecols': '[0]', 'skiprows': '[0, 3, 4, 5]'}), "(f, comment='#', delim_whitespace=True, header=None, usecols=[0],\n skiprows=[0, 3, 4, 5])\n", (6291, 6383), True, 'import pandas as pd\n'), ((6835, 6901), 'pandas.read_csv', 'pd.read_csv', (['f'], {'delimiter': 'None', 'delim_whitespace': '(True)', 'header': 'None'}), '(f, delimiter=None, delim_whitespace=True, header=None)\n', (6846, 6901), True, 'import pandas as pd\n'), ((1775, 1876), 'pandas.read_csv', 'pd.read_csv', (['f'], {'comment': '"""#"""', 'delim_whitespace': '(True)', 'header': 'None', 'skiprows': '[0]', 'usecols': '[0, 1, 2]'}), "(f, comment='#', delim_whitespace=True, header=None, skiprows=[0\n ], usecols=[0, 1, 2])\n", (1786, 1876), True, 'import pandas as pd\n')] |
from os import path
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Dataset URL (UCI abalone dataset; the raw file is headerless CSV)
DATA_URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data'
# Column labels for the abalone dataset
DATA_LABEL = ['sex', 'length', 'diameter', 'height', 'whole weight',
              'shucked weight', 'viscera weight', 'shell weight', 'rings']
# Fetch the data
def get_data() -> tuple:
    """Load the abalone dataset, caching it locally as data.csv.

    On first run the data is downloaded from the UCI repository; the raw
    file has no header row, so column names are assigned explicitly.

    Returns:
        x: (n_samples, 3) array of shucked/viscera/shell weights.
        y: (n_samples,) array of whole weight.
    """
    if not path.exists('data.csv'):
        # No local copy yet: download.  The source file is headerless, so
        # pass header=None with explicit names (the original read_csv call
        # silently consumed the first data record as a header row).
        df = pd.read_csv(DATA_URL, header=None, names=DATA_LABEL)
        # Cache as CSV (with a header) for subsequent runs.
        df.to_csv('data.csv', index=False)
    else:
        # Read the cached copy.
        df = pd.read_csv('data.csv')
    # Features: the three partial weights (DATA_LABEL[5] .. DATA_LABEL[7]).
    x = np.array(df[DATA_LABEL[5:8]])
    # Target: whole weight (DATA_LABEL[4]).
    y = np.array(df[DATA_LABEL[4]])
    return x, y
# Configure common figure properties
def config_plt(title: str, xlabel: str, ylabel: str):
    """Apply shared figure settings: size, title, axis labels, tight layout, grid."""
    plt.figure(figsize=(12.0, 6.75))
    # Title and axis labels, applied in the original order.
    for setter, text in ((plt.title, title), (plt.xlabel, xlabel), (plt.ylabel, ylabel)):
        setter(text)
    plt.tight_layout()
    plt.grid(True)
# Produce the raw-data chart
def gen_data_polt(x: np.ndarray, y: np.ndarray):
    """Scatter-plot each partial weight against whole weight and save as PNG."""
    feature_names = DATA_LABEL[5:8]
    # Title: first word of each feature name, joined, then the target label.
    first_words = [name.split()[0] for name in feature_names]
    title = '{} - {}'.format(','.join(first_words), DATA_LABEL[4])
    config_plt(title, DATA_LABEL[4].split()[1], DATA_LABEL[4])
    # One color per weight column, each plotted against the whole weight.
    for column, (color, label) in enumerate(zip(['r', 'g', 'b'], feature_names)):
        plt.scatter(x[..., column], y, color=color, label=label)
    # Legend explains which color maps to which feature.
    plt.legend(loc='lower right')
    plt.savefig(f'{title}.png')
# Produce the prediction-vs-answer chart
def gen_result_polt(y_pred: np.ndarray, y: np.ndarray, note: str):
    """Scatter predictions against ground-truth answers and save as PNG."""
    title = 'prediction - answer results ({})'.format(note)
    config_plt(title, 'prediction', 'answer')
    plt.scatter(y_pred, y, color='black')
    plt.savefig(f'{title}.png')
# Main program
def main():
    """Fit a multiple linear regression of the three partial weights on whole
    weight, and save diagnostic plots for the data, train, and test results."""
    # Load the data and plot it first
    x, y = get_data()
    gen_data_polt(x, y)
    # Split into training and test sets (10 held-out samples)
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=10, random_state=0x749487)
    # Fit a multiple linear regression on the training set (x is not 1-D,
    # so this is a multivariate regression)
    lr = LinearRegression().fit(x_train, y_train)
    # Predict on the training set and plot predictions vs. answers
    y_train_pred = lr.predict(x_train)
    gen_result_polt(y_train_pred, y_train, 'train')
    # Predict on the test set and plot predictions vs. answers
    y_test_pred = lr.predict(x_test)
    gen_result_polt(y_test_pred, y_test, 'test')
    # Print the test set, its answers, and the predictions
    print(f'x_test\n{x_test}')
    print(f'y_test\n{y_test}')
    print(f'y_test_pred\n{y_test_pred}')
if __name__ == '__main__':
    # Script entry point
    main()
| [
"matplotlib.pyplot.title",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"sklearn.model_selection.train_test_split",
"os.path.exists",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.... | [((852, 881), 'numpy.array', 'np.array', (['df[DATA_LABEL[5:8]]'], {}), '(df[DATA_LABEL[5:8]])\n', (860, 881), True, 'import numpy as np\n'), ((942, 969), 'numpy.array', 'np.array', (['df[DATA_LABEL[4]]'], {}), '(df[DATA_LABEL[4]])\n', (950, 969), True, 'import numpy as np\n'), ((1094, 1126), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12.0, 6.75)'}), '(figsize=(12.0, 6.75))\n', (1104, 1126), True, 'from matplotlib import pyplot as plt\n'), ((1131, 1147), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (1140, 1147), True, 'from matplotlib import pyplot as plt\n'), ((1152, 1170), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (1162, 1170), True, 'from matplotlib import pyplot as plt\n'), ((1175, 1193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (1185, 1193), True, 'from matplotlib import pyplot as plt\n'), ((1198, 1216), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1214, 1216), True, 'from matplotlib import pyplot as plt\n'), ((1221, 1235), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1229, 1235), True, 'from matplotlib import pyplot as plt\n'), ((1720, 1749), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (1730, 1749), True, 'from matplotlib import pyplot as plt\n'), ((1765, 1792), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{title}.png"""'], {}), "(f'{title}.png')\n", (1776, 1792), True, 'from matplotlib import pyplot as plt\n'), ((2016, 2053), 'matplotlib.pyplot.scatter', 'plt.scatter', (['y_pred', 'y'], {'color': '"""black"""'}), "(y_pred, y, color='black')\n", (2027, 2053), True, 'from matplotlib import pyplot as plt\n'), ((2069, 2096), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{title}.png"""'], {}), "(f'{title}.png')\n", (2080, 2096), True, 'from matplotlib import pyplot as plt\n'), ((2237, 2295), 
'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(10)', 'random_state': '(7640199)'}), '(x, y, test_size=10, random_state=7640199)\n', (2253, 2295), False, 'from sklearn.model_selection import train_test_split\n'), ((515, 538), 'os.path.exists', 'path.exists', (['"""data.csv"""'], {}), "('data.csv')\n", (526, 538), False, 'from os import path\n'), ((580, 601), 'pandas.read_csv', 'pd.read_csv', (['DATA_URL'], {}), '(DATA_URL)\n', (591, 601), True, 'import pandas as pd\n'), ((757, 780), 'pandas.read_csv', 'pd.read_csv', (['"""data.csv"""'], {}), "('data.csv')\n", (768, 780), True, 'import pandas as pd\n'), ((1640, 1699), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[..., i]', 'y'], {'color': 'c', 'label': 'DATA_LABEL[5 + i]'}), '(x[..., i], y, color=c, label=DATA_LABEL[5 + i])\n', (1651, 1699), True, 'from matplotlib import pyplot as plt\n'), ((2359, 2377), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2375, 2377), False, 'from sklearn.linear_model import LinearRegression\n')] |
from copy import deepcopy
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from seqeval.metrics.sequence_labeling import classification_report
from torch.utils.data import DataLoader
from tqdm.autonotebook import tqdm
from pyramid_nested_ner.model import PyramidNer
class PyramidNerTrainer(object):
    """Training and evaluation harness for a :class:`PyramidNer` model.

    Wraps the model's ``nnet`` with an epoch loop (optional early stopping and
    best-weight restoration), per-batch loss computation over the layered
    pyramid outputs plus the optional "remedy" multi-label head, and
    seqeval-based IOB2 evaluation.
    """
    class _TrainingReport(object):
        """Accumulates per-epoch metrics and exposes them as a DataFrame."""
        def __init__(self):
            # Number of epochs recorded so far (used as the report index length).
            self._total = 0
            # Metric name -> list of per-epoch values.
            self.epochs = {
                'train_loss': [],
                'valid_loss': [],
            }
        def add_epoch(self, train_loss, valid_loss=None, **kwargs):
            """Record one epoch; extra metrics may be passed as keyword args."""
            self.epochs['train_loss'].append(train_loss)
            self.epochs['valid_loss'].append(valid_loss)
            for key, arg in kwargs.items():
                # Lazily create a column for any metric seen for the first time.
                self.epochs[key] = self.epochs.get(key, list())
                self.epochs[key].append(arg)
            self._total += 1
        @property
        def report(self):
            """All recorded metrics as a DataFrame indexed by 1-based epoch."""
            return pd.DataFrame(index=np.arange(self._total) + 1, data=self.epochs)
        def plot_loss_report(self):
            """Plot training vs. validation loss over epochs."""
            self.plot_custom_report('train_loss', 'valid_loss')
        def plot_custom_report(self, *columns):
            """Plot the given metric columns, ticking epoch 1 and every 10th epoch."""
            xticks = [i for i in self.report.index if not i % 10 or i == 1]
            self.report[[*columns]].plot(xticks=xticks, xlabel='epoch')
    def __init__(self, pyramid_ner: PyramidNer):
        # Optimizer/scheduler are injected by train(); None until then.
        self._optimizer = None
        self._scheduler = None
        self._model = pyramid_ner
        self.device = self._model.device
    @property
    def nnet(self):
        # The underlying torch module being optimized.
        return self._model.nnet
    @property
    def optim(self):
        return self._optimizer
    @property
    def ner_model(self):
        # The full PyramidNer wrapper (decoding helpers live here).
        return self._model
    def train(
        self,
        train_set: DataLoader,
        optimizer,
        epochs=10,
        patience=np.inf,
        dev_data: DataLoader = None,
        scheduler=None,
        grad_clip=None,
        restore_weights_on='loss'  # 'loss' to restore weights with best dev loss, 'f1' for best dev f1
    ):
        """Run the training loop.

        When ``dev_data`` is given, each epoch is evaluated on it; the weights
        with the best dev loss and best micro-F1 are snapshotted separately and
        the one selected by ``restore_weights_on`` is restored at the end (or
        on early stopping once ``patience`` bad epochs in a row have passed).
        Returns ``(model, training_report)``. KeyboardInterrupt stops training
        gracefully and still restores the best weights.
        """
        if patience is None:
            patience = 0
        train_report = self._TrainingReport()
        self._optimizer = optimizer
        self._scheduler = scheduler
        # Countdown copy of `patience`; reset on every "good" epoch.
        overall_patience = patience
        if restore_weights_on not in ['loss', 'f1', None]:
            raise ValueError(
                f"Param 'restore_weights_on' can only be 'loss' or 'f1', depending on which"
                f" is the preferred metric for weight restoration. Got {restore_weights_on}"
            )
        best_dev_f1, best_dev_loss = 0.0, np.inf
        # Separate snapshots: best-by-loss and best-by-f1 can be different epochs.
        best_weights = {'loss': None, 'f1': None}
        try:
            for epoch in range(epochs):
                print('==============================')
                print(f'Training epoch {epoch + 1}...')
                print('==============================')
                train_loss = self._training_epoch(train_set, grad_clip)
                if self._scheduler:
                    self._scheduler.step()
                if dev_data is not None:
                    report = self.test_model(dev_data, out_dict=True)
                    micro_f1 = report['micro avg']['f1-score'] * 100
                    dev_loss = report['loss']
                    train_report.add_epoch(
                        train_loss, dev_loss, micro_f1=micro_f1
                    )
                    if dev_loss < best_dev_loss or micro_f1 > best_dev_f1:  # good epoch!!
                        overall_patience = patience
                        if dev_loss < best_dev_loss:
                            best_dev_loss = dev_loss
                            best_weights['loss'] = deepcopy(self.nnet.state_dict())
                        if micro_f1 > best_dev_f1:
                            best_dev_f1 = micro_f1
                            best_weights['f1'] = deepcopy(self.nnet.state_dict())
                    elif patience < np.inf:
                        print(f'Bad epoch... (patience left: {overall_patience}/{patience})')
                        # Stop only once patience is exhausted AND we have a snapshot to restore.
                        if not overall_patience and best_weights.get(restore_weights_on) is not None:
                            print('Stopping early (restoring best weights)...')
                            self.nnet.load_state_dict(best_weights[restore_weights_on], strict=False)
                            break
                        overall_patience -= 1
                else:
                    train_report.add_epoch(train_loss, None)
        except KeyboardInterrupt:
            print("Caught KeyboardInterrupt, stopping training.")
        if restore_weights_on and best_weights.get(restore_weights_on) is not None:
            print('Training is done (restoring model\'s best weights)')
            self.nnet.load_state_dict(best_weights[restore_weights_on], strict=False)
        return self._model, train_report
    def _training_epoch(self, train_set, grad_clip):
        """One pass over `train_set`; returns the mean per-batch training loss."""
        train_loss = list()
        self.nnet.train(mode=True)
        pbar = tqdm(total=len(train_set))
        for batch in train_set:
            # forward
            # Targets and ids are popped so the remaining batch dict can be
            # splatted directly into the network's forward().
            y, remedy_y, ids = batch.pop('y'), batch.pop('y_remedy'), batch.pop('id')
            self._optimizer.zero_grad()
            logits, remedy = self.nnet(**batch)
            # loss computation
            loss = self.compute_loss(logits, y, batch['word_mask'], remedy, remedy_y)
            train_loss.append(loss.item())
            # backward
            loss.backward()
            if grad_clip:
                torch.nn.utils.clip_grad_norm_(self.nnet.parameters(), grad_clip)
            pbar.set_description(f'train loss: {round(np.mean(train_loss), 2)}')
            self._optimizer.step()
            pbar.update(1)
        pbar.close()
        return np.mean(train_loss)
    def test_model(self, test_data, out_dict=False):
        """Evaluate on `test_data`; returns a seqeval classification report.

        With ``out_dict=True`` the report is a dict and additionally carries
        the mean validation loss under the 'loss' key.
        """
        loss = list()
        pred, true = list(), list()
        pbar = tqdm(total=len(test_data))
        for batch in test_data:
            # inference
            self.nnet.eval()
            y, remedy_y, ids = batch.pop('y'), batch.pop('y_remedy'), batch.pop('id')
            with torch.no_grad():
                logits, remedy = self.nnet(**batch)
            self.nnet.train(mode=True)
            # loss computation
            batch_loss = self.compute_loss(logits, y, batch['word_mask'], remedy, remedy_y).item()
            loss.append(batch_loss)
            # Decode logits into per-layer class indices, then into IOB2 tags.
            layers_y_hat = self._model.logits_to_classes(logits)
            remedy_y_hat = self._model.remedy_to_classes(remedy)
            pbar.set_description(f'valid loss: {round(np.mean(loss), 3)}')
            y_pred, y_true = self._classes_to_iob2(layers_y_hat, y, True, remedy_y_hat, remedy_y)
            pred.extend(y_pred)
            true.extend(y_true)
            pbar.update(1)
        report = self.classification_report(pred, true, out_dict)
        if out_dict:
            report['loss'] = np.mean(loss)
            f1_score = round(report["micro avg"]["f1-score"] * 100, 2)
            pbar.set_description(
                f'valid loss: {round(np.mean(loss), 2)}; micro f1: {f1_score}%'
            )
        pbar.close()
        return report
    @staticmethod
    def compute_loss(logits, y, mask, remedy_logits=None, remedy_y=None) -> torch.Tensor:
        """Summed, mask-weighted loss over all pyramid layers (+ remedy head).

        Each layer i uses cross-entropy weighted by ``mask[:, i:]``; the
        remedy head, when present, uses BCE-with-logits on the remaining
        mask columns. Remedy logits and targets must be given together.
        """
        assert len(logits) == len(y), 'Predictions and labels are misaligned.'
        if remedy_y is None or remedy_logits is None:
            assert remedy_y is None and remedy_logits is None, 'Predictions and labels are misaligned'
        cross_entropy = nn.CrossEntropyLoss(reduction='none')
        loss = 0.0
        for i, (logits_layer, y_layer) in enumerate(zip(logits, y)):
            # permute to (batch, classes, positions) as CrossEntropyLoss expects.
            layer_loss = cross_entropy(logits_layer.permute(0, -1, 1), y_layer)
            loss += torch.sum(layer_loss * mask[:, i:])
        if remedy_y is not None and remedy_logits is not None:
            ml_loss = nn.BCEWithLogitsLoss(reduction='none')(remedy_logits, remedy_y)
            ml_mask = mask[:, len(logits):].unsqueeze(-1).expand_as(ml_loss)
            loss += torch.sum(ml_loss * ml_mask)
        # note that we return the sum of the loss of each token, rather than averaging it;
        # average leads to a loss that is too small and generates small gradients that pr-
        # event the model from learning anything due to its depth.
        return loss
    def _classes_to_iob2(self, pred, true, flatten=False, remedy_pred=None, remedy_true=None):
        """Convert class indices to IOB2 tags, padding the shorter side with 'O' layers."""
        y_pred = self._model.classes_to_iob2(pred, remedy=remedy_pred)
        y_true = self._model.classes_to_iob2(true, remedy=remedy_true)
        if len(y_pred) > len(y_true):
            y_true.extend([[['O' for _ in y] for y in extra_layer] for extra_layer in y_pred[len(y_true):]])
        if len(y_true) > len(y_pred):
            y_pred.extend([[['O' for _ in y] for y in extra_layer] for extra_layer in y_true[len(y_pred):]])
        if flatten:
            # Collapse the per-layer nesting into a flat list of sequences.
            y_pred = [seq for layer in y_pred for seq in layer]
            y_true = [seq for layer in y_true for seq in layer]
        return y_pred, y_true
    @staticmethod
    def classification_report(y_pred, y_true, out_dict=False):
        """Strict IOB2 seqeval report for the given tag sequences."""
        from seqeval.scheme import IOB2
        report = classification_report(
            y_true,
            y_pred,
            digits=4,
            output_dict=out_dict,
            mode='strict',
            zero_division=0,
            scheme=IOB2
        )
        return report
| [
"torch.nn.BCEWithLogitsLoss",
"torch.nn.CrossEntropyLoss",
"seqeval.metrics.sequence_labeling.classification_report",
"numpy.mean",
"numpy.arange",
"torch.no_grad",
"torch.sum"
] | [((5764, 5783), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (5771, 5783), True, 'import numpy as np\n'), ((7537, 7574), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (7556, 7574), True, 'import torch.nn as nn\n'), ((9196, 9315), 'seqeval.metrics.sequence_labeling.classification_report', 'classification_report', (['y_true', 'y_pred'], {'digits': '(4)', 'output_dict': 'out_dict', 'mode': '"""strict"""', 'zero_division': '(0)', 'scheme': 'IOB2'}), "(y_true, y_pred, digits=4, output_dict=out_dict, mode=\n 'strict', zero_division=0, scheme=IOB2)\n", (9217, 9315), False, 'from seqeval.metrics.sequence_labeling import classification_report\n'), ((6911, 6924), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6918, 6924), True, 'import numpy as np\n'), ((7763, 7798), 'torch.sum', 'torch.sum', (['(layer_loss * mask[:, i:])'], {}), '(layer_loss * mask[:, i:])\n', (7772, 7798), False, 'import torch\n'), ((8046, 8074), 'torch.sum', 'torch.sum', (['(ml_loss * ml_mask)'], {}), '(ml_loss * ml_mask)\n', (8055, 8074), False, 'import torch\n'), ((6126, 6141), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6139, 6141), False, 'import torch\n'), ((7885, 7923), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (7905, 7923), True, 'import torch.nn as nn\n'), ((978, 1000), 'numpy.arange', 'np.arange', (['self._total'], {}), '(self._total)\n', (987, 1000), True, 'import numpy as np\n'), ((5639, 5658), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (5646, 5658), True, 'import numpy as np\n'), ((6584, 6597), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (6591, 6597), True, 'import numpy as np\n'), ((7067, 7080), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (7074, 7080), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
#Create data: three 1-D training points and their targets
x=np.array([0.,0.5,1.])
y_exact=np.array([0.,1.,1.])
#Initialise W and b (the two trainable parameters of F = W*x + b)
W=0.5
b=0.5
#Set other constants: N samples, learning rate eta, iteration budget
N=3
eta=0.75
MaxIter=64
#Initialise approximation
F=W * x + b
#Functions
def cost():
    """Mean cost (1/N) * sum(0.5 * (y_exact - F)**2), read from the module-level F."""
    return (1/N) * np.sum( 0.5* (y_exact - F)**2 )
#Partial derivates of cost
def cost_W():
    """Partial derivative of the cost w.r.t. W, evaluated at the module-level W and b."""
    return (1/N) * np.sum( x*(W*x- y_exact +b) )
def cost_b():
    """Partial derivative of the cost w.r.t. b, evaluated at the module-level W and b."""
    return (1/N) * np.sum( W*x - y_exact + b )
#Cost_vec: per-iteration cost history and matching x-axis values
cost_vec=np.empty(MaxIter)
j=np.arange(0,MaxIter,1)
#Perform gradient descent
for i in range(0,MaxIter):
    #Forward pass
    F=W*x+b
    # Evaluate BOTH partial derivatives at the current (W, b) before updating.
    # BUGFIX: the original updated W first and then called cost_b(), which reads
    # the module-level W and therefore used the already-updated value -- a
    # non-simultaneous (coordinate-wise) update rather than a true gradient step.
    grad_W = cost_W()
    grad_b = cost_b()
    W = W - eta * grad_W
    b = b - eta * grad_b
    #Calculate newcost (uses F from this iteration's forward pass)
    newcost=cost()
    cost_vec[i]=newcost
    #print(newcost)
plt.plot(j,cost_vec)
plt.title('Cost')
plt.xlabel('Iteration')
plt.ylabel('Cost')
plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.empty",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((67, 92), 'numpy.array', 'np.array', (['[0.0, 0.5, 1.0]'], {}), '([0.0, 0.5, 1.0])\n', (75, 92), True, 'import numpy as np\n'), ((97, 122), 'numpy.array', 'np.array', (['[0.0, 1.0, 1.0]'], {}), '([0.0, 1.0, 1.0])\n', (105, 122), True, 'import numpy as np\n'), ((481, 498), 'numpy.empty', 'np.empty', (['MaxIter'], {}), '(MaxIter)\n', (489, 498), True, 'import numpy as np\n'), ((501, 525), 'numpy.arange', 'np.arange', (['(0)', 'MaxIter', '(1)'], {}), '(0, MaxIter, 1)\n', (510, 525), True, 'import numpy as np\n'), ((777, 798), 'matplotlib.pyplot.plot', 'plt.plot', (['j', 'cost_vec'], {}), '(j, cost_vec)\n', (785, 798), True, 'import matplotlib.pyplot as plt\n'), ((798, 815), 'matplotlib.pyplot.title', 'plt.title', (['"""Cost"""'], {}), "('Cost')\n", (807, 815), True, 'import matplotlib.pyplot as plt\n'), ((816, 839), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Iteration"""'], {}), "('Iteration')\n", (826, 839), True, 'import matplotlib.pyplot as plt\n'), ((840, 858), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cost"""'], {}), "('Cost')\n", (850, 858), True, 'import matplotlib.pyplot as plt\n'), ((859, 869), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (867, 869), True, 'import matplotlib.pyplot as plt\n'), ((278, 310), 'numpy.sum', 'np.sum', (['(0.5 * (y_exact - F) ** 2)'], {}), '(0.5 * (y_exact - F) ** 2)\n', (284, 310), True, 'import numpy as np\n'), ((370, 403), 'numpy.sum', 'np.sum', (['(x * (W * x - y_exact + b))'], {}), '(x * (W * x - y_exact + b))\n', (376, 403), True, 'import numpy as np\n'), ((433, 460), 'numpy.sum', 'np.sum', (['(W * x - y_exact + b)'], {}), '(W * x - y_exact + b)\n', (439, 460), True, 'import numpy as np\n')] |
import warnings
warnings.filterwarnings('ignore')  # suppress all Python warnings (third-party deprecation chatter)
import tensorflow as tf
tf.get_logger().setLevel('ERROR')  # keep TensorFlow logging to errors only
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.deepq.policies import FeedForwardPolicy
from stable_baselines import DQN, PPO2
import argparse
import gym
import numpy as np
import pandas as pd
from tqdm import tqdm
from policies import CustomMlpPolicy_1, CustomMlpPolicy_2
from policies import CustomLstmMlpPolicy_2, CustomLstmMlpPolicy_3
from policies import CustomCnnPolicy, CustomLstmCnnPolicy
def test_model(model, env, num_epochs=100, max_steps=100, vectorized_env=False, csv_file="steps_vs_targets.csv"):
    """Evaluate a trained agent and log per-step / per-episode statistics.

    Runs `num_epochs` episodes of at most `max_steps` steps each, classifies
    every reward into target / valid / revisit / no-go actions, prints
    aggregate statistics, and writes the average number of targets found by
    each step index to `csv_file`.

    Args:
        model: trained agent exposing `predict(observation)`.
        env: gym-style environment (`reset()` / `step(action)`).
        num_epochs: number of evaluation episodes.
        max_steps: maximum steps per episode.
        vectorized_env: True when `env` is a (single-env) VecEnv whose `step`
            returns batched rewards/dones that must be unwrapped.
        csv_file: output path for the steps-vs-targets CSV.
    """
    log_rewards = []
    log_targets = []
    log_target_perc = []
    log_valid_perc = []
    log_revisit_perc = []
    log_invalid_perc = []
    log_episode_lengths = []
    log_target_steps = []
    # For each step index (1-based), the targets-found-so-far of every episode
    # that reached that step.
    log_targets_per_step = {step: [] for step in range(1, max_steps + 1)}
    for episode in tqdm(range(num_epochs)):
        logical_state = env.reset()
        r = 0
        targets, valids, revisits, invalids = 0, 0, 0, 0
        target_steps = []
        episode_length = 0
        for i in range(max_steps):
            episode_length += 1
            action, _states = model.predict(logical_state)
            logical_state, reward, done, _infos = env.step(action)
            if vectorized_env:
                reward, done = reward[0], done[0]  # unwrap the 1-env batch
            r += reward
            # Reward encodes the action class: 1 target, -0.01 valid,
            # -0.5 revisit, -1 no-go (see the env construction in get_environment).
            if reward >= 1:
                targets += 1
                target_steps.append(i + 1)
            elif reward > -0.2:
                valids += 1
            elif reward == -0.5:
                revisits += 1
            elif reward == -1:
                invalids += 1
            log_targets_per_step[i + 1].append(targets)
            if done:
                break
        log_rewards.append(r)
        log_targets.append(targets)
        log_target_perc.append(targets*100.0/episode_length)
        log_valid_perc.append(valids*100.0/episode_length)
        log_revisit_perc.append(revisits*100.0/episode_length)
        log_invalid_perc.append(invalids*100.0/episode_length)
        log_episode_lengths.append(episode_length)
        log_target_steps.append(target_steps)
    print("\n" + "##" * 30)
    print("Avg Reward: {:.2f}".format(np.mean(log_rewards)))
    print("Avg episode length: {:.2f}".format(np.mean(log_episode_lengths)))
    print("Avg # of targets reached: {:.2f}".format(np.mean(log_targets)))
    # First element of each non-empty per-episode target-step list.
    first_target = [_[0] for _ in log_target_steps if len(_) > 0]
    print("Avg # of steps to find first target: {:.2f}".format(np.mean(first_target)))
    print("\n\n")
    print("Avg % of target actions: {:.2f}".format(np.mean(log_target_perc)))
    print("Avg % of valid actions: {:.2f}".format(np.mean(log_valid_perc)))
    print("Avg % of revisit actions: {:.2f}".format(np.mean(log_revisit_perc)))
    print("Avg % of no go actions: {:.2f}".format(np.mean(log_invalid_perc)))
    print("##" * 30)
    df = pd.DataFrame()
    df['steps'] = list(range(1, max_steps+1))
    df['Avg Targets'] = [np.mean(log_targets_per_step[i]) for i in range(1, max_steps+1)]
    df.to_csv(csv_file, index=False)
def train_dqn(train_env, custom_policy, gamma, learning_rate,
              buffer_size, batch_size, training_timesteps, model_file):
    """Fit a DQN agent on `train_env` and persist it to `model_file`.

    Builds a fresh DQN model with the supplied policy and hyper-parameters,
    trains it for `training_timesteps` steps (logging every 100 updates)
    and saves the result to disk.
    """
    agent = DQN(
        custom_policy,
        train_env,
        gamma=gamma,
        learning_rate=learning_rate,
        buffer_size=buffer_size,
        batch_size=batch_size,
    )
    agent.learn(total_timesteps=training_timesteps, log_interval=100)
    agent.save(model_file)
def train_ppo(train_env, custom_policy, gamma, n_steps, learning_rate,
              training_timesteps, model_file):
    """Fit a PPO2 agent on `train_env` and persist it to `model_file`.

    Builds a fresh PPO2 model (single minibatch per update) with the supplied
    policy and hyper-parameters, trains it for `training_timesteps` steps
    (logging every 100 updates) and saves the result to disk.
    """
    agent = PPO2(
        custom_policy,
        train_env,
        gamma=gamma,
        n_steps=n_steps,
        learning_rate=learning_rate,
        nminibatches=1,
    )
    agent.learn(total_timesteps=training_timesteps, log_interval=100)
    agent.save(model_file)
def get_environment(feature_type, config_files, max_steps, algorithm):
    """Build the polymer-discovery gym environment and its matching policy class.

    Args:
        feature_type: 'direct', 'logical' or 'logical_with_regressor'.
        config_files: path(s) forwarded to the environment constructor.
        max_steps: episode length limit for the environment.
        algorithm: 'dqn' or 'ppo'. For 'ppo' the recurrent policies require a
            vectorized environment, so the env is wrapped in a DummyVecEnv.

    Returns:
        (env, policy) tuple.

    Raises:
        ValueError: on an unknown `algorithm` or `feature_type`.
    """
    if algorithm not in ['dqn', 'ppo']:
        raise ValueError("algorithm is not in {dqn, ppo}")
    # Same reward scheme for every feature type.
    rewards = {"target": 1, "valid": -0.01, "revisit": -0.5, "no-go": -1}
    if feature_type == 'direct':
        env = gym.make(id="md_envs:direct-eADPD-v1",
                       config_files=config_files,
                       max_steps=max_steps,
                       rewards=rewards)
        policy = CustomCnnPolicy if algorithm == 'dqn' else CustomLstmCnnPolicy
    elif feature_type == 'logical':
        env = gym.make(id="md_envs:logical-eADPD-v1",
                       config_files=config_files,
                       max_steps=max_steps,
                       regressor_type=None,
                       rewards=rewards)
        policy = CustomMlpPolicy_1 if algorithm == 'dqn' else CustomLstmMlpPolicy_2
    elif feature_type == 'logical_with_regressor':
        env = gym.make(id="md_envs:logical-eADPD-v1",
                       config_files=config_files,
                       max_steps=max_steps,
                       rewards=rewards)
        policy = CustomMlpPolicy_2 if algorithm == 'dqn' else CustomLstmMlpPolicy_3
    else:
        raise ValueError("feature_type is not in {direct, logical, logical_with_regressor}")
    if algorithm != 'dqn':
        # The original `DummyVecEnv([lambda: _ for _ in [env]])` relied on the
        # late-binding closure over the comprehension variable; `lambda: env`
        # builds the same single-env VecEnv explicitly.
        env = DummyVecEnv([lambda: env])
    return env, policy
if __name__ == '__main__':
    # CLI with two sub-commands: `train` fits a new agent, `test` evaluates a
    # saved one. Both are parameterised by feature type and RL algorithm.
    parser = argparse.ArgumentParser(prog="python scripts/main.py",
                                     description="scripts for Agent Driven Polymer Discovery environment")
    subparsers = parser.add_subparsers(dest="sub_command", help="sub-command help")
    parser_train = subparsers.add_parser("train", help="train a model")
    parser_train.add_argument('-f', "--feature_type", required=True,
                              help="direct, logical or logical_with_regressor", metavar='')
    parser_train.add_argument('-a', "--algorithm", required=True, help="dqn or ppo", metavar='')
    parser_train.add_argument('-o', "--output_model", required=True, help="output model file", metavar='')
    parser_test = subparsers.add_parser('test', help="test a trained model")
    parser_test.add_argument('-f', "--feature_type", required=True,
                             help="direct, logical or logical_with_regressor", metavar='')
    parser_test.add_argument('-a', "--algorithm", required=True, help="dqn or ppo", metavar='')
    parser_test.add_argument('-m', "--model", required=True, help="path to trained model file", metavar='')
    parser_test.add_argument('-r', "--results_file",
                             required=False,
                             help="output csv file for steps vs targets results. default steps_vs_targets.csv",
                             metavar='')
    args = parser.parse_args()
    if args.sub_command == "train":
        feature_type = args.feature_type.lower()
        algorithm = args.algorithm.lower()
        output_model = args.output_model
        # Training uses 200-step episodes on the train split.
        env, policy = get_environment(feature_type, "./data/polymerDiscovery/train.csv", 200, algorithm)
        # Hyper-parameters below (gamma, learning rate, buffer/batch sizes or
        # n_steps, and timestep budgets) differ per (algorithm, feature) pair.
        if algorithm == 'dqn' and feature_type == 'direct':
            train_dqn(env, policy, 0.8, 0.003, 20000, 64, 10000, output_model)
        elif algorithm == 'dqn' and feature_type == 'logical':
            train_dqn(env, policy, 0.8, 0.0003, 20000, 32, 500000, output_model)
        elif algorithm == 'dqn' and feature_type == 'logical_with_regressor':
            train_dqn(env, policy, 0.8, 0.0003, 20000, 32, 500000, output_model)
        elif algorithm == 'ppo' and feature_type == 'direct':
            train_ppo(env, policy, 0.8, 512, 0.003, 10000, output_model)
        elif algorithm == 'ppo' and feature_type == 'logical':
            train_ppo(env, policy, 0.8, 512, 0.003, 100000, output_model)
        elif algorithm == 'ppo' and feature_type == 'logical_with_regressor':
            train_ppo(env, policy, 0.8, 2048, 0.003, 500000, output_model)
    if args.sub_command == "test":
        feature_type = args.feature_type.lower()
        algorithm = args.algorithm.lower()
        model = args.model
        results_file = args.results_file
        results_file = results_file if results_file else "steps_vs_targets.csv"
        # Evaluation uses 100-step episodes on the held-out test split.
        env, policy = get_environment(feature_type, "./data/polymerDiscovery/test.csv", 100, algorithm)
        if algorithm == 'dqn':
            model = DQN.load(model)
        else:
            model = PPO2.load(model)
        # PPO envs are wrapped in a DummyVecEnv, so step() returns batched outputs.
        vectorized_env = False if algorithm == 'dqn' else True
        test_model(model, env, num_epochs=100, max_steps=100, vectorized_env=vectorized_env, csv_file=results_file)
| [
"pandas.DataFrame",
"stable_baselines.common.vec_env.DummyVecEnv",
"stable_baselines.PPO2",
"stable_baselines.DQN",
"argparse.ArgumentParser",
"warnings.filterwarnings",
"gym.make",
"numpy.mean",
"stable_baselines.PPO2.load",
"stable_baselines.DQN.load",
"tensorflow.get_logger"
] | [((17, 50), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (40, 50), False, 'import warnings\n'), ((3103, 3117), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3115, 3117), True, 'import pandas as pd\n'), ((3441, 3564), 'stable_baselines.DQN', 'DQN', (['custom_policy', 'train_env'], {'gamma': 'gamma', 'learning_rate': 'learning_rate', 'buffer_size': 'buffer_size', 'batch_size': 'batch_size'}), '(custom_policy, train_env, gamma=gamma, learning_rate=learning_rate,\n buffer_size=buffer_size, batch_size=batch_size)\n', (3444, 3564), False, 'from stable_baselines import DQN, PPO2\n'), ((3895, 4005), 'stable_baselines.PPO2', 'PPO2', (['custom_policy', 'train_env'], {'gamma': 'gamma', 'n_steps': 'n_steps', 'learning_rate': 'learning_rate', 'nminibatches': '(1)'}), '(custom_policy, train_env, gamma=gamma, n_steps=n_steps, learning_rate=\n learning_rate, nminibatches=1)\n', (3899, 4005), False, 'from stable_baselines import DQN, PPO2\n'), ((5941, 6070), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': '"""python scripts/main.py"""', 'description': '"""scripts for Agent Driven Polymer Discovery environment"""'}), "(prog='python scripts/main.py', description=\n 'scripts for Agent Driven Polymer Discovery environment')\n", (5964, 6070), False, 'import argparse\n'), ((76, 91), 'tensorflow.get_logger', 'tf.get_logger', ([], {}), '()\n', (89, 91), True, 'import tensorflow as tf\n'), ((3189, 3221), 'numpy.mean', 'np.mean', (['log_targets_per_step[i]'], {}), '(log_targets_per_step[i])\n', (3196, 3221), True, 'import numpy as np\n'), ((4427, 4591), 'gym.make', 'gym.make', ([], {'id': '"""md_envs:direct-eADPD-v1"""', 'config_files': 'config_files', 'max_steps': 'max_steps', 'rewards': "{'target': 1, 'valid': -0.01, 'revisit': -0.5, 'no-go': -1}"}), "(id='md_envs:direct-eADPD-v1', config_files=config_files, max_steps\n =max_steps, rewards={'target': 1, 'valid': -0.01, 'revisit': -0.5,\n 'no-go': -1})\n", 
(4435, 4591), False, 'import gym\n'), ((2414, 2434), 'numpy.mean', 'np.mean', (['log_rewards'], {}), '(log_rewards)\n', (2421, 2434), True, 'import numpy as np\n'), ((2483, 2511), 'numpy.mean', 'np.mean', (['log_episode_lengths'], {}), '(log_episode_lengths)\n', (2490, 2511), True, 'import numpy as np\n'), ((2566, 2586), 'numpy.mean', 'np.mean', (['log_targets'], {}), '(log_targets)\n', (2573, 2586), True, 'import numpy as np\n'), ((2718, 2739), 'numpy.mean', 'np.mean', (['first_target'], {}), '(first_target)\n', (2725, 2739), True, 'import numpy as np\n'), ((2811, 2835), 'numpy.mean', 'np.mean', (['log_target_perc'], {}), '(log_target_perc)\n', (2818, 2835), True, 'import numpy as np\n'), ((2888, 2911), 'numpy.mean', 'np.mean', (['log_valid_perc'], {}), '(log_valid_perc)\n', (2895, 2911), True, 'import numpy as np\n'), ((2966, 2991), 'numpy.mean', 'np.mean', (['log_revisit_perc'], {}), '(log_revisit_perc)\n', (2973, 2991), True, 'import numpy as np\n'), ((3044, 3069), 'numpy.mean', 'np.mean', (['log_invalid_perc'], {}), '(log_invalid_perc)\n', (3051, 3069), True, 'import numpy as np\n'), ((4778, 4820), 'stable_baselines.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[(lambda : _) for _ in [env]]'], {}), '([(lambda : _) for _ in [env]])\n', (4789, 4820), False, 'from stable_baselines.common.vec_env import DummyVecEnv\n'), ((4869, 5054), 'gym.make', 'gym.make', ([], {'id': '"""md_envs:logical-eADPD-v1"""', 'config_files': 'config_files', 'max_steps': 'max_steps', 'regressor_type': 'None', 'rewards': "{'target': 1, 'valid': -0.01, 'revisit': -0.5, 'no-go': -1}"}), "(id='md_envs:logical-eADPD-v1', config_files=config_files,\n max_steps=max_steps, regressor_type=None, rewards={'target': 1, 'valid':\n -0.01, 'revisit': -0.5, 'no-go': -1})\n", (4877, 5054), False, 'import gym\n'), ((8967, 8982), 'stable_baselines.DQN.load', 'DQN.load', (['model'], {}), '(model)\n', (8975, 8982), False, 'from stable_baselines import DQN, PPO2\n'), ((9017, 9033), 
'stable_baselines.PPO2.load', 'PPO2.load', (['model'], {}), '(model)\n', (9026, 9033), False, 'from stable_baselines import DQN, PPO2\n'), ((5269, 5311), 'stable_baselines.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[(lambda : _) for _ in [env]]'], {}), '([(lambda : _) for _ in [env]])\n', (5280, 5311), False, 'from stable_baselines.common.vec_env import DummyVecEnv\n'), ((5375, 5540), 'gym.make', 'gym.make', ([], {'id': '"""md_envs:logical-eADPD-v1"""', 'config_files': 'config_files', 'max_steps': 'max_steps', 'rewards': "{'target': 1, 'valid': -0.01, 'revisit': -0.5, 'no-go': -1}"}), "(id='md_envs:logical-eADPD-v1', config_files=config_files,\n max_steps=max_steps, rewards={'target': 1, 'valid': -0.01, 'revisit': -\n 0.5, 'no-go': -1})\n", (5383, 5540), False, 'import gym\n'), ((5731, 5773), 'stable_baselines.common.vec_env.DummyVecEnv', 'DummyVecEnv', (['[(lambda : _) for _ in [env]]'], {}), '([(lambda : _) for _ in [env]])\n', (5742, 5773), False, 'from stable_baselines.common.vec_env import DummyVecEnv\n')] |
from scipy.io import loadmat
import numpy as np
import os
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
plt.switch_backend('TKAgg')  # select the interactive Tk backend before any figure is created
from wfdb import rdann
"""Expands the sleep stage and apnea/hypopnea annotation files from Physionet into numpy memmaps with one annotation per sample"""
########################################################################################################################
# SET THESE BASED ON YOUR ENVIRONMENT
arousalDataPath = 'path/to/arousal/.mat/annotations'
dataPath = 'path/to/WFDB/annotation/files/from/physionet'
# Paths to save the expanded annotations
sleepStageFilePath = 'path/to/save/sleepStage/annotations/'
sleepWakeFilePath = 'path/to/save/sleeWake/annotations/'
apneaHypopneaFilePath = 'path/to/save/apneaHypopnea/annotations/'
obstructiveApneaHypopneaFilePath = 'path/to/save/obstructiveApneaHypopnea/annotations/'
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Load records list and define data extraction mode.
# BUGFIX: this used to be a bare `filter(...)` object, but
# classificationDataset.__getitem__ indexes it with `recordList[item]`; a lazy
# filter is not subscriptable and raised TypeError on the first item accessed.
# Materialise it into a sorted list (sorted so item->record mapping is
# deterministic across platforms; os.listdir order is arbitrary).
recordList = sorted(record for record in os.listdir(dataPath) if os.path.isdir(dataPath + record))
# apneaHypopneaMode = 'apneaHypopnea' was used for the final submitted models, 'obstructiveApneaHypopnea' was investigated but not used in the final work
apneaHypopneaMode = 'apneaHypopnea' # apneaHypopnea is referred to all types of Apnea and Hypopnea
#apneaHypopneaMode = 'obstructiveApneaHypopnea' # obstructiveApneaHypopnea is referred only obstructive Apnea and Hypopnea
# Convenience function to load annotations
def loadAnnotations(recordName):
    """Load the per-sample arousal targets and the WFDB event annotations for one record.

    Returns a tuple ``(arousalAnnotations, sleepZoneAnnotations)`` where the
    former is a 1-D int32 numpy array (one entry per signal sample, read from
    the record's '-arousal.mat' file) and the latter is the wfdb annotation
    object read from the record's 'arousal' annotation file.
    """
    matContents = loadmat(arousalDataPath + recordName + '-arousal.mat')
    arousalAnnotations = matContents['data']['arousals'][0][0]
    arousalAnnotations = np.squeeze(arousalAnnotations.astype(np.int32))
    sleepZoneAnnotations = rdann(dataPath + recordName + '/' + recordName, 'arousal')
    return arousalAnnotations, sleepZoneAnnotations
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class classificationDataset(Dataset):
    """Expands event-based Physionet arousal annotations into per-sample labels.

    Each item yields three numpy arrays with one entry per signal sample:
      * apnea/hypopnea labels -- with apneaHypopneaMode == 'apneaHypopnea':
        0 normal, 1 any apnea, 2 hypopnea; with 'obstructiveApneaHypopnea':
        0 normal, 1 obstructive apnea, 2 hypopnea, 3 central apnea, 4 mixed apnea,
      * sleep/wake labels: -1 undefined, 0 wake, 1 sleep,
      * sleep-stage labels: 1 W, 2 REM, 3 N1, 4 N2, 5 N3, 6 undefined.
    """

    # aux_note marker -> (sleep-stage code, provisional sleep/wake code).
    # Wake is provisionally coded 2 so it can be told apart from the untouched
    # 0 ("still undefined") samples and remapped at the end (2 -> 0, 0 -> -1).
    _STAGE_CODES = {
        'W': (1, 2),   # wake
        'R': (2, 1),   # REM
        'N1': (3, 1),  # NREM stage 1
        'N2': (4, 1),  # NREM stage 2
        'N3': (5, 1),  # NREM stage 3
    }

    def __init__(self):
        super(classificationDataset, self).__init__()

    def __len__(self):
        # Size of the Physionet record set this script was written for.
        return 994

    @staticmethod
    def _fill_regions(annotation, starts, ends, label):
        """Write `label` into annotation[start:end] for every recorded event pair.

        `starts`/`ends` are parallel arrays of sample indices with unused slots
        left at 0; zero entries are dropped before filling.
        """
        starts = np.squeeze(starts[starts > 0].astype(np.int32))
        ends = np.squeeze(ends[ends > 0].astype(np.int32))
        if np.size(starts) > 1:
            for k in range(len(starts)):
                annotation[starts[k]:ends[k]] = label
        elif np.size(starts) == 1:
            # squeeze() produced 0-d arrays; they still work as slice bounds.
            annotation[starts:ends] = label

    def __getitem__(self, item):
        recordName = str(recordList[item])
        arousalAnnotations, sleepZoneAnnotations = loadAnnotations(recordName)
        numEvents = len(sleepZoneAnnotations.sample)
        numOutput = len(arousalAnnotations)
        apneaHypopneaAnn = np.zeros(shape=numOutput)
        sleepWakeAnnotation = np.zeros(shape=numOutput)
        sleepStageAnnotation = np.zeros(shape=numOutput)
        # Event boundary buffers: one slot per annotation entry, unused stay 0.
        startObsApnea, endObsApnea = np.zeros(shape=numEvents), np.zeros(shape=numEvents)
        startCntApnea, endCntApnea = np.zeros(shape=numEvents), np.zeros(shape=numEvents)
        startMixApnea, endMixApnea = np.zeros(shape=numEvents), np.zeros(shape=numEvents)
        startHypopnea, endHypopnea = np.zeros(shape=numEvents), np.zeros(shape=numEvents)
        startApnea, endApnea = np.zeros(shape=numEvents), np.zeros(shape=numEvents)
        for idx in range(numEvents):
            note = sleepZoneAnnotations.aux_note[idx]
            sample = sleepZoneAnnotations.sample[idx]
            # Hypopnea boundaries (collected in both modes).
            if note == '(resp_hypopnea':
                startHypopnea[idx] = sample
            elif note == 'resp_hypopnea)':
                endHypopnea[idx] = sample
            # Mode-dependent apnea boundaries.
            if apneaHypopneaMode == 'apneaHypopnea':
                if note in ('(resp_obstructiveapnea', '(resp_centralapnea', '(resp_mixedapnea'):
                    startApnea[idx] = sample
                elif note in ('resp_obstructiveapnea)', 'resp_centralapnea)', 'resp_mixedapnea)'):
                    endApnea[idx] = sample
            elif apneaHypopneaMode == 'obstructiveApneaHypopnea':
                if note == '(resp_obstructiveapnea':
                    startObsApnea[idx] = sample
                elif note == 'resp_obstructiveapnea)':
                    endObsApnea[idx] = sample
                elif note == '(resp_centralapnea':
                    startCntApnea[idx] = sample
                elif note == 'resp_centralapnea)':
                    endCntApnea[idx] = sample
                elif note == '(resp_mixedapnea':
                    startMixApnea[idx] = sample
                elif note == 'resp_mixedapnea)':
                    endMixApnea[idx] = sample
            # Sleep-stage onset markers. BUGFIX: these checks used to sit in the
            # elif chain INSIDE the 'obstructiveApneaHypopnea' branch, so sleep
            # stages were never recorded when apneaHypopneaMode == 'apneaHypopnea'
            # (the mode the final models used). They are now an independent chain.
            if note in self._STAGE_CODES:
                stage, wake = self._STAGE_CODES[note]
                sleepStageAnnotation[sample] = stage
                sleepWakeAnnotation[sample] = wake
        # Expand collected event boundaries into per-sample labels.
        if apneaHypopneaMode == 'apneaHypopnea':
            self._fill_regions(apneaHypopneaAnn, startApnea, endApnea, 1)
        elif apneaHypopneaMode == 'obstructiveApneaHypopnea':
            self._fill_regions(apneaHypopneaAnn, startObsApnea, endObsApnea, 1)
            self._fill_regions(apneaHypopneaAnn, startCntApnea, endCntApnea, 3)
            self._fill_regions(apneaHypopneaAnn, startMixApnea, endMixApnea, 4)
        self._fill_regions(apneaHypopneaAnn, startHypopnea, endHypopnea, 2)
        # Forward-fill stage onsets so every sample carries the current stage.
        for index in range(len(sleepWakeAnnotation)):
            if not sleepWakeAnnotation[index]:
                sleepWakeAnnotation[index] = sleepWakeAnnotation[index - 1]
        for index in range(len(sleepStageAnnotation)):
            if not sleepStageAnnotation[index]:
                sleepStageAnnotation[index] = sleepStageAnnotation[index - 1]
        # Usually the initial sleep stage is not labelled; that leading interval
        # is considered an undefined stage.
        sleepWakeAnnotation[sleepWakeAnnotation == 0] = -1  # undefined
        sleepWakeAnnotation[sleepWakeAnnotation == 2] = 0   # wake
        sleepStageAnnotation[sleepStageAnnotation == 0] = 6
        return apneaHypopneaAnn, sleepWakeAnnotation, sleepStageAnnotation
# %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
def _save_annotation(path_prefix, record_id, annotation):
    """Persist one annotation array to '<path_prefix><record_id>.dat' as an int32 memmap.

    path_prefix: str, directory + filename stem the record id is appended to.
    record_id: identifier of the record (stringified into the filename).
    annotation: 1-d numpy array of integer annotation codes.
    """
    fp = np.memmap(path_prefix + str(record_id) + '.dat', dtype='int32', mode='w+',
                   shape=annotation.shape)
    fp[:] = annotation[:]
    del fp  # deleting the memmap flushes the data to disk and releases the handle


ds = classificationDataset()
# Loop over each annotation file and save its expanded sleep/wake, sleep stage and apnea/hypopnea annotations
for n in range(len(ds)):
    apneaHypopneaAnn, sleepWakeAnnotation, sleepStageAnnotation = ds[n]
    # The apnea/hypopnea annotation is written under a mode-specific name/path.
    if apneaHypopneaMode == 'obstructiveApneaHypopnea':
        _save_annotation(obstructiveApneaHypopneaFilePath + 'obstructiveApneaHypopneaAnnotation_',
                         recordList[n], apneaHypopneaAnn)
    elif apneaHypopneaMode == 'apneaHypopnea':
        _save_annotation(apneaHypopneaFilePath + 'apneaHypopneaAnnotation_',
                         recordList[n], apneaHypopneaAnn)
    # Sleep/wake and sleep-stage annotations are always written.
    _save_annotation(sleepWakeFilePath + 'sleepWakeAnnotation_', recordList[n], sleepWakeAnnotation)
    _save_annotation(sleepStageFilePath + 'sleepStageAnnotation_', recordList[n], sleepStageAnnotation)
    print('Annotations are extracted for record ' + str(n))
| [
"matplotlib.pyplot.switch_backend",
"numpy.size",
"wfdb.rdann",
"os.path.isdir",
"scipy.io.loadmat",
"numpy.zeros",
"os.listdir"
] | [((128, 155), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['"""TKAgg"""'], {}), "('TKAgg')\n", (146, 155), True, 'import matplotlib.pyplot as plt\n'), ((1143, 1163), 'os.listdir', 'os.listdir', (['dataPath'], {}), '(dataPath)\n', (1153, 1163), False, 'import os\n'), ((1963, 2021), 'wfdb.rdann', 'rdann', (["(dataPath + recordName + '/' + recordName)", '"""arousal"""'], {}), "(dataPath + recordName + '/' + recordName, 'arousal')\n", (1968, 2021), False, 'from wfdb import rdann\n'), ((1114, 1141), 'os.path.isdir', 'os.path.isdir', (['(dataPath + x)'], {}), '(dataPath + x)\n', (1127, 1141), False, 'import os\n'), ((2828, 2854), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (2836, 2854), True, 'import numpy as np\n'), ((2877, 2903), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (2885, 2903), True, 'import numpy as np\n'), ((2928, 2954), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (2936, 2954), True, 'import numpy as np\n'), ((2977, 3003), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (2985, 3003), True, 'import numpy as np\n'), ((3028, 3054), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (3036, 3054), True, 'import numpy as np\n'), ((3077, 3103), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (3085, 3103), True, 'import numpy as np\n'), ((3128, 3154), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (3136, 3154), True, 'import numpy as np\n'), ((3177, 3203), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (3185, 3203), True, 'import numpy as np\n'), ((3225, 3251), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), '(shape=numSamples)\n', (3233, 3251), True, 'import numpy as np\n'), ((3271, 3297), 'numpy.zeros', 'np.zeros', ([], {'shape': 'numSamples'}), 
'(shape=numSamples)\n', (3279, 3297), True, 'import numpy as np\n'), ((9651, 9673), 'numpy.size', 'np.size', (['startHypopnea'], {}), '(startHypopnea)\n', (9658, 9673), True, 'import numpy as np\n'), ((7314, 7333), 'numpy.size', 'np.size', (['startApnea'], {}), '(startApnea)\n', (7321, 7333), True, 'import numpy as np\n'), ((9859, 9881), 'numpy.size', 'np.size', (['startHypopnea'], {}), '(startHypopnea)\n', (9866, 9881), True, 'import numpy as np\n'), ((1781, 1835), 'scipy.io.loadmat', 'loadmat', (["(arousalDataPath + recordName + '-arousal.mat')"], {}), "(arousalDataPath + recordName + '-arousal.mat')\n", (1788, 1835), False, 'from scipy.io import loadmat\n'), ((7510, 7529), 'numpy.size', 'np.size', (['startApnea'], {}), '(startApnea)\n', (7517, 7529), True, 'import numpy as np\n'), ((8437, 8459), 'numpy.size', 'np.size', (['startObsApnea'], {}), '(startObsApnea)\n', (8444, 8459), True, 'import numpy as np\n'), ((8766, 8788), 'numpy.size', 'np.size', (['startCntApnea'], {}), '(startCntApnea)\n', (8773, 8788), True, 'import numpy as np\n'), ((9091, 9113), 'numpy.size', 'np.size', (['startMixApnea'], {}), '(startMixApnea)\n', (9098, 9113), True, 'import numpy as np\n'), ((8657, 8679), 'numpy.size', 'np.size', (['startObsApnea'], {}), '(startObsApnea)\n', (8664, 8679), True, 'import numpy as np\n'), ((8982, 9004), 'numpy.size', 'np.size', (['startCntApnea'], {}), '(startCntApnea)\n', (8989, 9004), True, 'import numpy as np\n'), ((9305, 9327), 'numpy.size', 'np.size', (['startMixApnea'], {}), '(startMixApnea)\n', (9312, 9327), True, 'import numpy as np\n')] |
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt
import glob

# Train a kNN digit classifier from binarised 20x40 digit images whose
# filenames encode the ground-truth label (e.g. "digits/7 p49.jpg" -> 7).
files = glob.glob("digits/*.jpg")
train = []
train_labels = []
for f in files:
    img = cv.imread(f)
    grayImage = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # Binarise so the features are insensitive to brightness variations.
    ret, thresImg = cv.threshold(grayImage, 100, 255, cv.THRESH_BINARY)
    num = np.array(thresImg)
    # Flatten each 20x40 image into one float32 feature row.
    num_ = num.reshape(-1, 20 * 40).astype(np.float32)
    train.append(num_[0])
    # The label is the first whitespace-separated token of the filename.
    f = f.replace("digits\\", "")
    tp = f.split(" ")
    train_labels.append(int(tp[0]))
x = np.array(train)
y = np.array(train_labels)
knn = cv.ml.KNearest_create()
knn.train(x, cv.ml.ROW_SAMPLE, y)
# Sanity-check the classifier on one known sample.  The query image must go
# through the SAME preprocessing as the training images (grayscale + binary
# threshold); feeding raw grayscale here would skew the kNN distances.
img3 = cv.imread('digits/7 p49.jpg')
gray3 = cv.cvtColor(img3, cv.COLOR_BGR2GRAY)
ret3, thres3 = cv.threshold(gray3, 100, 255, cv.THRESH_BINARY)
num3 = np.array(thres3)
num3_ = num3.reshape(-1, 20 * 40).astype(np.float32)
ret, result, neighbours, dist = knn.findNearest(num3_, k=5)
print(result)
"cv2.cvtColor",
"cv2.ml.KNearest_create",
"cv2.threshold",
"cv2.imread",
"numpy.array",
"glob.glob"
] | [((93, 118), 'glob.glob', 'glob.glob', (['"""digits/*.jpg"""'], {}), "('digits/*.jpg')\n", (102, 118), False, 'import glob\n'), ((585, 600), 'numpy.array', 'np.array', (['train'], {}), '(train)\n', (593, 600), True, 'import numpy as np\n'), ((605, 627), 'numpy.array', 'np.array', (['train_labels'], {}), '(train_labels)\n', (613, 627), True, 'import numpy as np\n'), ((634, 657), 'cv2.ml.KNearest_create', 'cv.ml.KNearest_create', ([], {}), '()\n', (655, 657), True, 'import cv2 as cv\n'), ((719, 748), 'cv2.imread', 'cv.imread', (['"""digits/7 p49.jpg"""'], {}), "('digits/7 p49.jpg')\n", (728, 748), True, 'import cv2 as cv\n'), ((757, 793), 'cv2.cvtColor', 'cv.cvtColor', (['img3', 'cv.COLOR_BGR2GRAY'], {}), '(img3, cv.COLOR_BGR2GRAY)\n', (768, 793), True, 'import cv2 as cv\n'), ((800, 815), 'numpy.array', 'np.array', (['gray3'], {}), '(gray3)\n', (808, 815), True, 'import numpy as np\n'), ((189, 201), 'cv2.imread', 'cv.imread', (['f'], {}), '(f)\n', (198, 201), True, 'import cv2 as cv\n'), ((218, 253), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (229, 253), True, 'import cv2 as cv\n'), ((272, 323), 'cv2.threshold', 'cv.threshold', (['grayImage', '(100)', '(255)', 'cv.THRESH_BINARY'], {}), '(grayImage, 100, 255, cv.THRESH_BINARY)\n', (284, 323), True, 'import cv2 as cv\n'), ((331, 349), 'numpy.array', 'np.array', (['thresImg'], {}), '(thresImg)\n', (339, 349), True, 'import numpy as np\n')] |
import uclasm
import numpy as np
from functools import reduce
n_iterations = 0
def validate_alldiff_solns(tmplt, world, candidates):
    """ Check that there exists a solution to the alldiff problem

    Given the boolean candidate matrix, decide whether every template node
    can be assigned a *distinct* candidate world node (an injective
    assignment, i.e. a system of distinct representatives).

    Parameters
    ----------
    tmplt, world : graph objects exposing ``n_nodes``
    candidates : 2-d boolean array, ``candidates[t, w]`` is True when world
        node ``w`` is a candidate for template node ``t``

    Returns
    -------
    bool : True iff a conflict-free assignment exists.
    """
    # Map from tmplt index to possible candidate indices
    var_to_vals = {
        tmplt_idx: [
            cand_idx for cand_idx in range(world.n_nodes)
            if candidates[tmplt_idx, cand_idx]
        ]
        for tmplt_idx in range(tmplt.n_nodes)
    }
    # if a var has only one possible val, track it then throw it out.
    # NOTE(review): matched_pairs is computed but never read afterwards.
    matched_pairs = [(var, list(vals)[0]) for var, vals in var_to_vals.items()
                     if len(vals) == 1] # TODO: better variable name
    var_to_vals = {var: vals for var, vals in var_to_vals.items()
                   if len(vals) > 1}
    unspec_vars = list(var_to_vals.keys())
    # which vars is each val a cand for?
    val_to_vars = uclasm.invert(var_to_vals)
    # gather sets of vals which have the same set of possible vars.
    vars_to_vals = uclasm.values_map_to_same_key(val_to_vars)
    # Equivalent vals are interchangeable, so only their count matters.
    vars_to_val_counts = {vars: len(vals)
                          for vars, vals in vars_to_vals.items()}
    # each var can belong to multiple sets of vars which key vars_to_val_counts
    # so here we find out which sets of vars each var belongs to
    var_to_vars_list = {
        var: [vars for vars in vars_to_val_counts.keys() if var in vars]
        for var in var_to_vals}
    def recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts):
        # Backtracking search: mutates its dict arguments in place and
        # restores them before returning, so callers see them unchanged.
        if len(var_to_vars_list) == 0:
            return True
        # Retrieve an arbitrary unspecified variable
        var, vars_list = var_to_vars_list.popitem()
        # Iterate through possible assignments of that variable
        for vars in vars_list:
            # How many ways are there to assign the variable in this way?
            n_vals = vars_to_val_counts[vars]
            if n_vals == 0:
                continue
            vars_to_val_counts[vars] -= 1
            if recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts):
                return True
            # put the count back so we don't mess up the recursion
            vars_to_val_counts[vars] += 1
        # put the list back so we don't mess up the recursion
        var_to_vars_list[var] = vars_list
        return False
    return recursive_validate(var_to_vars_list, vars_to_vals, vars_to_val_counts)
# TODO: switch to keyword arguments throughout
def validate_isomorphisms(tmplt, world, candidates, unspec_cover):
    """Return True iff at least one isomorphism is consistent with `candidates`.

    Tentatively assigns each template node in `unspec_cover` to one of its
    candidate world nodes, re-runs the cheap filters to propagate the
    assignment, and backtracks on dead ends.
    """
    global n_iterations
    n_iterations += 1
    # Base case: every cover node is pinned down; only the alldiff
    # constraint between template nodes remains to be checked.
    if len(unspec_cover) == 0:
        return validate_alldiff_solns(tmplt, world, candidates)
    node_idx = unspec_cover[0]
    for cand_idx in np.argwhere(candidates[node_idx]).flat:
        # Work on a copy so backtracking leaves `candidates` untouched.
        trial_cands = candidates.copy()
        trial_cands[node_idx, :] = uclasm.one_hot(cand_idx, world.n_nodes)
        # Propagate the tentative assignment through the cheap filters.
        _, filtered_world, filtered_cands = uclasm.run_filters(
            tmplt, world, candidates=trial_cands,
            filters=uclasm.cheap_filters,
            init_changed_cands=uclasm.one_hot(node_idx, tmplt.n_nodes))
        # A template node left with zero candidates rules this branch out.
        if not filtered_cands.any(axis=1).all():
            continue
        if validate_isomorphisms(tmplt, filtered_world, filtered_cands,
                                 unspec_cover[1:]):
            return True
    return False
def has_isomorphism(tmplt, world, *, candidates=None, verbose=False, count_iterations=False, **kwargs):
    """
    Searches for an isomorphism and returns True if one is found, else False.

    Parameters
    ----------
    tmplt, world : graph objects
    candidates : optional 2-d boolean candidate matrix; when None, an
        all-ones matrix is built and pruned with the full filter suite.
    count_iterations : when True, also return the number of recursive
        validation calls as a second element.
    **kwargs : forwarded to ``uclasm.run_filters`` when candidates is None.
    """
    global n_iterations
    n_iterations = 0
    if candidates is None:
        tmplt, world, candidates = uclasm.run_filters(
            tmplt, world, filters=uclasm.all_filters,
            # builtin `bool`: the `np.bool` alias was removed in NumPy 1.24
            candidates=np.ones((tmplt.n_nodes, world.n_nodes), dtype=bool),
            **kwargs)
    # TODO: only recompute unspec_cover when necessary or not at all
    # Get node cover for unspecified nodes (those with more than one candidate)
    cand_counts = candidates.sum(axis=1)
    unspec_subgraph = tmplt.subgraph(cand_counts > 1)
    unspec_cover = uclasm.get_node_cover(unspec_subgraph)
    # builtin `int`: the `np.int` alias was removed in NumPy 1.24
    unspec_cover = np.array([tmplt.node_idxs[unspec_subgraph.nodes[idx]]
                             for idx in unspec_cover], dtype=int)
    # TODO: pass arguments as keywords to avoid bugs when changes are made
    if validate_isomorphisms(tmplt, world, candidates, unspec_cover):
        if count_iterations:
            return True, n_iterations
        return True
    if count_iterations:
        return False, n_iterations
    return False
| [
"uclasm.values_map_to_same_key",
"numpy.ones",
"uclasm.invert",
"numpy.array",
"numpy.argwhere",
"uclasm.get_node_cover",
"uclasm.one_hot"
] | [((896, 922), 'uclasm.invert', 'uclasm.invert', (['var_to_vals'], {}), '(var_to_vals)\n', (909, 922), False, 'import uclasm\n'), ((1011, 1053), 'uclasm.values_map_to_same_key', 'uclasm.values_map_to_same_key', (['val_to_vars'], {}), '(val_to_vars)\n', (1040, 1053), False, 'import uclasm\n'), ((4383, 4421), 'uclasm.get_node_cover', 'uclasm.get_node_cover', (['unspec_subgraph'], {}), '(unspec_subgraph)\n', (4404, 4421), False, 'import uclasm\n'), ((4441, 4538), 'numpy.array', 'np.array', (['[tmplt.node_idxs[unspec_subgraph.nodes[idx]] for idx in unspec_cover]'], {'dtype': 'np.int'}), '([tmplt.node_idxs[unspec_subgraph.nodes[idx]] for idx in\n unspec_cover], dtype=np.int)\n', (4449, 4538), True, 'import numpy as np\n'), ((2783, 2818), 'numpy.argwhere', 'np.argwhere', (['candidates[unspec_idx]'], {}), '(candidates[unspec_idx])\n', (2794, 2818), True, 'import numpy as np\n'), ((3018, 3057), 'uclasm.one_hot', 'uclasm.one_hot', (['cand_idx', 'world.n_nodes'], {}), '(cand_idx, world.n_nodes)\n', (3032, 3057), False, 'import uclasm\n'), ((3322, 3363), 'uclasm.one_hot', 'uclasm.one_hot', (['unspec_idx', 'tmplt.n_nodes'], {}), '(unspec_idx, tmplt.n_nodes)\n', (3336, 3363), False, 'import uclasm\n'), ((4078, 4132), 'numpy.ones', 'np.ones', (['(tmplt.n_nodes, world.n_nodes)'], {'dtype': 'np.bool'}), '((tmplt.n_nodes, world.n_nodes), dtype=np.bool)\n', (4085, 4132), True, 'import numpy as np\n')] |
import numpy as np
import scipy.stats as st
import random
from generate_infection_states import generate_correlated_infections, generate_correlated_infections_fixed_household_size
from PCR_test import false_negative_rate_binary, pooled_PCR_test
def one_stage_group_testing_fixed_household_size(infections, pool_size, shuffle=False):
    """
    perform one-stage hierarchical group testing on population with fixed househould size and binary infection state
    INPUT:
        infections: 2-d array of shape (m x k), the binary infection states for individuals
                    where m is the number of households, k is the household size
        pool_size: Int, number of samples in a pooled test
        shuffle: whether to randomly place individuals into the pools or to assign pools based on households
    OUTPUT:
        (false-negative rate of the pooled protocol,
         individuals tested per test performed,
         follow-up tests per positive found)
    """
    population_size = infections.size
    assert population_size % pool_size == 0
    num_pools = population_size // pool_size

    if shuffle:
        pools = infections.flatten()
        np.random.shuffle(pools)
        pools = pools.reshape((num_pools, -1))
    else:
        pools = infections.reshape((num_pools, -1))

    num_positives_in_pools = np.sum(pools, axis=1)
    # A pool with n >= 1 infected members tests positive with probability
    # 1 - FNR(n); pools with no infected members never test positive.
    results = np.array([st.bernoulli.rvs(1 - false_negative_rate_binary(n)) if n >= 1 else 0 for n in num_positives_in_pools])
    num_positive_pools = np.sum(results)
    # Infected individuals sitting in pools that (falsely) tested negative.
    num_false_negatives = np.sum(pools - results.reshape((-1,1)) == 1)
    num_positives = np.sum(pools)
    # Guard the zero-positive case instead of dividing by zero
    # (consistent with one_stage_group_testing).
    fnr_group_testing = num_false_negatives / num_positives if num_positives > 0 else 0.0
    num_indiv_tests = num_positive_pools * pool_size
    total_num_tests = num_pools + num_indiv_tests
    efficiency = population_size / total_num_tests
    num_positives_found = num_positives - num_false_negatives
    num_indiv_tests_per_positive_found = num_indiv_tests / num_positives_found if num_positives_found > 0 else 0.0
    return fnr_group_testing, efficiency, num_indiv_tests_per_positive_found
def one_stage_group_testing(infections, pool_size, type='binary', LoD=None, shuffle=False):
    """
    perform one-stage hierarchical group testing.
    INPUT:
    infections: list of lists, the infection states of the population
    pool_size: Int, number of samples in a pooled test
    type: 'binary' if the infection state is binary,
          'real' if the infection state is the individual's Log10 viral load
    LoD: limit of detection of the PCR test. Should only be specified if type == 'real'
    shuffle: boolean, whether to randomly place individuals into the pools or to assign pools based on households

    OUTPUT:
    type == 'binary': (false-negative rate, number of positives per pool)
    type == 'real':   (false-negative rate, efficiency, number of positives per pool)
    """
    population_size = len(sum(infections,[]))
    assert population_size % pool_size == 0
    num_pools = population_size // pool_size
    if shuffle:
        # Household structure is discarded: individuals are pooled at random.
        pools = np.array(sum(infections,[]))
        np.random.shuffle(pools)
        pools = pools.reshape((num_pools, -1))
    else:
        # Greedy first-fit packing: keep each household in one pool when a
        # pool with enough remaining capacity exists, otherwise spill the
        # household across consecutive pools.
        pools = np.zeros((num_pools, pool_size))
        capacities = [pool_size] * num_pools # remaining capacity in each pool
        for household_infections in infections:
            household_size = len(household_infections)
            # First pool that can hold the entire household, or -1 if none.
            pool_idx = next((i for i, c in enumerate(capacities) if c >= household_size), -1)
            if pool_idx != -1:
                loc = pool_size - capacities[pool_idx]
                pools[pool_idx, loc:loc + household_size] = household_infections
                capacities[pool_idx] -= household_size
            else:
                # Fill earlier pools completely and put the remainder here.
                last_pool = next(i for i, c in enumerate(np.cumsum(capacities)) if c >= household_size) # last pool for the household to be placed into
                allocated = 0
                for i in range(last_pool):
                    pools[i, pool_size - capacities[i]:] = household_infections[allocated:allocated + capacities[i]]
                    allocated += capacities[i]
                    capacities[i] = 0
                to_allocate_in_last_pool = household_size - allocated
                loc = pool_size - capacities[last_pool]
                pools[last_pool, loc: loc + to_allocate_in_last_pool] = household_infections[allocated:]
                capacities[last_pool] -= to_allocate_in_last_pool
    if type == "binary":
        num_positives_in_pools = np.sum(pools, axis=1)
        # Pool tests positive with probability 1 - FNR(n) when n >= 1.
        results = np.array([st.bernoulli.rvs(1 - false_negative_rate_binary(n)) if n >= 1 else 0 for n in num_positives_in_pools])
        # Infected members of pools that (falsely) tested negative.
        num_false_negatives = np.sum(pools - results.reshape((-1,1)) == 1)
        num_positives = np.sum(pools)
        fnr_group_testing = num_false_negatives / num_positives if num_positives > 0 else 0.0
        return fnr_group_testing, num_positives_in_pools
    else:
        assert type == "real"
        # Convert Log10 viral load back to copy numbers (0 stays 0).
        convert_log10_viral_load = lambda x: int(10 ** x) if x > 0 else 0
        convert_log10_viral_load = np.vectorize(convert_log10_viral_load)
        viral_loads = convert_log10_viral_load(pools)
        if LoD:
            group_testing_results = pooled_PCR_test(viral_loads, LoD=LoD)
        else:
            group_testing_results = pooled_PCR_test(viral_loads)
        num_positive_pools = np.sum(group_testing_results)
        # Broadcast the pool-level result to each member of the pool.
        group_testing_results = np.repeat(group_testing_results.reshape((-1,1)), pool_size, axis=1)
        if LoD:
            individual_testing_results = pooled_PCR_test(viral_loads, individual=True, LoD=LoD)
        else:
            individual_testing_results = pooled_PCR_test(viral_loads, individual=True)
        # An individual is flagged only if both its pool and its own
        # follow-up test come back positive.
        final_results = (group_testing_results * individual_testing_results).astype(int)
        expected_outcomes = pools > 0
        num_false_negatives = np.sum(expected_outcomes - final_results == 1)
        num_positives = np.sum(expected_outcomes)
        fnr_group_testing = num_false_negatives / num_positives if num_positives > 0 else 0.0
        # One test per pool plus individual retests for every positive pool.
        total_num_tests = num_pools + num_positive_pools * pool_size
        num_positives_in_pools = np.sum(pools > 0, axis=1)
        efficiency = population_size / total_num_tests
        return fnr_group_testing, efficiency, num_positives_in_pools
def main():
    """Smoke test: compare shuffled vs. household-correlated pooling."""
    print("testing one-stage group testing for fixed household size...")
    # Synthetic population: 10000 households of size 4, 10% prevalence.
    infections = generate_correlated_infections_fixed_household_size(10000, 4, 0.1)
    # Shuffling destroys the household correlation in the pools.
    fnr_shuffled, eff_shuffled = one_stage_group_testing_fixed_household_size(infections, 20, shuffle=True)[:2]
    fnr_household, eff_household = one_stage_group_testing_fixed_household_size(infections, 20, shuffle=False)[:2]
    print('indep fnr = {}, corr fnr = {}, indep eff = {}, corr eff = {}'.format(fnr_shuffled, fnr_household, eff_shuffled, eff_household))


if __name__ == '__main__':
    main()
| [
"generate_infection_states.generate_correlated_infections_fixed_household_size",
"PCR_test.false_negative_rate_binary",
"numpy.sum",
"numpy.vectorize",
"PCR_test.pooled_PCR_test",
"numpy.zeros",
"numpy.cumsum",
"numpy.random.shuffle"
] | [((1147, 1168), 'numpy.sum', 'np.sum', (['pools'], {'axis': '(1)'}), '(pools, axis=1)\n', (1153, 1168), True, 'import numpy as np\n'), ((1321, 1336), 'numpy.sum', 'np.sum', (['results'], {}), '(results)\n', (1327, 1336), True, 'import numpy as np\n'), ((1428, 1441), 'numpy.sum', 'np.sum', (['pools'], {}), '(pools)\n', (1434, 1441), True, 'import numpy as np\n'), ((6065, 6131), 'generate_infection_states.generate_correlated_infections_fixed_household_size', 'generate_correlated_infections_fixed_household_size', (['(10000)', '(4)', '(0.1)'], {}), '(10000, 4, 0.1)\n', (6116, 6131), False, 'from generate_infection_states import generate_correlated_infections, generate_correlated_infections_fixed_household_size\n'), ((983, 1007), 'numpy.random.shuffle', 'np.random.shuffle', (['pools'], {}), '(pools)\n', (1000, 1007), True, 'import numpy as np\n'), ((2724, 2748), 'numpy.random.shuffle', 'np.random.shuffle', (['pools'], {}), '(pools)\n', (2741, 2748), True, 'import numpy as np\n'), ((2822, 2854), 'numpy.zeros', 'np.zeros', (['(num_pools, pool_size)'], {}), '((num_pools, pool_size))\n', (2830, 2854), True, 'import numpy as np\n'), ((4155, 4176), 'numpy.sum', 'np.sum', (['pools'], {'axis': '(1)'}), '(pools, axis=1)\n', (4161, 4176), True, 'import numpy as np\n'), ((4407, 4420), 'numpy.sum', 'np.sum', (['pools'], {}), '(pools)\n', (4413, 4420), True, 'import numpy as np\n'), ((4722, 4760), 'numpy.vectorize', 'np.vectorize', (['convert_log10_viral_load'], {}), '(convert_log10_viral_load)\n', (4734, 4760), True, 'import numpy as np\n'), ((5015, 5044), 'numpy.sum', 'np.sum', (['group_testing_results'], {}), '(group_testing_results)\n', (5021, 5044), True, 'import numpy as np\n'), ((5518, 5564), 'numpy.sum', 'np.sum', (['(expected_outcomes - final_results == 1)'], {}), '(expected_outcomes - final_results == 1)\n', (5524, 5564), True, 'import numpy as np\n'), ((5589, 5614), 'numpy.sum', 'np.sum', (['expected_outcomes'], {}), '(expected_outcomes)\n', (5595, 5614), True, 
'import numpy as np\n'), ((5811, 5836), 'numpy.sum', 'np.sum', (['(pools > 0)'], {'axis': '(1)'}), '(pools > 0, axis=1)\n', (5817, 5836), True, 'import numpy as np\n'), ((4869, 4906), 'PCR_test.pooled_PCR_test', 'pooled_PCR_test', (['viral_loads'], {'LoD': 'LoD'}), '(viral_loads, LoD=LoD)\n', (4884, 4906), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((4957, 4985), 'PCR_test.pooled_PCR_test', 'pooled_PCR_test', (['viral_loads'], {}), '(viral_loads)\n', (4972, 4985), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((5203, 5257), 'PCR_test.pooled_PCR_test', 'pooled_PCR_test', (['viral_loads'], {'individual': '(True)', 'LoD': 'LoD'}), '(viral_loads, individual=True, LoD=LoD)\n', (5218, 5257), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((5313, 5358), 'PCR_test.pooled_PCR_test', 'pooled_PCR_test', (['viral_loads'], {'individual': '(True)'}), '(viral_loads, individual=True)\n', (5328, 5358), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((1214, 1243), 'PCR_test.false_negative_rate_binary', 'false_negative_rate_binary', (['n'], {}), '(n)\n', (1240, 1243), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((4226, 4255), 'PCR_test.false_negative_rate_binary', 'false_negative_rate_binary', (['n'], {}), '(n)\n', (4252, 4255), False, 'from PCR_test import false_negative_rate_binary, pooled_PCR_test\n'), ((3428, 3449), 'numpy.cumsum', 'np.cumsum', (['capacities'], {}), '(capacities)\n', (3437, 3449), True, 'import numpy as np\n')] |
import math
import astropy as ast
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import warnings
from iminuit import Minuit
from probfit import UnbinnedLH, gaussian, doublegaussian
import numba as nb
import pandas as pd
from .models import *
class PeakFitting():
    """Fit pulsar light-curve peak(s) with a Gaussian, double-Gaussian,
    asymmetric double-Gaussian or Lorentzian model.

    The fit is either binned (least-squares on the phase histogram via
    scipy ``curve_fit``) or unbinned (maximum likelihood via probfit
    ``UnbinnedLH`` + iminuit ``Minuit``).  Results are exposed through
    ``self.params`` / ``self.errors`` and a summary ``self.df_result``.
    """
    def __init__(self,binned,model,peak='both'):
        # binned: bool — True for the binned chi2 fit, False for the
        #         unbinned maximum-likelihood fit.
        # model:  str — must be in get_model_list().
        # peak:   'P1', 'P2' or 'both' — which peak(s) to fit.
        #Define and check model
        self.model=model
        # Phase shift applied when a peak straddles the interval edge;
        # set later by est_initial_values.
        self.shift=0
        self.peak_tofit=peak
        self.check_model()
        self.binned=binned
    ##############################################
    #EXECUTION
    #############################################
    def run(self,pulsar_phases):
        """Estimate initial values from `pulsar_phases` and perform the fit."""
        #Estimate initial values
        self.est_initial_values(pulsar_phases)
        #Do the fitting
        if self.binned==True:
            self.fit_Binned(pulsar_phases.histogram)
        else:
            self.fit_ULmodel(pulsar_phases)
    def check_model(self):
        """Raise ValueError if the model/peak combination is not supported."""
        model_list=get_model_list()
        if self.model not in model_list:
            raise ValueError('The model is not in the available model list')
        if self.peak_tofit=='both' and self.model=='gaussian':
            raise ValueError('Gaussian model can only fit one peak')
        if self.peak_tofit=='P1' and self.model=='dgaussian':
            raise ValueError('Dgaussian model needs two peaks')
        if self.peak_tofit=='P2' and self.model=='dgaussian':
            # NOTE(review): message looks like a copy-paste slip — presumably
            # it should read 'Dgaussian ...' as in the P1 branch; confirm.
            raise ValueError('Gaussian model needs two peaks')
    def est_initial_values(self,pulsar_phases):
        """Build self.init (model parameter seeds) from the P1/P2 peak regions."""
        self.check_model()
        self.init=[]
        intensity=[]
        #Set different initial values for different models
        for name in ['P1','P2']:
            P_info=pulsar_phases.regions.dic[name]
            if P_info is not None:
                if name==self.peak_tofit or self.peak_tofit=='both':
                    intensity.append(P_info.Nex/P_info.noff)
                    # A 4-element limits list means the peak wraps around the
                    # phase-interval edge: remember a shift for later fits.
                    if len(P_info.limits)>2:
                        self.init.extend([(P_info.limits[0]+1+P_info.limits[3])/2,P_info.deltaP/2])
                        self.shift=2*P_info.deltaP
                    else:
                        self.init.extend([(P_info.limits[0]+P_info.limits[1])/2,P_info.deltaP/2])
                    # Asymmetric model needs a second width seed per peak.
                    if self.model=='asym_dgaussian':
                        self.init.append(P_info.deltaP/2)
            else:
                if self.model!='gaussian':
                    raise ValueError('Double Gaussian model needs two peaks')
        # Baseline seed followed by the per-peak intensity seeds.
        self.init.append(1)
        self.init.extend(intensity)
    #Unbinned fitting
    def fit_ULmodel(self,pulsar_phases):
        """Unbinned maximum-likelihood fit of the phase list with iminuit."""
        self.check_model()
        #Shift the phases if one of the peak is near the interval edge
        shift_phases=pulsar_phases.phases
        # NOTE(review): this modifies pulsar_phases.phases in place if it is
        # a mutable sequence — confirm callers do not rely on the original.
        if self.shift!=0:
            for i in range(0,len(shift_phases)):
                if shift_phases[i]<self.shift:
                    shift_phases[i]=shift_phases[i]+1
        if self.model=='dgaussian':
            unbinned_likelihood = UnbinnedLH(double_gaussian, np.array(shift_phases))
            minuit = Minuit(unbinned_likelihood,mu=self.init[0], sigma=self.init[1],mu_2=self.init[2],sigma_2=self.init[3],A=self.init[4],B=self.init[5],C=self.init[6])
            self.parnames=['mu', 'sigma','mu_2','sigma_2','A','B','C']
        elif self.model=='asym_dgaussian':
            unbinned_likelihood = UnbinnedLH(assymetric_double_gaussian, np.array(shift_phases))
            minuit = Minuit(unbinned_likelihood, mu=self.init[0], sigma1=self.init[1],sigma2=self.init[2],mu_2=self.init[3],sigma1_2=self.init[4],sigma2_2=self.init[5],A=self.init[6],B=self.init[7],C=self.init[8])
            self.parnames=['mu', 'sigma1','sigma2','mu_2','sigma1_2','sigma2_2','A','B','C']
        elif self.model=='lorentzian':
            unbinned_likelihood = UnbinnedLH(double_lorentz, np.array(shift_phases))
            minuit = Minuit(unbinned_likelihood, mu_1=self.init[0], gamma_1=self.init[1],mu_2=self.init[2],gamma_2=self.init[3],A=self.init[4],B=self.init[5],C=self.init[6])
            self.parnames=['mu_1', 'gamma_1','mu_2','gamma_2','A','B','C']
        elif self.model=='gaussian':
            unbinned_likelihood = UnbinnedLH(gaussian, np.array(shift_phases))
            minuit = Minuit(unbinned_likelihood, mu=self.init[0], sigma=self.init[1],A=self.init[2],B=self.init[3])
            self.parnames=['mu', 'sigma','A','B']
        # errordef = 0.5 is the likelihood convention for Minuit errors.
        minuit.errordef=0.5
        minuit.migrad()
        #Store results as minuit object
        self.minuit=minuit
        self.unbinned_lk=unbinned_likelihood
        #Store the result of params and errors
        self.params=[]
        self.errors=[]
        for name in self.parnames:
            self.params.append(self.minuit.values[name])
            self.errors.append(self.minuit.errors[name])
        self.create_result_df()
    #Binned fitting
    def fit_Binned(self,histogram):
        """Binned least-squares fit of the light-curve histogram with curve_fit."""
        self.check_model()
        #Shift the phases if one of the peak is near the interval edge
        bin_centres=(histogram.lc[1][1:]+histogram.lc[1][:-1])/2
        if self.shift!=0:
            for i in range(0,len(bin_centres)):
                if bin_centres[i]<self.shift:
                    bin_centres[i]=bin_centres[i]+1
        if self.model=='dgaussian':
            self.params,pcov_l=curve_fit(double_gaussian,bin_centres,histogram.lc[0],p0=self.init)
            self.parnames=['mu', 'sigma','mu_2','sigma_2','A','B','C']
        elif self.model=='asym_dgaussian':
            # The asymmetric pdf is scalar-only, so vectorize for curve_fit.
            assymetric_gaussian_pdf_vec=np.vectorize(assymetric_double_gaussian)
            self.params,pcov_l=curve_fit(assymetric_gaussian_pdf_vec,bin_centres,histogram.lc[0],p0=self.init)
            self.parnames=['mu', 'sigma1','sigma2','mu_2','sigma1_2','sigma2_2','A','B','C']
        elif self.model=='lorentzian':
            self.params,pcov_l=curve_fit(double_lorentz,bin_centres,histogram.lc[0],p0=self.init)
            self.parnames=['mu_1', 'gamma_1','mu_2','gamma_2','A','B','C']
        elif self.model=='gaussian':
            self.params,pcov_l=curve_fit(gaussian,bin_centres,histogram.lc[0],p0=self.init)
            self.parnames=['mu', 'sigma','A','B']
        #Store the result of params and errors
        # 1-sigma errors from the diagonal of the covariance matrix.
        self.errors=np.sqrt(np.diag(pcov_l))
        self.create_result_df()
    ##############################################
    #RESULTS
    #############################################
    def check_fit_result(self):
        """Return True if a fit has been performed (self.params exists)."""
        # NOTE(review): bare except — accessing a missing attribute raises
        # AttributeError; catching that explicitly would be safer.
        try:
            self.params
        except:
            return(False)
        return(True)
    def create_result_df(self):
        """Assemble the fitted names/values/errors into self.df_result."""
        d = {'Name': self.parnames, 'Value': self.params,'Error':self.errors}
        self.df_result=pd.DataFrame(data=d)
    def show_result(self):
        """Return the result DataFrame, or print a notice if no fit was done."""
        try:
            return(self.df_result)
        except:
            print('No fit has been done so far')
| [
"pandas.DataFrame",
"numpy.vectorize",
"scipy.optimize.curve_fit",
"numpy.array",
"iminuit.Minuit",
"numpy.diag"
] | [((7748, 7768), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'd'}), '(data=d)\n', (7760, 7768), True, 'import pandas as pd\n'), ((3665, 3828), 'iminuit.Minuit', 'Minuit', (['unbinned_likelihood'], {'mu': 'self.init[0]', 'sigma': 'self.init[1]', 'mu_2': 'self.init[2]', 'sigma_2': 'self.init[3]', 'A': 'self.init[4]', 'B': 'self.init[5]', 'C': 'self.init[6]'}), '(unbinned_likelihood, mu=self.init[0], sigma=self.init[1], mu_2=self.\n init[2], sigma_2=self.init[3], A=self.init[4], B=self.init[5], C=self.\n init[6])\n', (3671, 3828), False, 'from iminuit import Minuit\n'), ((6145, 6215), 'scipy.optimize.curve_fit', 'curve_fit', (['double_gaussian', 'bin_centres', 'histogram.lc[0]'], {'p0': 'self.init'}), '(double_gaussian, bin_centres, histogram.lc[0], p0=self.init)\n', (6154, 6215), False, 'from scipy.optimize import curve_fit\n'), ((7201, 7216), 'numpy.diag', 'np.diag', (['pcov_l'], {}), '(pcov_l)\n', (7208, 7216), True, 'import numpy as np\n'), ((3616, 3638), 'numpy.array', 'np.array', (['shift_phases'], {}), '(shift_phases)\n', (3624, 3638), True, 'import numpy as np\n'), ((4078, 4287), 'iminuit.Minuit', 'Minuit', (['unbinned_likelihood'], {'mu': 'self.init[0]', 'sigma1': 'self.init[1]', 'sigma2': 'self.init[2]', 'mu_2': 'self.init[3]', 'sigma1_2': 'self.init[4]', 'sigma2_2': 'self.init[5]', 'A': 'self.init[6]', 'B': 'self.init[7]', 'C': 'self.init[8]'}), '(unbinned_likelihood, mu=self.init[0], sigma1=self.init[1], sigma2=\n self.init[2], mu_2=self.init[3], sigma1_2=self.init[4], sigma2_2=self.\n init[5], A=self.init[6], B=self.init[7], C=self.init[8])\n', (4084, 4287), False, 'from iminuit import Minuit\n'), ((6416, 6456), 'numpy.vectorize', 'np.vectorize', (['assymetric_double_gaussian'], {}), '(assymetric_double_gaussian)\n', (6428, 6456), True, 'import numpy as np\n'), ((6492, 6579), 'scipy.optimize.curve_fit', 'curve_fit', (['assymetric_gaussian_pdf_vec', 'bin_centres', 'histogram.lc[0]'], {'p0': 'self.init'}), '(assymetric_gaussian_pdf_vec, 
bin_centres, histogram.lc[0], p0=\n self.init)\n', (6501, 6579), False, 'from scipy.optimize import curve_fit\n'), ((4029, 4051), 'numpy.array', 'np.array', (['shift_phases'], {}), '(shift_phases)\n', (4037, 4051), True, 'import numpy as np\n'), ((4543, 4710), 'iminuit.Minuit', 'Minuit', (['unbinned_likelihood'], {'mu_1': 'self.init[0]', 'gamma_1': 'self.init[1]', 'mu_2': 'self.init[2]', 'gamma_2': 'self.init[3]', 'A': 'self.init[4]', 'B': 'self.init[5]', 'C': 'self.init[6]'}), '(unbinned_likelihood, mu_1=self.init[0], gamma_1=self.init[1], mu_2=\n self.init[2], gamma_2=self.init[3], A=self.init[4], B=self.init[5], C=\n self.init[6])\n', (4549, 4710), False, 'from iminuit import Minuit\n'), ((6749, 6818), 'scipy.optimize.curve_fit', 'curve_fit', (['double_lorentz', 'bin_centres', 'histogram.lc[0]'], {'p0': 'self.init'}), '(double_lorentz, bin_centres, histogram.lc[0], p0=self.init)\n', (6758, 6818), False, 'from scipy.optimize import curve_fit\n'), ((4494, 4516), 'numpy.array', 'np.array', (['shift_phases'], {}), '(shift_phases)\n', (4502, 4516), True, 'import numpy as np\n'), ((4940, 5041), 'iminuit.Minuit', 'Minuit', (['unbinned_likelihood'], {'mu': 'self.init[0]', 'sigma': 'self.init[1]', 'A': 'self.init[2]', 'B': 'self.init[3]'}), '(unbinned_likelihood, mu=self.init[0], sigma=self.init[1], A=self.\n init[2], B=self.init[3])\n', (4946, 5041), False, 'from iminuit import Minuit\n'), ((6985, 7048), 'scipy.optimize.curve_fit', 'curve_fit', (['gaussian', 'bin_centres', 'histogram.lc[0]'], {'p0': 'self.init'}), '(gaussian, bin_centres, histogram.lc[0], p0=self.init)\n', (6994, 7048), False, 'from scipy.optimize import curve_fit\n'), ((4891, 4913), 'numpy.array', 'np.array', (['shift_phases'], {}), '(shift_phases)\n', (4899, 4913), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from os.path import join, dirname
sys.path.insert(0, join(dirname(__file__), '..'))
sys.path.insert(0, join(dirname(__file__), '../../'))
import os
import random
import argparse
import numpy as np
from PIL import Image, ImageDraw
from datetime import datetime
import cv2
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from model import RNN_MDN
from utils import write_params, fig2data, sample, mdn_loss
import carla_utils as cu
from robo_dataset_utils.robo_utils.kitti.torch_dataset import TrajectoryDataset_CNNFC
# --- Global state shared between training and visualization ---
# Filled in by draw_traj() and read by show_traj().
global_trajectory = None
global_trajectory_real = None
# --- Reproducibility / runtime configuration ---
random.seed(datetime.now())  # python RNG seeded from wall clock; torch RNGs fixed below
torch.manual_seed(999)
torch.cuda.manual_seed(999)
torch.set_num_threads(16)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# --- Command-line options ---
parser = argparse.ArgumentParser()
# NOTE(review): argparse `type=bool` treats any non-empty string (including
# "False") as True; consider action='store_true' in a future interface change.
parser.add_argument('--test_mode', type=bool, default=False, help='test model switch')
parser.add_argument('--dataset_name', type=str, default="kitti-train-mdn-02", help='name of the dataset')
parser.add_argument('--width', type=int, default=400, help='image width')
parser.add_argument('--height', type=int, default=200, help='image height')
parser.add_argument('--scale', type=float, default=30., help='longitudinal length')
parser.add_argument('--batch_size', type=int, default=32, help='size of the batches')
parser.add_argument('--traj_steps', type=int, default=8, help='traj steps')
parser.add_argument('--weight_decay', type=float, default=5e-4, help='adam: weight_decay')
parser.add_argument('--lr', type=float, default=1e-4, help='adam: learning rate')
parser.add_argument('--gamma', type=float, default=0.01, help='xy and vxy loss trade off')
parser.add_argument('--gamma2', type=float, default=0.01, help='xy and axy loss trade off')
parser.add_argument('--img_step', type=int, default=3, help='RNN input image step')
parser.add_argument('--n_cpu', type=int, default=16, help='number of cpu threads to use during batch generation')
parser.add_argument('--checkpoint_interval', type=int, default=100, help='interval between model checkpoints')
parser.add_argument('--test_interval', type=int, default=50, help='interval between model test')
parser.add_argument('--max_dist', type=float, default=25., help='max distance')
# Fixed copy-pasted help text (previously said 'max distance').
parser.add_argument('--max_speed', type=float, default=15., help='max speed')
parser.add_argument('--max_t', type=float, default=3., help='max time')
opt = parser.parse_args()
if opt.test_mode: opt.batch_size = 1
description = 'change costmap'
# --- Output directories and logging ---
log_path = 'result/log/'+opt.dataset_name+'/'
os.makedirs('result/saved_models/%s' % opt.dataset_name, exist_ok=True)
os.makedirs('result/output/%s' % opt.dataset_name, exist_ok=True)
if not opt.test_mode:
    logger = SummaryWriter(log_dir=log_path)
    write_params(log_path, parser, description)
# --- Model, optimizer and data loaders ---
model = RNN_MDN(256).to(device)
criterion = torch.nn.MSELoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=opt.lr, weight_decay=opt.weight_decay)
param = cu.parse_yaml_file_unsafe('robo_dataset_utils/params/param_kitti.yaml')
trajectory_dataset = TrajectoryDataset_CNNFC(param, 'train')  # training split
dataloader = DataLoader(trajectory_dataset, batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)
eval_trajectory_dataset = TrajectoryDataset_CNNFC(param, 'eval')  # evaluation split
dataloader_eval = DataLoader(eval_trajectory_dataset, batch_size=1, shuffle=False, num_workers=1)
eval_samples = iter(dataloader_eval)
def xy2uv(x, y):
    """Map metric trajectory coordinates to costmap pixel indices.

    Forward distance `x` maps to row index `u` (row 0 at the far edge) and
    lateral offset `y` maps to column index `v` (positive y to the left, so
    it is negated). Points falling outside the image are dropped.

    Returns
    -------
    (u, v) : tuple of int arrays
        Pixel coordinates of the in-bounds points only.
    """
    scale_px = opt.height / opt.scale  # pixels per meter
    u = (opt.height - x * scale_px - 1).astype(int)
    v = (-y * scale_px + opt.width // 2 - 1).astype(int)
    # Keep only points whose row index lies inside the image...
    keep = np.where((u >= 0) & (u < opt.height))[0]
    u, v = u[keep], v[keep]
    # ...and whose column index does too.
    keep = np.where((v >= 0) & (v < opt.width))[0]
    return u[keep], v[keep]
def show_traj(step):
    """Plot sampled vs. ground-truth trajectory curves and save the figure.

    Reads the module-level dicts `global_trajectory` (sampled) and
    `global_trajectory_real` (ground truth) filled in by draw_traj().
    Positions go on the primary axis, velocities on a twin axis; the figure
    is written to result/output/<dataset>/<step>_curve.png.
    """
    global global_trajectory, global_trajectory_real
    x_limit = 30.
    y_limit = 30.
    speed_limit = 15.0
    pred = global_trajectory
    truth = global_trajectory_real
    fig = plt.figure(figsize=(7, 7))
    pos_ax = fig.add_subplot(111)
    pos_ax.plot(pred['x'], pred['y'], label='trajectory', color='b', linewidth=5)
    pos_ax.plot(truth['x'], truth['y'], label='real-trajectory', color='b', linewidth=5, linestyle='--')
    pos_ax.set_xlabel('Forward/(m)')
    pos_ax.set_ylabel('Sideways/(m)')
    pos_ax.set_xlim([0., x_limit])
    pos_ax.set_ylim([-y_limit, y_limit])
    plt.legend(loc='lower right')
    # Timestamps rescaled onto the x-range so velocities share the plot width.
    real_t = x_limit * truth['ts_list']
    vel_ax = pos_ax.twinx()
    vel_ax.plot(real_t, pred['vx'], label='vx', color='tab:cyan', linewidth=2)
    vel_ax.plot(real_t, pred['vy'], label='vy', color='tab:purple', linewidth=2)
    vel_ax.plot(real_t, truth['vx'], label='real-vx', color='r', linewidth=2, linestyle='--')
    vel_ax.plot(real_t, truth['vy'], label='real-vy', color='g', linewidth=2, linestyle='--')
    vel_ax.set_ylabel('Velocity/(m/s)')
    vel_ax.set_ylim([-speed_limit, speed_limit])
    plt.legend(loc='lower left')
    plt.close('all')
    img = fig2data(fig)
    cv2.imwrite('result/output/%s/' % opt.dataset_name+str(step)+'_curve.png', img)
def draw_traj(step):
    """Sample one trajectory from the MDN for an eval batch and visualize it.

    Produces two files: a curve plot (via show_traj) and the costmap image
    with the ground-truth trajectory drawn in blue and the sampled one in red.

    Parameters
    ----------
    step : int
        Index used to name the output image files.
    """
    global global_trajectory, global_trajectory_real
    model.eval()
    batch = next(eval_samples)
    # Undo the input normalization so the costmap can be rendered as an image.
    img = batch['img'][:,-1,:].clone().data.numpy().squeeze()*127+128
    batch['img'] = batch['img'].to(device)
    batch['xy'] = batch['xy'].to(device)
    batch['vxy'] = batch['vxy'].to(device)
    ts_list = batch['t'].data.cpu().numpy()[0][:,0]
    pi, sigma, mu = model(batch['img'])
    try:
        output = sample(pi, sigma, mu)
        output = output.view(1, 10, 4)
    except Exception:
        # Sampling can fail for degenerate mixture parameters; skip this
        # sample. (Was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        model.train()
        return
    # De-normalize predictions back to metric units.
    x = output[:,:,0]*opt.max_dist
    y = output[:,:,1]*opt.max_dist
    vx = output[:,:,2]*opt.max_speed
    vy = output[:,:,3]*opt.max_speed
    real_x = batch['xy'].data.cpu().numpy()[0][:,0]*opt.max_dist
    real_y = batch['xy'].data.cpu().numpy()[0][:,1]*opt.max_dist
    real_vx = batch['vxy'].data.cpu().numpy()[0][:,0]
    real_vy = batch['vxy'].data.cpu().numpy()[0][:,1]
    x = x.data.cpu().numpy()[0]
    y = y.data.cpu().numpy()[0]
    vx = vx.data.cpu().numpy()[0]
    vy = vy.data.cpu().numpy()[0]
    # Publish for show_traj(), which reads these module-level dicts.
    global_trajectory = {'x':x, 'y':y, 'vx':vx, 'vy':vy}
    global_trajectory_real = {'x':real_x, 'y':real_y, 'vx':real_vx, 'vy':real_vy, 'ts_list':ts_list}
    show_traj(step)
    img = Image.fromarray(img).convert("RGB")
    draw = ImageDraw.Draw(img)
    # Ground-truth trajectory, drawn three pixels wide in blue.
    real_u, real_v = xy2uv(real_x, real_y)
    for i in range(len(real_u)-1):
        draw.line((real_v[i], real_u[i], real_v[i+1], real_u[i+1]), 'blue')
        draw.line((real_v[i]+1, real_u[i], real_v[i+1]+1, real_u[i+1]), 'blue')
        draw.line((real_v[i]-1, real_u[i], real_v[i+1]-1, real_u[i+1]), 'blue')
    # Sampled trajectory, drawn three pixels wide in red.
    u, v = xy2uv(x, y)
    for i in range(len(u)-1):
        draw.line((v[i], u[i], v[i+1], u[i+1]), 'red')
        draw.line((v[i]+1, u[i], v[i+1]+1, u[i+1]), 'red')
        draw.line((v[i]-1, u[i], v[i+1]-1, u[i+1]), 'red')
    img.save(('result/output/%s/' % opt.dataset_name)+str(step)+'_costmap.png')
    model.train()
def eval_error(total_step):
    """Evaluate one batch and log trajectory-error metrics to tensorboard.

    Samples a trajectory from the MDN for the next eval batch, converts the
    prediction back to metric units and logs mean absolute x/y/vx/vy errors,
    absolute speed error, ADE (average displacement error), and the final
    displacement error.

    Parameters
    ----------
    total_step : int
        Global training step; used as the tensorboard x-axis value.
    """
    # NOTE(review): `logger` only exists when opt.test_mode is False —
    # confirm this function is never reached in test mode.
    model.eval()
    abs_x = []
    abs_y = []
    abs_vx = []
    abs_vy = []
    abs_v = []
    ade = []
    final_displacement = []
    for i in range(1):  # a single eval batch per call
        batch = next(eval_samples)
        batch['img'] = batch['img'].to(device)
        batch['xy'] = batch['xy'].to(device)
        batch['vxy'] = batch['vxy'].to(device)
        pi, sigma, mu = model(batch['img'])
        try:
            output = sample(pi, sigma, mu)
            output = output.view(1, 10, 4)
        except Exception:
            # Sampling can fail for degenerate mixture parameters; skip this
            # batch. (Was a bare `except:`; also removed the redundant second
            # `output.view(1, 10, 4)` that followed the try block.)
            continue
        # De-normalize predictions back to metric units.
        x = output[:,:,0]*opt.max_dist
        y = output[:,:,1]*opt.max_dist
        vx = output[:,:,2]*opt.max_speed
        vy = output[:,:,3]*opt.max_speed
        real_x = batch['xy'].data.cpu().numpy()[0][:,0]*opt.max_dist
        real_y = batch['xy'].data.cpu().numpy()[0][:,1]*opt.max_dist
        real_vx = batch['vxy'].data.cpu().numpy()[0][:,0]
        real_vy = batch['vxy'].data.cpu().numpy()[0][:,1]
        x = x.data.cpu().numpy()[0]
        y = y.data.cpu().numpy()[0]
        vx = vx.data.cpu().numpy()[0]
        vy = vy.data.cpu().numpy()[0]
        abs_x.append(np.mean(np.abs(x-real_x)))
        abs_y.append(np.mean(np.abs(y-real_y)))
        abs_vx.append(np.mean(np.abs(vx - real_vx)))
        abs_vy.append(np.mean(np.abs(vy - real_vy)))
        final_displacement.append(np.abs(x-real_x)[-1])
        abs_v.append(np.mean(np.hypot(vx - real_vx, vy - real_vy)))
        ade.append(np.mean(np.hypot(x - real_x, y - real_y)))
    # Guard each scalar against an empty list (all samples may have failed).
    if len(abs_x) > 0: logger.add_scalar('eval/x', sum(abs_x)/len(abs_x), total_step)
    if len(abs_y) > 0: logger.add_scalar('eval/y', sum(abs_y)/len(abs_y), total_step)
    if len(abs_vx) > 0: logger.add_scalar('eval/vx', sum(abs_vx)/len(abs_vx), total_step)
    if len(abs_vy) > 0: logger.add_scalar('eval/vy', sum(abs_vy)/len(abs_vy), total_step)
    if len(abs_v) > 0: logger.add_scalar('eval/v', sum(abs_v)/len(abs_v), total_step)
    if len(ade) > 0: logger.add_scalar('eval/ade', sum(ade)/len(ade), total_step)
    if len(final_displacement) > 0: logger.add_scalar('eval/final_displacement', sum(final_displacement)/len(final_displacement), total_step)
    model.train()
total_step = 0
print('Start to train ...')
# NOTE: the dead gaussian_probability/mdn_loss/sample implementations that
# used to sit here as a triple-quoted string were removed; the live versions
# are imported from utils at the top of the file.
for index, batch in enumerate(dataloader):
    total_step += 1
    if opt.test_mode:
        # Test mode: only render sampled trajectories, then stop.
        for j in range(500): draw_traj(j)
        break
    batch['img'] = batch['img'].to(device)
    batch['xy'] = batch['xy'].to(device)
    batch['vxy'] = batch['vxy'].to(device)/opt.max_speed  # normalize velocities
    pi, sigma, mu = model(batch['img'])
    model.zero_grad()
    # Targets: positions and normalized velocities concatenated -> (batch, 40).
    loss = mdn_loss(pi, sigma, mu, torch.cat((batch['xy'], batch['vxy']), dim=2).view(-1, 40))
    if torch.isnan(loss):
        continue  # skip steps where the mixture likelihood degenerates
    loss.backward()
    torch.nn.utils.clip_grad_value_(model.parameters(), clip_value=1)
    optimizer.step()
    logger.add_scalar('train/loss', loss.item(), total_step)
    # Periodic evaluation/visualization is currently disabled (previously kept
    # as a string literal that was pointlessly evaluated every iteration):
    # if total_step % opt.test_interval == 0:
    #     try:
    #         eval_error(total_step)
    #         draw_traj(total_step)
    #     except Exception:
    #         pass
    if total_step % opt.checkpoint_interval == 0:
        torch.save(model.state_dict(), 'result/saved_models/%s/model_%d.pth'%(opt.dataset_name, total_step))
"numpy.abs",
"argparse.ArgumentParser",
"torch.cat",
"matplotlib.pyplot.figure",
"torch.set_num_threads",
"torch.isnan",
"torch.nn.MSELoss",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.close",
"os.path.dirname",
"utils.sample",
"matplotlib.pyplot.rcParams.update",
"torch.utils.tensorbo... | [((362, 413), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.max_open_warning': 0}"], {}), "({'figure.max_open_warning': 0})\n", (381, 413), True, 'import matplotlib.pyplot as plt\n'), ((801, 823), 'torch.manual_seed', 'torch.manual_seed', (['(999)'], {}), '(999)\n', (818, 823), False, 'import torch\n'), ((824, 851), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(999)'], {}), '(999)\n', (846, 851), False, 'import torch\n'), ((852, 877), 'torch.set_num_threads', 'torch.set_num_threads', (['(16)'], {}), '(16)\n', (873, 877), False, 'import torch\n'), ((961, 986), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (984, 986), False, 'import argparse\n'), ((2716, 2787), 'os.makedirs', 'os.makedirs', (["('result/saved_models/%s' % opt.dataset_name)"], {'exist_ok': '(True)'}), "('result/saved_models/%s' % opt.dataset_name, exist_ok=True)\n", (2727, 2787), False, 'import os\n'), ((2788, 2853), 'os.makedirs', 'os.makedirs', (["('result/output/%s' % opt.dataset_name)"], {'exist_ok': '(True)'}), "('result/output/%s' % opt.dataset_name, exist_ok=True)\n", (2799, 2853), False, 'import os\n'), ((3148, 3219), 'carla_utils.parse_yaml_file_unsafe', 'cu.parse_yaml_file_unsafe', (['"""robo_dataset_utils/params/param_kitti.yaml"""'], {}), "('robo_dataset_utils/params/param_kitti.yaml')\n", (3173, 3219), True, 'import carla_utils as cu\n'), ((3241, 3280), 'robo_dataset_utils.robo_utils.kitti.torch_dataset.TrajectoryDataset_CNNFC', 'TrajectoryDataset_CNNFC', (['param', '"""train"""'], {}), "(param, 'train')\n", (3264, 3280), False, 'from robo_dataset_utils.robo_utils.kitti.torch_dataset import TrajectoryDataset_CNNFC\n'), ((3296, 3395), 'torch.utils.data.DataLoader', 'DataLoader', (['trajectory_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(False)', 'num_workers': 'opt.n_cpu'}), '(trajectory_dataset, batch_size=opt.batch_size, shuffle=False,\n num_workers=opt.n_cpu)\n', (3306, 3395), False, 
'from torch.utils.data import DataLoader\n'), ((3419, 3457), 'robo_dataset_utils.robo_utils.kitti.torch_dataset.TrajectoryDataset_CNNFC', 'TrajectoryDataset_CNNFC', (['param', '"""eval"""'], {}), "(param, 'eval')\n", (3442, 3457), False, 'from robo_dataset_utils.robo_utils.kitti.torch_dataset import TrajectoryDataset_CNNFC\n'), ((3478, 3557), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_trajectory_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)'}), '(eval_trajectory_dataset, batch_size=1, shuffle=False, num_workers=1)\n', (3488, 3557), False, 'from torch.utils.data import DataLoader\n'), ((785, 799), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (797, 799), False, 'from datetime import datetime\n'), ((2889, 2920), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_path'}), '(log_dir=log_path)\n', (2902, 2920), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((2925, 2968), 'utils.write_params', 'write_params', (['log_path', 'parser', 'description'], {}), '(log_path, parser, description)\n', (2937, 2968), False, 'from utils import write_params, fig2data, sample, mdn_loss\n'), ((4170, 4196), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (4180, 4196), True, 'import matplotlib.pyplot as plt\n'), ((4644, 4673), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4654, 4673), True, 'import matplotlib.pyplot as plt\n'), ((5287, 5315), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower left"""'}), "(loc='lower left')\n", (5297, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5336), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (5329, 5336), True, 'import matplotlib.pyplot as plt\n'), ((5352, 5365), 'utils.fig2data', 'fig2data', (['fig'], {}), '(fig)\n', (5360, 5365), False, 'from utils import write_params, fig2data, sample, 
mdn_loss\n'), ((6777, 6796), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (6791, 6796), False, 'from PIL import Image, ImageDraw\n'), ((10977, 10994), 'torch.isnan', 'torch.isnan', (['loss'], {}), '(loss)\n', (10988, 10994), False, 'import torch\n'), ((116, 133), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (123, 133), False, 'from os.path import join, dirname\n'), ((166, 183), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'from os.path import join, dirname\n'), ((913, 938), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (936, 938), False, 'import torch\n'), ((2982, 2994), 'model.RNN_MDN', 'RNN_MDN', (['(256)'], {}), '(256)\n', (2989, 2994), False, 'from model import RNN_MDN\n'), ((3018, 3036), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (3034, 3036), False, 'import torch\n'), ((3783, 3820), 'numpy.where', 'np.where', (['((u >= 0) & (u < opt.height))'], {}), '((u >= 0) & (u < opt.height))\n', (3791, 3820), True, 'import numpy as np\n'), ((3865, 3901), 'numpy.where', 'np.where', (['((v >= 0) & (v < opt.width))'], {}), '((v >= 0) & (v < opt.width))\n', (3873, 3901), True, 'import numpy as np\n'), ((5908, 5929), 'utils.sample', 'sample', (['pi', 'sigma', 'mu'], {}), '(pi, sigma, mu)\n', (5914, 5929), False, 'from utils import write_params, fig2data, sample, mdn_loss\n'), ((6731, 6751), 'PIL.Image.fromarray', 'Image.fromarray', (['img'], {}), '(img)\n', (6746, 6751), False, 'from PIL import Image, ImageDraw\n'), ((7934, 7955), 'utils.sample', 'sample', (['pi', 'sigma', 'mu'], {}), '(pi, sigma, mu)\n', (7940, 7955), False, 'from utils import write_params, fig2data, sample, mdn_loss\n'), ((8697, 8715), 'numpy.abs', 'np.abs', (['(x - real_x)'], {}), '(x - real_x)\n', (8703, 8715), True, 'import numpy as np\n'), ((8745, 8763), 'numpy.abs', 'np.abs', (['(y - real_y)'], {}), '(y - real_y)\n', (8751, 8763), True, 'import numpy as np\n'), ((8794, 8814), 
'numpy.abs', 'np.abs', (['(vx - real_vx)'], {}), '(vx - real_vx)\n', (8800, 8814), True, 'import numpy as np\n'), ((8847, 8867), 'numpy.abs', 'np.abs', (['(vy - real_vy)'], {}), '(vy - real_vy)\n', (8853, 8867), True, 'import numpy as np\n'), ((8904, 8922), 'numpy.abs', 'np.abs', (['(x - real_x)'], {}), '(x - real_x)\n', (8910, 8922), True, 'import numpy as np\n'), ((8955, 8991), 'numpy.hypot', 'np.hypot', (['(vx - real_vx)', '(vy - real_vy)'], {}), '(vx - real_vx, vy - real_vy)\n', (8963, 8991), True, 'import numpy as np\n'), ((9021, 9053), 'numpy.hypot', 'np.hypot', (['(x - real_x)', '(y - real_y)'], {}), '(x - real_x, y - real_y)\n', (9029, 9053), True, 'import numpy as np\n'), ((10911, 10956), 'torch.cat', 'torch.cat', (["(batch['xy'], batch['vxy'])"], {'dim': '(2)'}), "((batch['xy'], batch['vxy']), dim=2)\n", (10920, 10956), False, 'import torch\n')] |
"""Generic classes and functions which aid the asteroseismology features."""
import numpy as np
import copy
from astropy import units as u
from astropy.units import Quantity
__all__ = ["SeismologyQuantity"]
class SeismologyQuantity(Quantity):
    """An asteroseismic value with unit, uncertainty, and estimation method.

    Extends `~astropy.units.Quantity` with the extra attributes:

    * name (e.g. 'deltanu' or 'radius');
    * error (the uncertainty);
    * method (e.g. the asteroseismic scaling relation used);
    * diagnostics;
    * diagnostics_plot_method.
    """

    def __new__(
        cls,
        quantity,
        name=None,
        error=None,
        method=None,
        diagnostics=None,
        diagnostics_plot_method=None,
    ):
        # Quantity subclasses numpy.ndarray, so construction must happen in
        # __new__ rather than __init__; see
        # https://docs.astropy.org/en/stable/units/quantity.html#subclassing-quantity.
        self = Quantity.__new__(cls, quantity.value)
        self.__dict__ = quantity.__dict__
        self.name = name
        self.error = error
        self.method = method
        self.diagnostics = diagnostics
        self.diagnostics_plot_method = diagnostics_plot_method
        return self

    def __repr__(self):
        try:
            return f"{self.name}: {self.value:.2f} {self.unit.__str__()} (method: {self.method})"
        except AttributeError:
            # Math operations strip the seismology attributes; fall back to
            # the plain Quantity representation.
            return super().__repr__()

    def _repr_latex_(self):
        try:
            return f"{self.name}: ${self.value:.2f}$ {self.unit._repr_latex_()} (method: {self.method})"
        except AttributeError:
            # Math operations strip the seismology attributes; fall back.
            return super()._repr_latex_()
def get_fwhm(periodogram, numax):
    """Estimate the FWHM of the seismic mode envelope centered at `numax`.

    In a solar-like oscillator's power spectrum, the oscillation power forms
    an approximately Gaussian "mode envelope" around numax. Which empirical
    scaling relation applies is decided by the highest frequency in the
    periodogram:

    * above 500 microhertz the star is assumed to be on the main sequence,
      which oscillates at higher frequencies (Lund et al. 2017):
      fwhm = 0.25 * numax;
    * otherwise it is assumed to be an evolved Red Giant Branch star, which
      oscillates at lower frequencies (Mosser et al. 2010):
      fwhm = 0.66 * numax^0.88.

    Parameters
    ----------
    periodogram : object
        Periodogram whose maximum frequency selects the scaling relation.
    numax : float
        The estimated position of the numax of the power spectrum.

    Returns
    -------
    fwhm : float
        The estimated full-width-half-maximum of the seismic mode envelope.
    """
    top_frequency = u.Quantity(periodogram.frequency[-1], u.microhertz)
    threshold = u.Quantity(500.0, u.microhertz)
    if top_frequency > threshold:
        # Main-sequence regime.
        return 0.25 * numax
    # Evolved RGB regime.
    return 0.66 * numax ** 0.88
def autocorrelate(periodogram, numax, window_width=25.0, frequency_spacing=None):
    """An autocorrelation function (ACF) for seismic mode envelopes.

    We autocorrelate a region with a width of `window_width` (in microhertz)
    around a central frequency `numax` (in microhertz). The section of power
    being autocorrelated is first rescaled by subtracting its mean, so that
    its noise is centered around zero; otherwise noise would appear in the
    ACF as a function of 1/lag.

    Parameters
    ----------
    periodogram : object
        Periodogram with `frequency` and `power` attributes whose elements
        expose a `.value` array.
    numax : float
        The estimated position of the numax of the power spectrum. This
        is used to calculate the region autocorrelated with itself.
    window_width : int or float
        The width of the autocorrelation window around the central
        frequency numax.
    frequency_spacing : float
        The frequency spacing of the periodogram. If none is passed, it
        is calculated internally. This should never be set by the user.

    Returns
    -------
    acf : array-like
        The autocorrelation power calculated for the given numax.
    """
    if frequency_spacing is None:
        frequency_spacing = np.median(np.diff(periodogram.frequency.value))
    spread = int(window_width / 2 / frequency_spacing)  # half-window in bins
    x = int(numax / frequency_spacing)  # index of numax
    x0 = int(
        (periodogram.frequency[0].value / frequency_spacing)
    )  # offset in case the frequency array doesn't start at 0
    xt = x - x0
    # Copy so the in-place mean subtraction below cannot mutate the
    # periodogram's power array. (np.array(..., copy=True) replaces a
    # needlessly heavyweight copy.deepcopy of an ndarray.)
    p_sel = np.array(periodogram.power[xt - spread : xt + spread].value, copy=True)
    p_sel -= np.nanmean(p_sel)  # zero-mean so noise doesn't add a 1/lag floor
    # Correlate the selection with itself; keep only non-negative lags.
    C = np.correlate(p_sel, p_sel, mode="full")[
        len(p_sel) - 1 :
    ]
    return C
| [
"copy.deepcopy",
"astropy.units.Quantity",
"numpy.diff",
"astropy.units.Quantity.__new__",
"numpy.correlate",
"numpy.nanmean"
] | [((5464, 5527), 'copy.deepcopy', 'copy.deepcopy', (['periodogram.power[xt - spread:xt + spread].value'], {}), '(periodogram.power[xt - spread:xt + spread].value)\n', (5477, 5527), False, 'import copy\n'), ((5586, 5603), 'numpy.nanmean', 'np.nanmean', (['p_sel'], {}), '(p_sel)\n', (5596, 5603), True, 'import numpy as np\n'), ((1054, 1091), 'astropy.units.Quantity.__new__', 'Quantity.__new__', (['cls', 'quantity.value'], {}), '(cls, quantity.value)\n', (1070, 1091), False, 'from astropy.units import Quantity\n'), ((3470, 3521), 'astropy.units.Quantity', 'u.Quantity', (['periodogram.frequency[-1]', 'u.microhertz'], {}), '(periodogram.frequency[-1], u.microhertz)\n', (3480, 3521), True, 'from astropy import units as u\n'), ((3524, 3555), 'astropy.units.Quantity', 'u.Quantity', (['(500.0)', 'u.microhertz'], {}), '(500.0, u.microhertz)\n', (3534, 3555), True, 'from astropy import units as u\n'), ((5661, 5700), 'numpy.correlate', 'np.correlate', (['p_sel', 'p_sel'], {'mode': '"""full"""'}), "(p_sel, p_sel, mode='full')\n", (5673, 5700), True, 'import numpy as np\n'), ((5115, 5151), 'numpy.diff', 'np.diff', (['periodogram.frequency.value'], {}), '(periodogram.frequency.value)\n', (5122, 5151), True, 'import numpy as np\n')] |
"""
NGHXRG - Teledyne HxRG Noise Generator
Modification History:
8-15 April 2015, <NAME>, NASA/GSFC
- Implement Pierre Ferruit's (ESA/ESTEC)recommendation to use
numpy.fft.rfft and numpy.fft.irfft for faster execution. This saves
about 30% in execution time.
- Clean up setting default values per several suggestions from
<NAME> (NASA/JPL).
- Change from setting the shell variable H2RG_PCA0 to point to the PCA-zero
file to having a shell variable point to the NG home directory. This is per
a suggestion from <NAME> (NASA/JPL) that would allow seamlessly adding
more PCA-zero templates.
- Implement a request from <NAME> (NASA/JPL) for status
reporting. This was done by adding the "verbose" argument.
- Implement a request from P<NAME> (ESA/ESTEC) to generate
3-dimensional data cubes.
- Implement a request from P<NAME> to treat ACN as different 1/f
noise in even/odd columns. Previously ACN was treated purely as a feature
in Fourier space.
- Version 2(Beta)
16 April 2015, <NAME>
- Fixed a bug in the pinkening filter definitions. Abs() was used where
sqrt() was intended. The bug caused power spectra to have the wrong shape at
low frequency.
- Version 2.1(Beta)
17 April 2015, <NAME>
- Implement a request from Ch<NAME> for HXRGNoise() to exit gracefully if
the bias_file is not found.
- Version 2.2 (Beta)
8 July 2015, <NAME>
- Address PASP referee comments
* Fast scan direction is now reversible. To reverse the slow scan
direction use the numpy flipud() function.
* Modifications to support subarrays. Specifically,
> Setting reference_pixel_border_width=0 (integer zero);
+ (1) eliminates the reference pixel border and
+ (2) turns off adding in a bias pattern
when simulating data cubes. (Turned on in v2.5)
- Version 2.4
12 Oct 2015, <NAME>, UA/Steward
- Make compatible with Python 2.x
* from __future__ import division
- Allow pca0 & kTC noise to show up for a single frame
- Included options for subarray modes (FULL, WINDOW, and STRIPE)
* Keywords x0 and y0 define a subarray position (lower left corner)
* Selects correct pca0 region for subarray underlay
* Adds reference pixels if they exist within subarray window
- Tie negative values to 0 and anything >=2^16 to 2^16-1
- Version 2.5
20 Oct 2015, <NAME>, UA/Steward
- Padded nstep to the next power of 2 in order to improve FFT runtime
* nstep2 = int(2**np.ceil(np.log2(nstep)))
* Speeds up FFT calculations by ~5x
- Don't generate noise elements if their magnitudes are equal to 0.
- Returns a copy of final HDU result for easy retrieval
- Version 2.6
17 Nov 2015, <NAME>, UA/Steward
- Read in bias_file rather than pca0_file
* Calculate pca0 element by normalizing bias file
- Include alternating column offsets (aco_a & aco_b)
* These can be defined as a list or np array
- Version 2.7
15 Feb 2016, <NAME>, UA/Steward
- Add a reference instability
* ref pixels don't track active pixels perfectly
- Version 2.8
9 May 2016, <NAME>, UA/Steward
- Each channel can have its own read noise value
- Version 2.9
21 July 2016, <NAME>, UA/Steward
- Add option to use FFTW
* This can be faster for some processors/OS architectures
* The decision to use this is highly dependent on the computer
and should be tested beforehand.
* For more info: https://pypi.python.org/pypi/pyFFTW
- Version 3.0
"""
# Necessary for Python 2.6 and later
#from __future__ import division, print_function
from __future__ import absolute_import, division, print_function, unicode_literals
__version__ = "3.0"
import os
import warnings
from astropy.io import fits
import numpy as np
from scipy.ndimage.interpolation import zoom
from scipy.ndimage import convolve
from astropy.stats.funcs import median_absolute_deviation as mad
import datetime
# import matplotlib.pyplot as plt # Handy for debugging
# Optional FFT backend: pyFFTW can outperform numpy.fft on some
# processor/OS combinations; record whether it is importable.
try:
    import pyfftw
    pyfftw_available = True
except ImportError:
    pyfftw_available = False
import multiprocessing as mp  # used to choose a default FFTW thread count
#import time
# NOTE(review): this silences *all* warnings process-wide, not just this
# module's — confirm that is intended.
warnings.filterwarnings('ignore')
import logging
_log = logging.getLogger('nghxrg')  # module-level logger
class HXRGNoise:
"""Simulate Teledyne HxRG + SIDECAR ASIC noise
HXRGNoise is a class for making realistic Teledyne HxRG system
noise. The noise model includes correlated, uncorrelated,
stationary, and non-stationary components. The default parameters
make noise that resembles Channel 1 of JWST NIRSpec. NIRSpec uses
H2RG detectors. They are read out using four video outputs at
1.e+5 pix/s/output.
Parameters
----------
naxis1 : int
X-dimension of the FITS cube.
naxis2 : int
Y-dimension of the FITS cube.
naxis3 : int
Z-dimension of the FITS cube (number of up-the-ramp samples).
n_out : int
Number of detector amplifiers/channels/outputs.
nroh : int
New row overhead in pixels. This allows for a short
wait at the end of a row before starting the next one.
nfoh : int
New frame overhead in rows. This allows for a short
wait at the end of a frame before starting the next one.
nfoh_pix(TBD) : int
New frame overhead in pixels. This allows for a short
wait at the end of a frame before starting the next one.
Generally a single pix offset for full frame and stripe
for JWST ASIC systems.
dt : float
Pixel dwell time in seconds (10e-6 sec, for instance).
bias_file : str
Name of a FITS file that contains bias pattern, also used for PCA-zero.
dark_file : str
Name of a FITS file that contains dark current values per pixel.
verbose : bool
Enable this to provide status reporting.
wind_mode : str
'FULL', 'STRIPE', or 'WINDOW'.
x0/y0 : int
Pixel positions of subarray mode.
det_size : int
Pixel dimension of full detector (square).
reference_pixel_border_width : int
Width of reference pixel border around image area.
reverse_scan_direction : bool
Enable this to reverse the fast scanner readout directions.
This capability was added to support Teledyne's programmable
fast scan readout directions. The default setting of False
corresponds to what HxRG detectors default to upon power up.
use_fftw : bool
If pyFFTW is installed, you can use this in place of np.fft.
ncores : int
Specify number of cores (threads, actually) to use for pyFFTW.
"""
# These class variables are common to all HxRG detectors
nghxrg_version = float(__version__) # Sofware version
def __init__(self, naxis1=None, naxis2=None, naxis3=None, n_out=None,
dt=None, nroh=None, nfoh=None, nfoh_pix=None,
dark_file=None, bias_file=None, verbose=False,
reverse_scan_direction=False, reference_pixel_border_width=None,
wind_mode='FULL', x0=0, y0=0, det_size=None,
use_fftw=False, ncores=None):
# pyFFTW usage
self.use_fftw = True if (use_fftw and pyfftw_available) else False
# By default, use 50% of available cores for FFTW parallelization
self.ncores = mp.cpu_count() // 2 if ncores is None else int(ncores)
# ======================================================================
#
# DEFAULT CLOCKING PARAMETERS
#
# The following parameters define the default HxRG clocking pattern. The
# parameters that define the default noise model are defined in the
# mknoise() method.
#
# ======================================================================
# Subarray?
if wind_mode is None:
wind_mode = 'FULL'
if det_size is None:
det_size = 2048
wind_mode = wind_mode.upper()
modes = ['FULL', 'STRIPE', 'WINDOW']
if wind_mode not in modes:
_log.warning('%s not a valid window readout mode! Returning...' % inst_params['wind_mode'])
os.sys.exit()
if wind_mode == 'WINDOW':
n_out = 1
if wind_mode == 'FULL':
x0 = 0; y0 = 0
if wind_mode == 'STRIPE':
x0 = 0
# Default clocking pattern is JWST NIRSpec
self.naxis1 = 2048 if naxis1 is None else int(naxis1)
self.naxis2 = 2048 if naxis2 is None else int(naxis2)
self.naxis3 = 1 if naxis3 is None else int(naxis3)
self.n_out = 4 if n_out is None else int(n_out)
self.dt = 10e-6 if dt is None else dt
self.nroh = 12 if nroh is None else int(nroh)
self.nfoh = 1 if nfoh is None else int(nfoh)
self.nfoh_pix = 0 #if nfoh_pix is None else int(nfoh_pix)
self.reference_pixel_border_width = 4 if reference_pixel_border_width is None \
else int(reference_pixel_border_width)
# Check that det_size is greater than self.naxis1 and self.naxis2 in WINDOW mode (JML)
if wind_mode == 'WINDOW':
if (self.naxis1 > det_size):
_log.warning('NAXIS1 %s greater than det_size %s! Returning...' % (self.naxis1,det_size))
os.sys.exit()
if (self.naxis2 > det_size):
_log.warning('NAXIS2 %s greater than det_size %s! Returning...' % (self.naxis1,det_size))
os.sys.exit()
# Initialize PCA-zero file and make sure that it exists and is a file
#self.bias_file = os.getenv('NGHXRG_HOME')+'/sca_images/nirspec_pca0.fits' if \
# bias_file is None else bias_file
#self.bias_file = 'nirspec_pca0.fits' if bias_file is None else bias_file
self.bias_file = bias_file
if bias_file is not None:
if os.path.isfile(self.bias_file) is False:
raise ValueError('There was an error finding bias_file {}'.format(bias_file))
print('There was an error finding bias_file!')
print(bias_file)
#os.sys.exit()
# print('There was an error finding bias_file! Check to be')
# print('sure that the NGHXRG_HOME shell environment')
# print('variable is set correctly and that the')
# print('$NGHXRG_HOME/ directory contains the desired PCA0')
# print('file. The default is nirspec_pca0.fits.')
# os.sys.exit()
# Add in dark current file (JML)
self.dark_file = dark_file
if dark_file is not None:
if os.path.isfile(self.dark_file) is False:
raise ValueError('There was an error finding dark_file {}'.format(dark_file))
#print('There was an error finding dark_file!')
#print(dark_file)
#os.sys.exit()
# ======================================================================
# Configure Subarray
self.wind_mode = wind_mode
self.det_size = det_size
self.x0 = x0
self.y0 = y0
# Configure status reporting
self.verbose = verbose
# Configure readout direction
self.reverse_scan_direction = reverse_scan_direction
# Compute the number of pixels in the fast-scan direction per output
self.xsize = self.naxis1 // self.n_out
# Compute the number of time steps per integration, per output
self.nstep_frame = (self.xsize+self.nroh) * (self.naxis2+self.nfoh) + self.nfoh_pix
self.nstep = self.nstep_frame * self.naxis3
# Pad nsteps to a power of 2, which is much faster
self.nstep2 = int(2**np.ceil(np.log2(self.nstep)))
# Compute frame time and ramp time
self.tframe = self.nstep_frame * self.dt
self.inttime = self.tframe * self.naxis3
# For adding in ACN, it is handy to have masks of the even
# and odd pixels on one output neglecting any gaps
self.m_even = np.zeros((self.naxis3,self.naxis2,self.xsize))
self.m_odd = np.zeros_like(self.m_even)
for x in np.arange(0,self.xsize,2):
self.m_even[:,:self.naxis2,x] = 1
self.m_odd[:,:self.naxis2,x+1] = 1
self.m_even = np.reshape(self.m_even, np.size(self.m_even))
self.m_odd = np.reshape(self.m_odd, np.size(self.m_odd))
# Also for adding in ACN, we need a mask that point to just
# the real pixels in ordered vectors of just the even or odd
# pixels
self.m_short = np.zeros((self.naxis3, self.naxis2+self.nfoh, \
(self.xsize+self.nroh)//2))
self.m_short[:,:self.naxis2,:self.xsize//2] = 1
self.m_short = np.reshape(self.m_short, np.size(self.m_short))
# Define frequency arrays
self.f1 = np.fft.rfftfreq(self.nstep2) # Frequencies for nstep elements
self.f2 = np.fft.rfftfreq(2*self.nstep2) # ... for 2*nstep elements
self.f3 = np.fft.rfftfreq(2*self.naxis3)
# First element should not be 0
self.f1[0] = self.f1[1]
self.f2[0] = self.f2[1]
self.f3[0] = self.f3[1]
# Define pinkening filters. F1 and p_filter1 are used to
# generate ACN. F2 and p_filter2 are used to generate 1/f noise.
# F2 and p_filter2 are used to generate reference instabilities.
self.alpha = -1 # Hard code for 1/f noise until proven otherwise
self.p_filter1 = np.sqrt(self.f1**self.alpha)
self.p_filter2 = np.sqrt(self.f2**self.alpha)
self.p_filter3 = np.sqrt(self.f3**self.alpha)
self.p_filter1[0] = 0.
self.p_filter2[0] = 0.
self.p_filter3[0] = 0.
# Initialize pca0. This includes scaling to the correct size,
# zero offsetting, and renormalization. We use robust statistics
# because pca0 is real data
if self.bias_file is None:
h = fits.PrimaryHDU(np.zeros([det_size, det_size]))
hdu = fits.HDUList([h])
else:
hdu = fits.open(self.bias_file)
nx_pca0 = hdu[0].header['naxis1']
ny_pca0 = hdu[0].header['naxis2']
data = hdu[0].data
# Make sure the real PCA image is correctly scaled to size of fake data (JML)
# Depends if we're FULL, STRIPE, or WINDOW
if wind_mode == 'FULL':
scale1 = self.naxis1 / nx_pca0
scale2 = self.naxis2 / ny_pca0
zoom_factor = np.max([scale1, scale2])
if wind_mode == 'STRIPE':
zoom_factor = self.naxis1 / nx_pca0
if wind_mode == 'WINDOW':
# Scale based on det_size
scale1 = self.det_size / nx_pca0
scale2 = self.det_size / ny_pca0
zoom_factor = np.max([scale1, scale2])
# Resize PCA0 data
#print(zoom_factor)
if zoom_factor != 1:
data = zoom(data, zoom_factor, order=1, mode='wrap')
# Copy data to save as bias pattern
bias_image = data.copy()
# Renormalize for PCA0 noise stuff
data -= np.median(data) # Zero offset
data /= (1.4826 * mad(data)) # Renormalize
# Select region of pca0 associated with window position
if self.wind_mode == 'WINDOW':
x1 = self.x0; y1 = self.y0
elif self.wind_mode == 'STRIPE':
x1 = 0; y1 = self.y0
else:
x1 = 0; y1 = 0
x2 = x1 + self.naxis1
y2 = y1 + self.naxis2
# Make sure x2 and y2 are valid
if (x2 > data.shape[0] or y2 > data.shape[1]):
_log.warning('Specified window size does not fit within detector array!')
_log.warning('X indices: [%s,%s]; Y indices: [%s,%s]; XY Size: [%s, %s]' %
(x1,x2,y1,y2,data.shape[0],data.shape[1]))
os.sys.exit()
# Save as properties
self.pca0 = data[y1:y2,x1:x2]
self.bias_image = bias_image[y1:y2,x1:x2]
# Open dark current file (ADU/sec/pixel)
if self.dark_file is not None:
dark_hdu = fits.open(self.dark_file)
dark_image = dark_hdu[0].data
self.dark_image = dark_image[y1:y2,x1:x2]
# Dark current distributions are very wide because of uncertainties
# This causes certain pixels to fall below 0.
# We can assume all pixels within 5-sigma have the same dark current
# as well as those with negative values.
# Those with large dark currents are likely real.
sig = 1.4826 * mad(dark_image)
med = np.median(dark_image)
l1 = med - 5*sig; l2 = med + 5*sig
self.dark_image[(self.dark_image > l1) & (self.dark_image < l2)] = med
# Set negative values to median
self.dark_image[self.dark_image<0] = med
# Set negative values to median
#self.dark_image[self.dark_image<0] = np.median(self.dark_image)
#self.dark_image[self.dark_image<0.005] = 0.001
else:
self.dark_image = None
# How many reference pixels on each border?
w = self.reference_pixel_border_width # Easier to work with
lower = w-y1; upper = w-(det_size-y2)
left = w-x1; right = w-(det_size-x2)
ref_all = np.array([lower,upper,left,right])
ref_all[ref_all<0] = 0
self.ref_all = ref_all
def message(self, message_text):
"""Used for status reporting"""
if self.verbose is True:
print('NG: ' + message_text + ' at DATETIME = ', (datetime.datetime.now().time()))
def white_noise(self, nstep=None):
"""Gaussian noise
Generate white noise for an HxRG including all time steps
(actual pixels and overheads).
Parameters
----------
nstep : int
Length of vector returned
"""
return(np.random.standard_normal(nstep))
def pink_noise(self, mode, fmin=None):
"""Generate a vector of non-periodic pink noise.
Parameters
----------
mode : str
Selected from 'pink', 'acn', or 'ref_inst'.
fmin : float, optional
Low-frequency cutoff. A value of 0 means no cut-off.
"""
# Configure depending on mode setting
if 'pink' in mode:
nstep = 2*self.nstep
nstep2 = 2*self.nstep2
f = self.f2
p_filter = self.p_filter2
elif 'acn' in mode:
nstep = self.nstep
nstep2 = self.nstep2
f = self.f1
p_filter = self.p_filter1
elif 'ref_inst' in mode:
nstep = 2*self.naxis3
nstep2 = 2*self.naxis3
f = self.f3
p_filter = self.p_filter3
# Build scaling factors for all frequencies
fmin = 1./nstep2 if fmin is None else np.max([fmin, 1./nstep2])
ix = np.sum(f < fmin) # Index of the cutoff
if ix > 1 and ix < len(f):
f = f.copy()
p_filter = p_filter.copy()
f[:ix] = f[ix]
p_filter[:ix] = p_filter[ix]
# Calculate theoretical output standard deviation from scaling
w = p_filter[1:-1]
w_last = p_filter[-1] * (1 + (nstep2 % 2)) / 2. # correct f = +-0.5
the_std = 2 * np.sqrt(np.sum(w**2) + w_last**2) / nstep2
# Generate scaled random power + phase
sr = np.random.normal(scale=p_filter)
si = np.random.normal(scale=p_filter)
# If the signal length is even, frequencies +/- 0.5 are equal
# so the coefficient must be real.
if (nstep2 % 2) == 0:
si[-1] = 0
# Regardless of signal length, the DC component must be real
si[0] = 0
# Combine power + corrected phase to Fourier components
thefft = sr + 1J * si
#p0 = time.time()
# Apply the pinkening filter.
if self.use_fftw:
result = pyfftw.interfaces.numpy_fft.irfft(thefft, overwrite_input=True,\
planner_effort='FFTW_ESTIMATE', threads=self.ncores)
else:
result = np.fft.irfft(thefft)
#p1 = time.time()
#print("FFT and IFFT took",p1-p0," seconds")
# Keep 1st half of nstep and scale to unit variance
result = result[:nstep//2] / the_std
return(result)
def mknoise(self, o_file=None, gain=None,
rd_noise=None, c_pink=None, u_pink=None,
acn=None, aco_a=None, aco_b=None, pca0_amp=None,
reference_pixel_noise_ratio=None, ktc_noise=None,
bias_off_avg=None, bias_off_sig=None, bias_amp=None,
ch_off=None, ref_f2f_corr=None, ref_f2f_ucorr=None, ref_inst=None,
out_ADU=True):
"""Create FITS cube containing only noise
Parameters
----------
o_file : str, None
Output filename. If None, then no output.
gain : float
Gain in e/ADU. Defaults to 1.0.
ktc_noise : float
kTC noise in electrons. Set this equal to
sqrt(k*T*C_pixel)/q_e, where k is Boltzmann's constant,
T is detector temperature, and C_pixel is pixel capacitance.
For an H2RG, the pixel capacitance is typically about 40 fF.
rd_noise : float
Standard deviation of read noise in electrons.
Can be an array for individual amplifiers.
c_pink :float
Standard deviation of correlated pink noise in electrons.
u_pink : float
Standard deviation of uncorrelated pink noise in electrons.
Can be an array for individual amplifiers.
acn : float
Standard deviation of alterating column noise in electrons
pca0_amp : float
Standard deviation of pca0 in electrons
reference_pixel_noise_ratio : float
Ratio of the standard deviation of the reference pixels to
the science pixels. Reference pixels are usually a little
lower noise.
bias_off_avg : float
On average, integrations start here in electrons.
Set this so that all pixels are in range.
bias_off_sig : float
bias_off_avg has some variation. This is its std dev.
bias_amp : float
A multiplicative factor that we multiply bias_image by
to simulate a bias pattern. This is completely
independent from adding in "picture frame" noise. Set to
0.0 remove bias pattern. For NIRCam, default is 1.0.
ch_off : float
Offset of each channel relative to bias_off_avg.
Can be an array for individual amplifiers.
ref_f2f_corr : float
Random frame-to-frame reference offsets due to PA reset,
correlated between channels.
ref_f2f_ucorr : float
Random frame-to-frame reference offsets due to PA reset,
per channel. Can be an array for individual amplifiers.
aco_a : float
Relative offsets of altnernating columns "a".
Can be an array for individual amplifiers.
aco_b : float
Relative offsets of altnernating columns "b".
Can be an array for individual amplifiers.
ref_inst : float
Reference instability relative to active pixels.
out_ADU : bool
Return as converted to ADU (True) or raw electrons?
Notes
-----
Because of the noise correlations, there is no simple way to
predict the noise of the simulated images. However, to a
crude first approximation, these components add in
quadrature.
The units in the above are mostly "electrons". This follows convention
in the astronomical community. From a physics perspective, holes are
actually the physical entity that is collected in Teledyne's p-on-n
(p-type implants in n-type bulk) HgCdTe architecture.
"""
self.message('Starting mknoise()')
# ======================================================================
#
# DEFAULT NOISE PARAMETERS
#
# These defaults create noise similar to that seen in the JWST NIRSpec.
#
# ======================================================================
#self.gain = 1.0 if gain is None else gain
#self.rd_noise = 5.2 if rd_noise is None else rd_noise
#self.c_pink = 3.0 if c_pink is None else c_pink
#self.u_pink = 1.0 if u_pink is None else u_pink
#self.acn = 0.5 if acn is None else acn
#self.pca0_amp = 0.2 if pca0_amp is None else pca0_amp
self.gain = 1.0 if gain is None else gain
# Set certain values equal to None if they are set to 0.
#self.rd_noise = None if rd_noise == 0.0 else rd_noise
self.rd_noise = rd_noise
self.c_pink = None if c_pink == 0.0 else c_pink
self.u_pink = u_pink
self.acn = None if acn == 0.0 else acn
self.pca0_amp = None if pca0_amp == 0.0 else pca0_amp
# Change this only if you know that your detector is different from a
# typical H2RG.
self.reference_pixel_noise_ratio = 0.8 if \
reference_pixel_noise_ratio is None else reference_pixel_noise_ratio
# These are used only when generating cubes. They are
# completely removed when the data are calibrated to
# correlated double sampling or slope images. We include
# them in here to make more realistic looking raw cubes.
self.ktc_noise = 29. if ktc_noise is None else ktc_noise
self.bias_off_avg = 5000. if bias_off_avg is None else bias_off_avg
self.bias_off_sig = 0. if bias_off_sig is None else bias_off_sig
self.bias_amp = 1. if bias_amp is None else bias_amp
self.ch_off = 0. if ch_off is None else ch_off
self.aco_a = 0. if aco_a is None else aco_a
self.aco_b = 0. if aco_b is None else aco_b
self.ref_f2f_corr = None if ref_f2f_corr is None else ref_f2f_corr
self.ref_f2f_ucorr = None if ref_f2f_ucorr is None else ref_f2f_ucorr
self.ref_inst = None if ref_inst is None else ref_inst
# ======================================================================
# Initialize the result cube and add a bias pattern.
self.message('Initializing results cube')
result = np.zeros((self.naxis3, self.naxis2, self.naxis1), dtype=np.float32)
# Inject a bias pattern.
bias_pattern = self.bias_image*self.bias_amp
# Add overall bias offset plus random component
bias_pattern += self.bias_off_avg + self.bias_off_sig * np.random.randn()
# Add in some kTC noise. Since this should always come out
# in calibration, we do not attempt to model it in detail.
if self.ktc_noise > 0:
bias_pattern += self.ktc_noise * np.random.standard_normal((self.naxis2, self.naxis1))
# Add pedestal offset to each output channel
# Check if self.ch_off is a numpy array or list
if isinstance(self.ch_off, (np.ndarray,list)):
temp = np.asarray(self.ch_off)
if temp.size != self.n_out:
_log.warning('Number of elements in ch_off not equal to n_out')
os.sys.exit()
for ch in range(self.n_out):
bias_pattern[:,self.xsize*ch:self.xsize*(ch+1)] += temp[ch]
else:
bias_pattern += self.ch_off
# Add in alternating column offsets to bias pattern
inda = np.arange(0,self.xsize,2)
indb = inda+1
# Check if self.aco_a/b are numpy arrays or lists
if isinstance(self.aco_a, (np.ndarray,list)):
temp = np.asarray(self.aco_a) # Always set to a numpy array
if temp.size != self.n_out:
_log.warning('Number of elements in aco_a not equal to n_out')
os.sys.exit()
else: # Assumes aco_a is a single value as opposed to an array or list
temp = np.ones(self.n_out) * self.aco_a
# Add alternating column offsets for each channel
for ch in range(self.n_out):
chan = bias_pattern[:,self.xsize*ch:self.xsize*(ch+1)]
chan[:,inda] += temp[ch]
# Do the same, but with column b
if isinstance(self.aco_b, (np.ndarray,list)):
temp = np.asarray(self.aco_b) # Always set to a numpy array
if temp.size != self.n_out:
_log.warning('Number of elements in aco_b not equal to n_out')
os.sys.exit()
else: # Assumes aco_b is a single value as opposed to an array or list
temp = np.ones(self.n_out) * self.aco_b
# Add alternating column offsets for each channel
for ch in range(self.n_out):
chan = bias_pattern[:,self.xsize*ch:self.xsize*(ch+1)]
chan[:,indb] += temp[ch]
# Add in the bias pattern
for z in np.arange(self.naxis3):
result[z,:,:] += bias_pattern
# Add in random frame-to-frame bias offsets
# First, correlated bias between channels
if self.ref_f2f_corr is not None:
for z in np.arange(self.naxis3):
result[z,:,:] += self.ref_f2f_corr * np.random.randn()
# Next, channel-specific bias offsets
if self.ref_f2f_ucorr is not None:
if isinstance(self.ref_f2f_ucorr, (np.ndarray,list)):
temp = np.asarray(self.ref_f2f_ucorr)
if temp.size != self.n_out:
_log.warning('Number of elements in ref_f2f_ucorr not equal to n_out')
os.sys.exit()
else: # Single value as opposed to an array or list
temp = np.ones(self.n_out) * self.ref_f2f_ucorr
for z in np.arange(self.naxis3):
for ch in range(self.n_out):
result[z,:,self.xsize*ch:self.xsize*(ch+1)] += temp[ch] * np.random.randn()
# Reference instability (frame-to-frame reference offset not recorded in active pixels)
if self.ref_inst is not None:
ref_noise = self.ref_inst * self.pink_noise('ref_inst')
w = self.ref_all
for z in np.arange(self.naxis3):
if w[0] > 0:
result[z, :w[0], :] += ref_noise[z]
if w[1] > 0:
result[z, -w[1]:,:] += ref_noise[z]
if w[2] > 0:
result[z, :, :w[2]] += ref_noise[z]
if w[3] > 0:
result[z, :,-w[3]:] += ref_noise[z]
# Make white read noise. This is the same for all pixels.
if self.rd_noise is not None:
self.message('Generating rd_noise')
# We want self.rd_noise to be an array or list
if isinstance(self.rd_noise, (np.ndarray,list)):
temp = np.asarray(self.rd_noise)
if temp.size != self.n_out:
_log.warning('Number of elements in rd_noise not equal to n_out')
os.sys.exit()
else: # Single value as opposed to an array or list
self.rd_noise = np.ones(self.n_out) * self.rd_noise
w = self.ref_all
r = self.reference_pixel_noise_ratio # Easier to work with
for z in np.arange(self.naxis3):
here = np.zeros((self.naxis2, self.naxis1))
# First assume no ref pixels and just add in random noise
for op in np.arange(self.n_out):
x0 = op * self.xsize
x1 = x0 + self.xsize
here[:,x0:x1] = self.rd_noise[op] * np.random.standard_normal((self.naxis2,self.xsize))
# If there are reference pixels, overwrite with appropriate noise values
# Noisy reference pixels for each side of detector
rd_ref = r * np.mean(self.rd_noise)
if w[0] > 0: # lower
here[:w[0],:] = rd_ref * np.random.standard_normal((w[0],self.naxis1))
if w[1] > 0: # upper
here[-w[1]:,:] = rd_ref * np.random.standard_normal((w[1],self.naxis1))
if w[2] > 0: # left
here[:,:w[2]] = rd_ref * np.random.standard_normal((self.naxis2,w[2]))
if w[3] > 0: # right
here[:,-w[3]:] = rd_ref * np.random.standard_normal((self.naxis2,w[3]))
# Add the noise in to the result
result[z,:,:] += here
# Add correlated pink noise.
if self.c_pink is not None:
# c_pink_map was used to hold the entire correlated pink noise cube
# c_pink_map was useful for debugging, but eats up a lot of space
#self.c_pink_map = np.zeros((self.naxis3,self.naxis2,self.naxis1))
self.message('Adding c_pink noise')
tt = self.c_pink * self.pink_noise('pink') # tt is a temp. variable
#self.c_pink_full = tt.copy()
tt = np.reshape(tt, (self.naxis3, self.naxis2+self.nfoh, \
self.xsize+self.nroh))[:,:self.naxis2,:self.xsize]
for op in np.arange(self.n_out):
x0 = op * self.xsize
x1 = x0 + self.xsize
# By default fast-scan readout direction is [-->,<--,-->,<--]
# If reverse_scan_direction is True, then [<--,-->,<--,-->]
# TODO: Include option for all --> or all <--
modnum = 1 if self.reverse_scan_direction else 0
if np.mod(op,2) == modnum:
#self.c_pink_map[:,:,x0:x1] = tt
result[:,:,x0:x1] += tt
else:
#self.c_pink_map[:,:,x0:x1] = tt[:,:,::-1]
result[:,:,x0:x1] += tt[:,:,::-1]
del tt
#result += self.c_pink_map
#del self.c_pink_map
# Add uncorrelated pink noise. Because this pink noise is stationary and
# different for each output, we don't need to flip it.
if self.u_pink is not None:
# We want self.u_pink to be an array or list
if isinstance(self.u_pink, (np.ndarray,list)):
temp = np.asarray(self.u_pink)
if temp.size != self.n_out:
_log.warning('Number of elements in u_pink not equal to n_out')
os.sys.exit()
else: # Single value as opposed to an array or list
self.u_pink = np.ones(self.n_out) * self.u_pink
# Only do the rest if any values are not 0
if self.u_pink.any():
# u_pink_map was used to hold the entire correlated pink noise cube
# u_pink_map was useful for debugging, but eats up a lot of space
#self.u_pink_map = np.zeros((self.naxis3,self.naxis2,self.naxis1))
self.message('Adding u_pink noise')
for op in np.arange(self.n_out):
x0 = op * self.xsize
x1 = x0 + self.xsize
tt = self.u_pink[op] * self.pink_noise('pink')
tt = np.reshape(tt, (self.naxis3, self.naxis2+self.nfoh, \
self.xsize+self.nroh))[:,:self.naxis2,:self.xsize]
#self.u_pink_map[:,:,x0:x1] = tt
result[:,:,x0:x1] += tt
del tt
#result += self.u_pink_map
#del self.u_pink_map
# Add ACN
if self.acn is not None:
self.message('Adding acn noise')
for op in np.arange(self.n_out):
# Generate new pink noise for each even and odd vector.
# We give these the abstract names 'a' and 'b' so that we
# can use a previously worked out formula to turn them
# back into an image section.
a = self.acn * self.pink_noise('acn')
b = self.acn * self.pink_noise('acn')
# Pick out just the real pixels (i.e. ignore the gaps)
a = a[np.where(self.m_short == 1)]
b = b[np.where(self.m_short == 1)]
# Reformat into an image section. This uses the formula
# mentioned above.
acn_cube = np.reshape(np.transpose(np.vstack((a,b))),
(self.naxis3,self.naxis2,self.xsize))
# Add in the ACN. Because pink noise is stationary, we can
# ignore the readout directions. There is no need to flip
# acn_cube before adding it in.
x0 = op * self.xsize
x1 = x0 + self.xsize
result[:,:,x0:x1] += acn_cube
del acn_cube
# Add PCA-zero. The PCA-zero template is modulated by 1/f.
if self.pca0_amp is not None:
self.message('Adding PCA-zero "picture frame" noise')
gamma = self.pink_noise(mode='pink')
zoom_factor = self.naxis2 * self.naxis3 / np.size(gamma)
gamma = zoom(gamma, zoom_factor, order=1, mode='mirror')
gamma = np.reshape(gamma, (self.naxis3,self.naxis2))
for z in np.arange(self.naxis3):
for y in np.arange(self.naxis2):
result[z,y,:] += self.pca0_amp*self.pca0[y,:]*gamma[z,y]
# Add in dark current for each frame
#k = np.array([[0,0.01,0],[0.01,0.96,0.01],[0,0.01,0]])
if self.dark_image is not None:
self.message('Adding dark current')
gain_temp = self.gain # 2.0 # Temporary gain (e-/ADU)
dark_frame = self.dark_image * self.tframe * gain_temp # electrons
# Set reference pixels' dark current equal to 0
if w[0] > 0: # lower
dark_frame[:w[0],:] = 0
if w[1] > 0: # upper
dark_frame[-w[1]:,:] = 0
if w[2] > 0: # left
dark_frame[:,:w[2]] = 0
if w[3] > 0: # right
dark_frame[:,-w[3]:] = 0
# For each read frame, create random dark current instance based on Poisson
dark_temp = np.zeros([self.naxis2,self.naxis1])
for z in np.arange(self.naxis3):
dark_temp += np.random.poisson(dark_frame, size=None)
#dark_ipc = convolve(dark_temp, k, mode='constant', cval=0.0)
result[z,:,:] += dark_temp
# If the data cube has only 1 frame, reformat into a 2-dimensional image.
if self.naxis3 == 1:
self.message('Reformatting cube into image')
result = result[0,:,:]
# Convert to ADU and unsigned int
if out_ADU:
# Apply Gain (e/ADU) to convert to ADU (DN)
if self.gain != 1:
self.message('Applying Gain')
result /= self.gain
# If the data cube has more than one frame, convert to unsigned int
#if self.naxis3 > 1:
self.message('Converting to 16-bit unsigned integer')
# Ensure that there are no negative pixel values.
result[result < 0] = 0
# And that anything higher than 65535 gets tacked to the top end
result[result >= 2**16] = 2**16 - 1
result = result.astype('uint16')
# Create HDU
# THIS NEEDS TO BE UPDATED WITH BETTER INFO/FORMAT
hdu = fits.PrimaryHDU(result)
# hdu.header.append()
# hdu.header.append(('TFRAME', self.tframe, 'Time in seconds between frames'))
# hdu.header.append(('INTTIME', self.inttime, 'Total integration time for one MULTIACCUM'))
# hdu.header.append(('RD_NOISE', self.rd_noise, 'Read noise'))
# #hdu.header.append(('PEDESTAL', self.pedestal, 'Pedestal drifts'))
# hdu.header.append(('C_PINK', self.c_pink, 'Correlated pink'))
# #hdu.header.append(('U_PINK', self.u_pink, 'Uncorrelated pink'))
# hdu.header.append(('ACN', self.acn, 'Alternating column noise'))
# hdu.header.append(('PCA0', self.pca0_amp, \
# 'PCA zero, AKA picture frame'))
hdu.header['HISTORY'] = 'Created by NGHXRG version ' \
+ str(self.nghxrg_version)
# Write the result to a FITS file
if o_file is not None:
self.message('Writing FITS file')
hdu.writeto(o_file, clobber='True')
self.message('Exiting mknoise()')
return hdu | [
"numpy.sum",
"astropy.io.fits.PrimaryHDU",
"numpy.ones",
"os.path.isfile",
"numpy.mean",
"numpy.arange",
"numpy.random.normal",
"astropy.io.fits.HDUList",
"multiprocessing.cpu_count",
"numpy.zeros_like",
"numpy.fft.irfft",
"numpy.random.randn",
"scipy.ndimage.interpolation.zoom",
"numpy.ma... | [((4061, 4094), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (4084, 4094), False, 'import warnings\n'), ((4117, 4144), 'logging.getLogger', 'logging.getLogger', (['"""nghxrg"""'], {}), "('nghxrg')\n", (4134, 4144), False, 'import logging\n'), ((12090, 12138), 'numpy.zeros', 'np.zeros', (['(self.naxis3, self.naxis2, self.xsize)'], {}), '((self.naxis3, self.naxis2, self.xsize))\n', (12098, 12138), True, 'import numpy as np\n'), ((12158, 12184), 'numpy.zeros_like', 'np.zeros_like', (['self.m_even'], {}), '(self.m_even)\n', (12171, 12184), True, 'import numpy as np\n'), ((12202, 12229), 'numpy.arange', 'np.arange', (['(0)', 'self.xsize', '(2)'], {}), '(0, self.xsize, 2)\n', (12211, 12229), True, 'import numpy as np\n'), ((12633, 12712), 'numpy.zeros', 'np.zeros', (['(self.naxis3, self.naxis2 + self.nfoh, (self.xsize + self.nroh) // 2)'], {}), '((self.naxis3, self.naxis2 + self.nfoh, (self.xsize + self.nroh) // 2))\n', (12641, 12712), True, 'import numpy as np\n'), ((12934, 12962), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['self.nstep2'], {}), '(self.nstep2)\n', (12949, 12962), True, 'import numpy as np\n'), ((13014, 13046), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['(2 * self.nstep2)'], {}), '(2 * self.nstep2)\n', (13029, 13046), True, 'import numpy as np\n'), ((13090, 13122), 'numpy.fft.rfftfreq', 'np.fft.rfftfreq', (['(2 * self.naxis3)'], {}), '(2 * self.naxis3)\n', (13105, 13122), True, 'import numpy as np\n'), ((13568, 13598), 'numpy.sqrt', 'np.sqrt', (['(self.f1 ** self.alpha)'], {}), '(self.f1 ** self.alpha)\n', (13575, 13598), True, 'import numpy as np\n'), ((13622, 13652), 'numpy.sqrt', 'np.sqrt', (['(self.f2 ** self.alpha)'], {}), '(self.f2 ** self.alpha)\n', (13629, 13652), True, 'import numpy as np\n'), ((13676, 13706), 'numpy.sqrt', 'np.sqrt', (['(self.f3 ** self.alpha)'], {}), '(self.f3 ** self.alpha)\n', (13683, 13706), True, 'import numpy as np\n'), ((15182, 15197), 'numpy.median', 
'np.median', (['data'], {}), '(data)\n', (15191, 15197), True, 'import numpy as np\n'), ((17411, 17448), 'numpy.array', 'np.array', (['[lower, upper, left, right]'], {}), '([lower, upper, left, right])\n', (17419, 17448), True, 'import numpy as np\n'), ((18019, 18051), 'numpy.random.standard_normal', 'np.random.standard_normal', (['nstep'], {}), '(nstep)\n', (18044, 18051), True, 'import numpy as np\n'), ((19044, 19060), 'numpy.sum', 'np.sum', (['(f < fmin)'], {}), '(f < fmin)\n', (19050, 19060), True, 'import numpy as np\n'), ((19553, 19585), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'p_filter'}), '(scale=p_filter)\n', (19569, 19585), True, 'import numpy as np\n'), ((19599, 19631), 'numpy.random.normal', 'np.random.normal', ([], {'scale': 'p_filter'}), '(scale=p_filter)\n', (19615, 19631), True, 'import numpy as np\n'), ((26911, 26978), 'numpy.zeros', 'np.zeros', (['(self.naxis3, self.naxis2, self.naxis1)'], {'dtype': 'np.float32'}), '((self.naxis3, self.naxis2, self.naxis1), dtype=np.float32)\n', (26919, 26978), True, 'import numpy as np\n'), ((28106, 28133), 'numpy.arange', 'np.arange', (['(0)', 'self.xsize', '(2)'], {}), '(0, self.xsize, 2)\n', (28115, 28133), True, 'import numpy as np\n'), ((29530, 29552), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (29539, 29552), True, 'import numpy as np\n'), ((40153, 40176), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['result'], {}), '(result)\n', (40168, 40176), False, 'from astropy.io import fits\n'), ((8084, 8097), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (8095, 8097), False, 'import os\n'), ((12368, 12388), 'numpy.size', 'np.size', (['self.m_even'], {}), '(self.m_even)\n', (12375, 12388), True, 'import numpy as np\n'), ((12434, 12453), 'numpy.size', 'np.size', (['self.m_odd'], {}), '(self.m_odd)\n', (12441, 12453), True, 'import numpy as np\n'), ((12851, 12872), 'numpy.size', 'np.size', (['self.m_short'], {}), '(self.m_short)\n', (12858, 12872), True, 'import 
numpy as np\n'), ((14096, 14113), 'astropy.io.fits.HDUList', 'fits.HDUList', (['[h]'], {}), '([h])\n', (14108, 14113), False, 'from astropy.io import fits\n'), ((14146, 14171), 'astropy.io.fits.open', 'fits.open', (['self.bias_file'], {}), '(self.bias_file)\n', (14155, 14171), False, 'from astropy.io import fits\n'), ((14570, 14594), 'numpy.max', 'np.max', (['[scale1, scale2]'], {}), '([scale1, scale2])\n', (14576, 14594), True, 'import numpy as np\n'), ((14865, 14889), 'numpy.max', 'np.max', (['[scale1, scale2]'], {}), '([scale1, scale2])\n', (14871, 14889), True, 'import numpy as np\n'), ((14994, 15039), 'scipy.ndimage.interpolation.zoom', 'zoom', (['data', 'zoom_factor'], {'order': '(1)', 'mode': '"""wrap"""'}), "(data, zoom_factor, order=1, mode='wrap')\n", (14998, 15039), False, 'from scipy.ndimage.interpolation import zoom\n'), ((15238, 15247), 'astropy.stats.funcs.median_absolute_deviation', 'mad', (['data'], {}), '(data)\n', (15241, 15247), True, 'from astropy.stats.funcs import median_absolute_deviation as mad\n'), ((15930, 15943), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (15941, 15943), False, 'import os\n'), ((16178, 16203), 'astropy.io.fits.open', 'fits.open', (['self.dark_file'], {}), '(self.dark_file)\n', (16187, 16203), False, 'from astropy.io import fits\n'), ((16697, 16718), 'numpy.median', 'np.median', (['dark_image'], {}), '(dark_image)\n', (16706, 16718), True, 'import numpy as np\n'), ((19004, 19032), 'numpy.max', 'np.max', (['[fmin, 1.0 / nstep2]'], {}), '([fmin, 1.0 / nstep2])\n', (19010, 19032), True, 'import numpy as np\n'), ((20096, 20216), 'pyfftw.interfaces.numpy_fft.irfft', 'pyfftw.interfaces.numpy_fft.irfft', (['thefft'], {'overwrite_input': '(True)', 'planner_effort': '"""FFTW_ESTIMATE"""', 'threads': 'self.ncores'}), "(thefft, overwrite_input=True,\n planner_effort='FFTW_ESTIMATE', threads=self.ncores)\n", (20129, 20216), False, 'import pyfftw\n'), ((20265, 20285), 'numpy.fft.irfft', 'np.fft.irfft', (['thefft'], {}), 
'(thefft)\n', (20277, 20285), True, 'import numpy as np\n'), ((27685, 27708), 'numpy.asarray', 'np.asarray', (['self.ch_off'], {}), '(self.ch_off)\n', (27695, 27708), True, 'import numpy as np\n'), ((28287, 28309), 'numpy.asarray', 'np.asarray', (['self.aco_a'], {}), '(self.aco_a)\n', (28297, 28309), True, 'import numpy as np\n'), ((28942, 28964), 'numpy.asarray', 'np.asarray', (['self.aco_b'], {}), '(self.aco_b)\n', (28952, 28964), True, 'import numpy as np\n'), ((29762, 29784), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (29771, 29784), True, 'import numpy as np\n'), ((30384, 30406), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (30393, 30406), True, 'import numpy as np\n'), ((30801, 30823), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (30810, 30823), True, 'import numpy as np\n'), ((31924, 31946), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (31933, 31946), True, 'import numpy as np\n'), ((33836, 33857), 'numpy.arange', 'np.arange', (['self.n_out'], {}), '(self.n_out)\n', (33845, 33857), True, 'import numpy as np\n'), ((36308, 36329), 'numpy.arange', 'np.arange', (['self.n_out'], {}), '(self.n_out)\n', (36317, 36329), True, 'import numpy as np\n'), ((37790, 37838), 'scipy.ndimage.interpolation.zoom', 'zoom', (['gamma', 'zoom_factor'], {'order': '(1)', 'mode': '"""mirror"""'}), "(gamma, zoom_factor, order=1, mode='mirror')\n", (37794, 37838), False, 'from scipy.ndimage.interpolation import zoom\n'), ((37859, 37904), 'numpy.reshape', 'np.reshape', (['gamma', '(self.naxis3, self.naxis2)'], {}), '(gamma, (self.naxis3, self.naxis2))\n', (37869, 37904), True, 'import numpy as np\n'), ((37925, 37947), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (37934, 37947), True, 'import numpy as np\n'), ((38890, 38926), 'numpy.zeros', 'np.zeros', (['[self.naxis2, self.naxis1]'], {}), '([self.naxis2, self.naxis1])\n', (38898, 38926), True, 'import numpy 
as np\n'), ((38949, 38971), 'numpy.arange', 'np.arange', (['self.naxis3'], {}), '(self.naxis3)\n', (38958, 38971), True, 'import numpy as np\n'), ((7239, 7253), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (7251, 7253), True, 'import multiprocessing as mp\n'), ((9351, 9364), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (9362, 9364), False, 'import os\n'), ((9528, 9541), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (9539, 9541), False, 'import os\n'), ((9922, 9952), 'os.path.isfile', 'os.path.isfile', (['self.bias_file'], {}), '(self.bias_file)\n', (9936, 9952), False, 'import os\n'), ((10678, 10708), 'os.path.isfile', 'os.path.isfile', (['self.dark_file'], {}), '(self.dark_file)\n', (10692, 10708), False, 'import os\n'), ((14046, 14076), 'numpy.zeros', 'np.zeros', (['[det_size, det_size]'], {}), '([det_size, det_size])\n', (14054, 14076), True, 'import numpy as np\n'), ((16663, 16678), 'astropy.stats.funcs.median_absolute_deviation', 'mad', (['dark_image'], {}), '(dark_image)\n', (16666, 16678), True, 'from astropy.stats.funcs import median_absolute_deviation as mad\n'), ((27205, 27222), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (27220, 27222), True, 'import numpy as np\n'), ((27447, 27500), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(self.naxis2, self.naxis1)'], {}), '((self.naxis2, self.naxis1))\n', (27472, 27500), True, 'import numpy as np\n'), ((27845, 27858), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (27856, 27858), False, 'import os\n'), ((28475, 28488), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (28486, 28488), False, 'import os\n'), ((28587, 28606), 'numpy.ones', 'np.ones', (['self.n_out'], {}), '(self.n_out)\n', (28594, 28606), True, 'import numpy as np\n'), ((29130, 29143), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (29141, 29143), False, 'import os\n'), ((29242, 29261), 'numpy.ones', 'np.ones', (['self.n_out'], {}), '(self.n_out)\n', (29249, 29261), True, 'import numpy as 
np\n'), ((30035, 30065), 'numpy.asarray', 'np.asarray', (['self.ref_f2f_ucorr'], {}), '(self.ref_f2f_ucorr)\n', (30045, 30065), True, 'import numpy as np\n'), ((31475, 31500), 'numpy.asarray', 'np.asarray', (['self.rd_noise'], {}), '(self.rd_noise)\n', (31485, 31500), True, 'import numpy as np\n'), ((31971, 32007), 'numpy.zeros', 'np.zeros', (['(self.naxis2, self.naxis1)'], {}), '((self.naxis2, self.naxis1))\n', (31979, 32007), True, 'import numpy as np\n'), ((32117, 32138), 'numpy.arange', 'np.arange', (['self.n_out'], {}), '(self.n_out)\n', (32126, 32138), True, 'import numpy as np\n'), ((33676, 33754), 'numpy.reshape', 'np.reshape', (['tt', '(self.naxis3, self.naxis2 + self.nfoh, self.xsize + self.nroh)'], {}), '(tt, (self.naxis3, self.naxis2 + self.nfoh, self.xsize + self.nroh))\n', (33686, 33754), True, 'import numpy as np\n'), ((34906, 34929), 'numpy.asarray', 'np.asarray', (['self.u_pink'], {}), '(self.u_pink)\n', (34916, 34929), True, 'import numpy as np\n'), ((35645, 35666), 'numpy.arange', 'np.arange', (['self.n_out'], {}), '(self.n_out)\n', (35654, 35666), True, 'import numpy as np\n'), ((37755, 37769), 'numpy.size', 'np.size', (['gamma'], {}), '(gamma)\n', (37762, 37769), True, 'import numpy as np\n'), ((37974, 37996), 'numpy.arange', 'np.arange', (['self.naxis2'], {}), '(self.naxis2)\n', (37983, 37996), True, 'import numpy as np\n'), ((39002, 39042), 'numpy.random.poisson', 'np.random.poisson', (['dark_frame'], {'size': 'None'}), '(dark_frame, size=None)\n', (39019, 39042), True, 'import numpy as np\n'), ((11777, 11796), 'numpy.log2', 'np.log2', (['self.nstep'], {}), '(self.nstep)\n', (11784, 11796), True, 'import numpy as np\n'), ((29839, 29856), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (29854, 29856), True, 'import numpy as np\n'), ((30221, 30234), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (30232, 30234), False, 'import os\n'), ((30322, 30341), 'numpy.ones', 'np.ones', (['self.n_out'], {}), '(self.n_out)\n', (30329, 30341), 
True, 'import numpy as np\n'), ((31651, 31664), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (31662, 31664), False, 'import os\n'), ((31761, 31780), 'numpy.ones', 'np.ones', (['self.n_out'], {}), '(self.n_out)\n', (31768, 31780), True, 'import numpy as np\n'), ((32524, 32546), 'numpy.mean', 'np.mean', (['self.rd_noise'], {}), '(self.rd_noise)\n', (32531, 32546), True, 'import numpy as np\n'), ((34234, 34247), 'numpy.mod', 'np.mod', (['op', '(2)'], {}), '(op, 2)\n', (34240, 34247), True, 'import numpy as np\n'), ((35078, 35091), 'os.sys.exit', 'os.sys.exit', ([], {}), '()\n', (35089, 35091), False, 'import os\n'), ((35186, 35205), 'numpy.ones', 'np.ones', (['self.n_out'], {}), '(self.n_out)\n', (35193, 35205), True, 'import numpy as np\n'), ((36797, 36824), 'numpy.where', 'np.where', (['(self.m_short == 1)'], {}), '(self.m_short == 1)\n', (36805, 36824), True, 'import numpy as np\n'), ((36848, 36875), 'numpy.where', 'np.where', (['(self.m_short == 1)'], {}), '(self.m_short == 1)\n', (36856, 36875), True, 'import numpy as np\n'), ((17682, 17705), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (17703, 17705), False, 'import datetime\n'), ((19457, 19471), 'numpy.sum', 'np.sum', (['(w ** 2)'], {}), '(w ** 2)\n', (19463, 19471), True, 'import numpy as np\n'), ((30531, 30548), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (30546, 30548), True, 'import numpy as np\n'), ((32278, 32330), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(self.naxis2, self.xsize)'], {}), '((self.naxis2, self.xsize))\n', (32303, 32330), True, 'import numpy as np\n'), ((32629, 32675), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(w[0], self.naxis1)'], {}), '((w[0], self.naxis1))\n', (32654, 32675), True, 'import numpy as np\n'), ((32758, 32804), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(w[1], self.naxis1)'], {}), '((w[1], self.naxis1))\n', (32783, 32804), True, 'import numpy as np\n'), ((32885, 32931), 
'numpy.random.standard_normal', 'np.random.standard_normal', (['(self.naxis2, w[2])'], {}), '((self.naxis2, w[2]))\n', (32910, 32931), True, 'import numpy as np\n'), ((33014, 33060), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(self.naxis2, w[3])'], {}), '((self.naxis2, w[3]))\n', (33039, 33060), True, 'import numpy as np\n'), ((35842, 35920), 'numpy.reshape', 'np.reshape', (['tt', '(self.naxis3, self.naxis2 + self.nfoh, self.xsize + self.nroh)'], {}), '(tt, (self.naxis3, self.naxis2 + self.nfoh, self.xsize + self.nroh))\n', (35852, 35920), True, 'import numpy as np\n'), ((37036, 37053), 'numpy.vstack', 'np.vstack', (['(a, b)'], {}), '((a, b))\n', (37045, 37053), True, 'import numpy as np\n')] |
from train import Agent, utils as U
import gym
import numpy as np
import matplotlib.pyplot as plt
import nn
class PPO(Agent):
    """Proximal Policy Optimization (clipped-surrogate variant).

    Collects T transitions with the current policy, then runs several
    epochs of minibatch updates on the clipped objective, while a frozen
    copy ("old") of the policy supplies the reference probabilities for
    the PPO ratio.
    """

    def __init__(self,
                 policy,
                 critic,
                 epsilon=.1,
                 T=16,
                 epochs=4,
                 batch_size=None,
                 optimizer=None,
                 transitions=-1,
                 **kwargs):
        # epsilon: clip range for the surrogate ratio.
        # T: number of transitions gathered before each learning phase.
        # epochs: optimization passes over each batch of T transitions.
        # batch_size: NOTE(review) - in learn() this value is passed to
        #   np.array_split(), where it acts as the NUMBER of minibatches,
        #   not their size; confirm intent with the author.
        # transitions=-1: presumably an unbounded transition buffer,
        #   handled by the Agent base class - TODO confirm.
        super(PPO, self).__init__(transitions=transitions, **kwargs)
        self.policy = policy
        # Frozen snapshot of the policy; provides the "old" action
        # probabilities in the PPO ratio. Refreshed via update_old().
        self.old = nn.models.clone_model(policy)
        self.old.trainable = False
        self.critic = critic
        self.epsilon = epsilon
        self.T = T
        self.epochs = epochs
        self.batch_size = batch_size or T // 4
        self.optimizer = optimizer or nn.Adam()

    def act(self, state):
        """Sample an action from the policy's categorical distribution."""
        probs = self.policy(state[None])[0].numpy()
        action = np.random.choice(len(probs), p=probs)
        return action

    def on_step_end(self):
        # Lazily build the "old" network on first use so its weights can
        # then be copied from the (already built) policy.
        if not self.old.built:
            S = self.transitions.sample(1).state
            self.old(S)  # initialize weights
            self.update_old()
        # Learn once every T collected transitions, then refresh the
        # frozen snapshot for the next collection phase.
        if len(self.transitions) == self.T:
            self._learn()
            self.update_old()

    def update_old(self):
        """Copy the current policy weights into the frozen snapshot."""
        self.old.set_weights(self.policy.get_weights())

    def _learn(self):
        # Drain the transition buffer and run one learning phase on it.
        data = self.transitions.get()
        self.transitions.reset()
        self.learn(data)

    def learn(self, data):
        """Run `epochs` passes of minibatch PPO updates over `data`.

        `data` is the 5-tuple (states, actions, rewards, next_states,
        dones) produced by the transition buffer - TODO confirm layout
        against the Agent base class.
        """
        S, A, R, Snext, dones = data
        S, A = nn.tensors((S, A))
        A = A.reshape([-1, 1])
        T = S.shape[0]
        batch_shape = (T, )
        # gamma/lambd are bound but not used directly here; presumably
        # consumed inside compute_td_zero/compute_gae via self - confirm.
        gamma, lambd = self.gamma, self.lambd
        policy, old, critic = self.policy, self.old, self.critic
        # Reference probabilities from the frozen policy (no gradient).
        old_probs = old(S).detach().gather(A, batch_dims=1).flatten()
        U.check_shape(old_probs, batch_shape)
        # TD(0) critic targets and one-step TD errors (deltas).
        targets, deltas = self.compute_td_zero(data,
                                               V=lambda x: critic(x).detach())
        # Generalized Advantage Estimation from the TD errors.
        advantages = self.compute_gae(deltas=deltas.numpy(), dones=dones)
        advantages = nn.tensor(advantages)
        indices = np.arange(T)
        for _ in range(self.epochs):
            np.random.shuffle(indices)
            # NOTE(review): np.array_split interprets self.batch_size as
            # the number of splits, so each minibatch has ~T / batch_size
            # elements - verify this is the intended semantics.
            for batch in np.array_split(indices, self.batch_size):
                batch = nn.tensor(batch)
                self.optimize(
                    (S.gather(batch), A.gather(batch), old_probs.gather(batch),
                     advantages.gather(batch), targets.gather(batch)))

    def optimize(self, batch):
        """One gradient step on the clipped PPO loss for a minibatch."""
        S, A, old_probs, advantages, targets = batch
        batch_size = S.shape[0]
        batch_shape = (batch_size, )
        epsilon, policy, critic = self.epsilon, self.policy, self.critic
        with nn.GradientTape() as tape:
            # Policy Objective
            probs = policy(S).gather(A, batch_dims=1).flatten()
            U.check_shape(probs, batch_shape)
            # r_t = pi(a|s) / pi_old(a|s)
            ratios = probs / old_probs
            Lcpi = ratios * advantages
            # Clipped surrogate: take the pessimistic (elementwise min).
            Lclip = ratios.clip(1 - epsilon, 1 + epsilon) * advantages
            policy_objective = Lcpi.minimum(Lclip)
            U.check_shape(policy_objective, batch_shape)
            policy_objective = policy_objective.mean()
            U.check_shape(policy_objective, ())
            # Critic Loss
            V = critic(S).flatten()
            U.check_shape(V, batch_shape)
            critic_loss = (targets - V).pow(2).mean()
            U.check_shape(critic_loss, ())
            # Total Loss (maximize objective == minimize its negation).
            loss = -policy_objective + critic_loss
        grads = tape.gradient(loss, self.parameters)
        self.optimizer.apply_gradients(zip(grads, self.parameters))

    @property
    def parameters(self):
        # Cached union of policy + critic trainable variables.
        # self._parameters is presumably initialized to None by the Agent
        # base class - TODO confirm.
        if self._parameters is None:
            params = self.policy.trainable_variables + self.critic.trainable_variables
            params = U.unique(params)
            self._parameters = params
        return self._parameters
def main():
    """Train PPO on CartPole-v0 and plot the per-episode scores."""
    env = gym.make('CartPole-v0')
    action_count = env.action_space.n
    hidden_units = 16

    # Actor: one hidden ReLU layer feeding a softmax over the actions.
    actor_layers = [
        nn.Dense(hidden_units, activation='relu'),
        nn.Dense(action_count, activation='softmax'),
    ]
    policy = nn.Sequential(actor_layers)

    # Critic: one hidden ReLU layer feeding a scalar value head.
    value_layers = [
        nn.Dense(hidden_units, activation='relu'),
        nn.Dense(1),
    ]
    critic = nn.Sequential(value_layers)

    agent = PPO(policy=policy, critic=critic, env=env)
    episode_scores = agent.train(episodes=200)

    plt.plot(episode_scores)
    plt.show()


if __name__ == '__main__':
    main()
| [
"train.utils.unique",
"matplotlib.pyplot.show",
"gym.make",
"matplotlib.pyplot.plot",
"nn.Dense",
"train.utils.check_shape",
"nn.tensors",
"nn.Adam",
"nn.GradientTape",
"numpy.arange",
"nn.models.clone_model",
"nn.tensor",
"numpy.array_split",
"numpy.random.shuffle"
] | [((3956, 3979), 'gym.make', 'gym.make', (['"""CartPole-v0"""'], {}), "('CartPole-v0')\n", (3964, 3979), False, 'import gym\n'), ((4363, 4379), 'matplotlib.pyplot.plot', 'plt.plot', (['scores'], {}), '(scores)\n', (4371, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4384, 4394), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4392, 4394), True, 'import matplotlib.pyplot as plt\n'), ((526, 555), 'nn.models.clone_model', 'nn.models.clone_model', (['policy'], {}), '(policy)\n', (547, 555), False, 'import nn\n'), ((1516, 1534), 'nn.tensors', 'nn.tensors', (['(S, A)'], {}), '((S, A))\n', (1526, 1534), False, 'import nn\n'), ((1807, 1844), 'train.utils.check_shape', 'U.check_shape', (['old_probs', 'batch_shape'], {}), '(old_probs, batch_shape)\n', (1820, 1844), True, 'from train import Agent, utils as U\n'), ((2073, 2094), 'nn.tensor', 'nn.tensor', (['advantages'], {}), '(advantages)\n', (2082, 2094), False, 'import nn\n'), ((2114, 2126), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (2123, 2126), True, 'import numpy as np\n'), ((784, 793), 'nn.Adam', 'nn.Adam', ([], {}), '()\n', (791, 793), False, 'import nn\n'), ((2176, 2202), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2193, 2202), True, 'import numpy as np\n'), ((2228, 2268), 'numpy.array_split', 'np.array_split', (['indices', 'self.batch_size'], {}), '(indices, self.batch_size)\n', (2242, 2268), True, 'import numpy as np\n'), ((2733, 2750), 'nn.GradientTape', 'nn.GradientTape', ([], {}), '()\n', (2748, 2750), False, 'import nn\n'), ((2867, 2900), 'train.utils.check_shape', 'U.check_shape', (['probs', 'batch_shape'], {}), '(probs, batch_shape)\n', (2880, 2900), True, 'from train import Agent, utils as U\n'), ((3113, 3157), 'train.utils.check_shape', 'U.check_shape', (['policy_objective', 'batch_shape'], {}), '(policy_objective, batch_shape)\n', (3126, 3157), True, 'from train import Agent, utils as U\n'), ((3225, 3260), 'train.utils.check_shape', 
'U.check_shape', (['policy_objective', '()'], {}), '(policy_objective, ())\n', (3238, 3260), True, 'from train import Agent, utils as U\n'), ((3335, 3364), 'train.utils.check_shape', 'U.check_shape', (['V', 'batch_shape'], {}), '(V, batch_shape)\n', (3348, 3364), True, 'from train import Agent, utils as U\n'), ((3431, 3461), 'train.utils.check_shape', 'U.check_shape', (['critic_loss', '()'], {}), '(critic_loss, ())\n', (3444, 3461), True, 'from train import Agent, utils as U\n'), ((3845, 3861), 'train.utils.unique', 'U.unique', (['params'], {}), '(params)\n', (3853, 3861), True, 'from train import Agent, utils as U\n'), ((4068, 4103), 'nn.Dense', 'nn.Dense', (['hidden'], {'activation': '"""relu"""'}), "(hidden, activation='relu')\n", (4076, 4103), False, 'import nn\n'), ((4113, 4154), 'nn.Dense', 'nn.Dense', (['n_actions'], {'activation': '"""softmax"""'}), "(n_actions, activation='softmax')\n", (4121, 4154), False, 'import nn\n'), ((4200, 4235), 'nn.Dense', 'nn.Dense', (['hidden'], {'activation': '"""relu"""'}), "(hidden, activation='relu')\n", (4208, 4235), False, 'import nn\n'), ((4245, 4256), 'nn.Dense', 'nn.Dense', (['(1)'], {}), '(1)\n', (4253, 4256), False, 'import nn\n'), ((2294, 2310), 'nn.tensor', 'nn.tensor', (['batch'], {}), '(batch)\n', (2303, 2310), False, 'import nn\n')] |
# Face-gated screen lock: watch the default webcam, label known faces,
# archive snapshots of strangers, and put the machine to sleep (macOS
# `pmset`) when only unknown faces are visible.
import face_recognition
import cv2
import os
import numpy as np
import time
import datetime

video_capture = cv2.VideoCapture(0)

# Encode the reference images of the known people once at startup.
ummu_image = face_recognition.load_image_file("/Users/alisher/Desktop/known_face/Ummu.jpg")
ummu_face_encoding = face_recognition.face_encodings(ummu_image)[0]
merve_image = face_recognition.load_image_file("/Users/alisher/Desktop/known_face/Merve.jpg")
merve_face_encoding = face_recognition.face_encodings(merve_image)[0]

known_face_encodings = [ummu_face_encoding, merve_face_encoding]
known_face_names = ["Ummu", "Merve"]

# Snapshots of unrecognized faces are saved here for later review.
unknown_face_dir = "/Users/alisher/Desktop/unknown_faces/"
if not os.path.exists(unknown_face_dir):
    os.makedirs(unknown_face_dir)

while True:
    ret, frame = video_capture.read()
    if not ret:
        # Camera disconnected or frame grab failed; stop cleanly instead
        # of crashing on a None frame below.
        break

    # OpenCV delivers BGR; face_recognition expects RGB.
    rgb_frame = frame[:, :, ::-1]
    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    # BUGFIX: `names` was previously only assigned when faces were
    # detected, so a face-free first frame raised NameError in the
    # drawing loop below.
    names = ["Unknown"] * len(face_encodings)
    for i, ((top, right, bottom, left), face_encoding) in enumerate(zip(face_locations, face_encodings)):
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
        # The closest known face wins, but only if compare_faces also
        # accepted it under its distance tolerance.
        best_match_index = np.argmin(face_distances)
        if matches[best_match_index]:
            names[i] = known_face_names[best_match_index]
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

    # Only trigger when at least one face is visible and ALL of them are
    # strangers (matches the original if/else structure, where this check
    # only ran in the faces-detected branch).
    if face_encodings and all(name == 'Unknown' for name in names):
        # Archive each stranger's face crop, then sleep the machine.
        for (top, right, bottom, left) in face_locations:
            ts = time.time()
            st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
            unknown_face = frame[top:bottom, left:right]
            cv2.imwrite(unknown_face_dir + st + '.jpg', unknown_face)
        os.system("pmset sleepnow")
        # Release the camera before exiting (the original leaked it).
        video_capture.release()
        cv2.destroyAllWindows()
        exit()

    for (top, right, bottom, left), name in zip(face_locations, names):
        cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
        font = cv2.FONT_HERSHEY_DUPLEX
        cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

    # Display the resulting image
    cv2.imshow('Video', frame)

    # Hit 'q' on the keyboard to quit!
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
| [
"face_recognition.face_distance",
"os.makedirs",
"face_recognition.compare_faces",
"cv2.putText",
"cv2.waitKey",
"cv2.imwrite",
"face_recognition.face_encodings",
"face_recognition.load_image_file",
"os.path.exists",
"cv2.imshow",
"numpy.argmin",
"os.system",
"cv2.VideoCapture",
"time.time... | [((109, 128), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (125, 128), False, 'import cv2\n'), ((143, 221), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['"""/Users/alisher/Desktop/known_face/Ummu.jpg"""'], {}), "('/Users/alisher/Desktop/known_face/Ummu.jpg')\n", (175, 221), False, 'import face_recognition\n'), ((305, 384), 'face_recognition.load_image_file', 'face_recognition.load_image_file', (['"""/Users/alisher/Desktop/known_face/Merve.jpg"""'], {}), "('/Users/alisher/Desktop/known_face/Merve.jpg')\n", (337, 384), False, 'import face_recognition\n'), ((2524, 2547), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2545, 2547), False, 'import cv2\n'), ((243, 286), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['ummu_image'], {}), '(ummu_image)\n', (274, 286), False, 'import face_recognition\n'), ((407, 451), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['merve_image'], {}), '(merve_image)\n', (438, 451), False, 'import face_recognition\n'), ((625, 657), 'os.path.exists', 'os.path.exists', (['unknown_face_dir'], {}), '(unknown_face_dir)\n', (639, 657), False, 'import os\n'), ((663, 692), 'os.makedirs', 'os.makedirs', (['unknown_face_dir'], {}), '(unknown_face_dir)\n', (674, 692), False, 'import os\n'), ((800, 842), 'face_recognition.face_locations', 'face_recognition.face_locations', (['rgb_frame'], {}), '(rgb_frame)\n', (831, 842), False, 'import face_recognition\n'), ((864, 922), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['rgb_frame', 'face_locations'], {}), '(rgb_frame, face_locations)\n', (895, 922), False, 'import face_recognition\n'), ((2345, 2371), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (2355, 2371), False, 'import cv2\n'), ((1162, 1229), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['known_face_encodings', 'face_encoding'], {}), 
'(known_face_encodings, face_encoding)\n', (1192, 1229), False, 'import face_recognition\n'), ((1259, 1326), 'face_recognition.face_distance', 'face_recognition.face_distance', (['known_face_encodings', 'face_encoding'], {}), '(known_face_encodings, face_encoding)\n', (1289, 1326), False, 'import face_recognition\n'), ((1358, 1383), 'numpy.argmin', 'np.argmin', (['face_distances'], {}), '(face_distances)\n', (1367, 1383), True, 'import numpy as np\n'), ((1502, 1568), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, top)', '(right, bottom)', '(0, 0, 255)', '(2)'], {}), '(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n', (1515, 1568), False, 'import cv2\n'), ((1951, 1978), 'os.system', 'os.system', (['"""pmset sleepnow"""'], {}), "('pmset sleepnow')\n", (1960, 1978), False, 'import os\n'), ((2087, 2175), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(left, bottom - 35)', '(right, bottom)', '(0, 0, 255)', 'cv2.FILLED'], {}), '(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2\n .FILLED)\n', (2100, 2175), False, 'import cv2\n'), ((2226, 2305), 'cv2.putText', 'cv2.putText', (['frame', 'name', '(left + 6, bottom - 6)', 'font', '(1.0)', '(255, 255, 255)', '(1)'], {}), '(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n', (2237, 2305), False, 'import cv2\n'), ((2419, 2433), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2430, 2433), False, 'import cv2\n'), ((1705, 1716), 'time.time', 'time.time', ([], {}), '()\n', (1714, 1716), False, 'import time\n'), ((1881, 1938), 'cv2.imwrite', 'cv2.imwrite', (["(unknown_face_dir + st + '.jpg')", 'unknown_face'], {}), "(unknown_face_dir + st + '.jpg', unknown_face)\n", (1892, 1938), False, 'import cv2\n'), ((1738, 1773), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (1769, 1773), False, 'import datetime\n')] |
import numpy as np
from copy import deepcopy
def default_zero(dtype):
    """
    For a given dtype, return the zero value (which will indicate the absence of
    an edge). Raises an error on unknown datatypes.

    Inputs:
        dtype (class): Datatype of property or array.

    Outputs:
        def_zero (dtype): Default zero value of given dtype.

    Raises:
        ValueError: dtype is not boolean, numeric, or string-like.
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; compare
    # against the builtin bool and np.bool_ instead (np.dtype('bool')
    # compares equal to both).
    if dtype == bool or dtype == np.bool_:
        return False
    elif np.issubdtype(dtype, np.number):
        return 0
    elif np.issubdtype(dtype, np.str_):
        # Empty string!
        return ""
    else:
        raise ValueError(f"unknown dtype {dtype}")
def default_one(dtype):
    """
    For a given dtype, return the one value (which will indicate the presence of
    an edge). Raises an error on unknown datatypes.

    Inputs:
        dtype (class): Datatype of property or array.

    Outputs:
        def_one (dtype): Default one value of given dtype.

    Raises:
        ValueError: dtype is neither boolean nor numeric (strings have no
            natural "one" value).
    """
    # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; compare
    # against the builtin bool and np.bool_ instead.
    if dtype == bool or dtype == np.bool_:
        return True
    elif np.issubdtype(dtype, np.number):
        return 1
    else:
        # Fails on strings
        raise ValueError(f"unknown dtype {dtype}")
class EdgeProxy:
    """
    EdgeProxy is a way of accessing an edge property in a graph safely.
    EdgeProxy enforces rules onto how and when edge properties are assigned.
    """

    def __init__(self, g, prop):
        """
        Initialize a new EdgeProxy instance.

        Inputs:
            g (TinyGraph): The graph which EdgeProxy is accessing a property of.
                EdgeProxy will alter one property in g.e_p.
            prop (str): The name of the property to access.

        Outputs:
            ep (EdgeProxy): new EdgeProxy object.
        """
        self.__g = g
        self.__prop = prop
        self.__dtype = self.__g.e_p[self.__prop].dtype

    @property
    def dtype(self):
        # numpy dtype of the underlying edge-property array.
        return self.__dtype

    def _endpoints(self, key):
        """
        Validate an edge key and check that the edge exists.

        Shared by __setitem__ and __getitem__, which previously duplicated
        this logic.

        Inputs:
            key ((int, int)): Endpoints of the edge.

        Outputs:
            (e1, e2): The validated endpoints.

        Raises:
            KeyError: key is not a tuple of exactly two endpoints.
            IndexError: no edge exists between the endpoints.
        """
        if key.__class__ != tuple or len(key) != 2:
            raise KeyError("Expecting exactly two endpoints.")
        e1, e2 = key
        # BUGFIX: edge existence is recorded in the adjacency matrix, so
        # the weight must be compared against the *adjacency* dtype's zero.
        # The old code used the property's dtype, which could never match
        # for (e.g.) string-typed properties on a numeric adjacency matrix,
        # silently allowing property access on nonexistent edges.
        if self.__g[e1, e2] == default_zero(self.__g.adjacency.dtype):
            raise IndexError("No such edge.")
        return e1, e2

    def __setitem__(self, key, value):
        """
        Set an edge's property (symmetrically, since edges are undirected).

        Inputs:
            key ((int, int)): Endpoints of edge to set the property of.
            value (dtype): Value to set edge property to.

        Outputs:
            None
        """
        e1, e2 = self._endpoints(key)
        self.__g.e_p[self.__prop][e1, e2] = value
        self.__g.e_p[self.__prop][e2, e1] = value

    def __getitem__(self, key):
        """
        Get an edge's property.

        Inputs:
            key ((int, int)): Endpoints of edge to get the property of.

        Outputs:
            value (dtype): Value of edge property.
        """
        e1, e2 = self._endpoints(key)
        return self.__g.e_p[self.__prop][e1, e2]
class EdgeProxyGenerator:
    """
    EdgeProxyGenerator is the go-between for TinyGraph and EdgeProxy.
    TinyGraph hands its users an EdgeProxyGenerator, which in turn hands
    out EdgeProxy objects for individual edge properties.
    """

    def __init__(self, g):
        """
        Create a new generator bound to a graph.

        Inputs:
            g (TinyGraph): The graph that this generator is linked to.

        Outputs:
            epg (EdgeProxyGenerator): New generator
        """
        self.__graph = g

    def keys(self):
        # Names of all edge properties on the underlying graph.
        return self.__graph.e_p.keys()

    def items(self):
        # (name, array) pairs for all edge properties.
        return self.__graph.e_p.items()

    def __len__(self):
        # Number of edge properties defined on the graph.
        return len(self.__graph.e_p)

    def __contains__(self, key):
        # Whether an edge property with this name exists.
        return key in self.__graph.e_p

    def __getitem__(self, key):
        """
        Generate an EdgeProxy object for a user to access an edge property.

        Inputs:
            key (str): The property name to access

        Outputs:
            ep (EdgeProxy): An EdgeProxy object with access to the desired
                property and the current graph.
        """
        return EdgeProxy(self.__graph, key)
class TinyGraph:
    """
    TinyGraph is centered around our representation of graphs through numpy
    arrays using the class TinyGraph. The central feature is the adjacency
    matrix, which defines the graph structure under the assumption that we are
    using undirected, weighted graphs without self-loops. Each graph also has a
    set of vertex properties and a set of edge properties. We will also use
    numpy arrays to store the properties at each vertex or edge.
    """

    def __init__(self, vert_N, adj_type=np.float32, vp_types=None, ep_types=None):
        """
        Initialize a new TinyGraph instance.

        Inputs:
            vert_N (int): Number of vertices in the graph. Adding and removing
                vertices is much slower than adding or removing edges - setting
                this value accurately initially can improve efficiency.
            adj_type (numpy type): The type of the edge weights.
            vp_types (str:numpy type): A map from vertex property names to
                the types of each property. Defaults to no vertex properties.
            ep_types (str:numpy type): A map from edge property names to
                the types of each property. Defaults to no edge properties.

        Outputs:
            tg (TinyGraph): new TinyGraph instance.
        """
        # None (not {}) as default avoids the shared-mutable-default
        # pitfall; behavior is identical to passing an empty dict.
        vp_types = {} if vp_types is None else vp_types
        ep_types = {} if ep_types is None else ep_types

        self.__vert_N = vert_N
        self.adjacency = np.zeros((vert_N, vert_N), dtype=adj_type)

        self.v = {}
        self.e_p = {}
        self.e = EdgeProxyGenerator(self)

        for k, dt in vp_types.items():
            self.add_vert_prop(k, dt)
        for k, dt in ep_types.items():
            self.add_edge_prop(k, dt)

        # Graph-level (global) properties; values may be arbitrary objects.
        self.props = {}

    @property
    def vert_N(self):
        # Number of vertices (mutated only via add_vertex/remove_vertex).
        return self.__vert_N

    @property
    def edge_N(self):
        # Each undirected edge appears twice in the symmetric adjacency
        # matrix, so half the nonzero count is the edge count.
        e = np.count_nonzero(self.adjacency)
        if e % 2 != 0:
            raise Exception("Adjacency matrix has become asymmetric - "
                            "number of edges ambiguous")
        else:
            return e // 2

    def add_vert_prop(self, name, dtype):
        """
        Add the vertex property named 'name' to the graph.

        Inputs:
            name (str): property name
            dtype (class): numpy dtype of property

        Outputs:
            None
        """
        if name in self.v:
            raise KeyError(f"Graph already has vertex property named {name}")
        self.v[name] = np.zeros(self.__vert_N, dtype=dtype)

    def add_edge_prop(self, name, dtype):
        """
        Add the edge property named 'name' to the graph.

        Inputs:
            name (str): property name
            dtype (class): numpy dtype of property

        Outputs:
            None
        """
        if name in self.e_p:
            raise KeyError(f"Graph already has edge property named {name}")
        self.e_p[name] = np.zeros((self.__vert_N, self.__vert_N), dtype=dtype)

    def remove_vert_prop(self, name):
        """
        Removes the indicated vertex property from the graph

        Inputs:
            name (str): the name of the property

        Outputs:
            None
        """
        del self.v[name]

    def remove_edge_prop(self, name):
        """
        Removes the indicated edge property from the graph

        Inputs:
            name (str): the name of the property

        Outputs:
            None
        """
        del self.e_p[name]

    def add_vertex(self, props=None, **kwargs):
        """
        Add a vertex to a TinyGraph instance. This process can be slow because
        it requires reshaping the adjacency and property arrays.
        The new vertex will have the highest index (vert_N - 1).

        Inputs:
            properties are passed as key=value pairs or as a props dictionary
            If a key is not recognized, it will raise an error
            If a key is missing, the corresponding value will be left as 0
            for whatever the corresponding dtype is

        Outputs:
            None - modifications are made in place.
        """
        props = {} if props is None else props

        self.adjacency = np.insert(self.adjacency, self.__vert_N, 0, axis=0)
        self.adjacency = np.insert(self.adjacency, self.__vert_N, 0, axis=1)

        combined_props = {**props, **kwargs}

        # New vertex property arrays
        for key in self.v.keys():
            # Can resize because it's flat
            self.v[key].resize(self.__vert_N + 1)

            # Grab the argument value
            if key in combined_props:
                # BUGFIX: read from combined_props so properties supplied
                # via keyword arguments are honored (the old code indexed
                # `props`, which raised KeyError for kwargs-only values).
                self.v[key][self.__vert_N] = combined_props[key]

        # Reshape edge property arrays
        for key in self.e_p.keys():
            self.e_p[key] = np.insert(self.e_p[key], self.__vert_N, 0, axis=0)
            self.e_p[key] = np.insert(self.e_p[key], self.__vert_N, 0, axis=1)

        # Update the vertex count
        self.__vert_N += 1

    def remove_vertex(self, n):
        """
        Remove a vertex from a TinyGraph instance. This process can be slow
        because it requires reshaping the adjacency and property arrays.
        Moves up the vertices after n so that the numbering remains dense.

        Inputs:
            n (int): Vertex to remove. Vertices are indexed numerically
                (0...vert_N-1).

        Outputs:
            None - modifications are made in place.
        """
        # First update adjacency matrix
        self.adjacency = np.delete(self.adjacency, n, axis=0)
        self.adjacency = np.delete(self.adjacency, n, axis=1)

        # Trim the vertex property arrays
        for key in self.v.keys():
            self.v[key] = np.delete(self.v[key], n)

        # Trim the edge property arrays
        for key in self.e_p.keys():
            self.e_p[key] = np.delete(self.e_p[key], n, axis=0)
            self.e_p[key] = np.delete(self.e_p[key], n, axis=1)

        # Update the vertex count
        self.__vert_N -= 1

    def __setitem__(self, key, newValue):
        """
        Create an edge or change the weight of an existing edge. This operation
        is fast. Edges are undirected. If an existing edge is set to its zero
        value, it is removed, setting all of its property values to their zeros.

        Inputs:
            key (int, int): Endpoint vertices of edge.
            newValue (adj_type): Weight of edge.

        Outputs:
            None - modifications are made in place.
        """
        if key.__class__ != tuple:
            raise KeyError("Expecting exactly two endpoints.")
        elif len(key) != 2:
            raise KeyError("Expecting exactly two endpoints.")

        e1, e2 = key
        if e1 == e2:
            raise IndexError("Self-loops are not allowed.")

        # Symmetric write keeps the adjacency matrix consistent.
        self.adjacency[e1, e2] = newValue
        self.adjacency[e2, e1] = newValue

        # Removing an edge zeroes out all of its properties as well.
        if newValue == default_zero(self.adjacency.dtype):
            for k, prop in self.e_p.items():
                self.e_p[k][e1, e2] = default_zero(prop.dtype)
                self.e_p[k][e2, e1] = default_zero(prop.dtype)

    def __getitem__(self, key):
        """
        Get the weight of an edge. This operation is fast.

        Inputs:
            key (int, int): Endpoint vertices of edge.

        Outputs:
            weight (adj_type): Weight of edge, or the dtype's zero if no
                edge exists.
        """
        if key.__class__ != tuple:
            raise KeyError("Expecting exactly two endpoints.")
        elif len(key) != 2:
            raise KeyError("Expecting exactly two endpoints.")
        return self.adjacency[key[0]][key[1]]

    def copy(self):
        """
        Get a copy of the TinyGraph instance.

        Inputs:
            None

        Outputs:
            new_graph (TinyGraph): Deep copy of TinyGraph instance.
        """
        v_p = {k: v.dtype for k, v in self.v.items()}
        e_p = {k: e.dtype for k, e in self.e_p.items()}
        new_graph = TinyGraph(self.__vert_N, self.adjacency.dtype, v_p, e_p)
        new_graph.adjacency[:] = self.adjacency

        # Set vertex properties
        for key in self.v.keys():
            new_graph.v[key][:] = self.v[key]

        # Set edge properties
        for key in self.e_p.keys():
            new_graph.e_p[key][:] = self.e_p[key]

        # Global properties may hold arbitrary objects; deep-copy them so
        # the copy is fully independent of the original.
        for k, v in self.props.items():
            new_graph.props[k] = deepcopy(v)
        return new_graph

    def get_vert_props(self, n, vert_props=None):
        """
        Get the properties at a given vertex.

        Inputs:
            n (int): Vertex to get properties of.
            vert_props ([str]): A list of the vertex properties to return, by
                name. Defaults to all vertex properties.

        Outputs:
            props (str:prop_type): A dictionary mapping each of the vertex
                property names to the property at the input vertex.
        """
        if vert_props is None:
            vert_props = self.v.keys()
        props = {}
        for key in vert_props:
            props[key] = self.v[key][n]
        return props

    def get_edge_props(self, n1, n2, edge_props=None):
        """
        Get the properties at a given edge.

        Inputs:
            n1 (int): Endpoint vertex 1 of edge to get properties of.
            n2 (int): Endpoint vertex 2 of edge to get properties of.
            edge_props ([str]): A list of the edge properties to return, by
                name. Defaults to all edge properties.

        Outputs:
            props (str:prop_type): A dictionary mapping each of the edge
                property names to the property at the input edge.
        """
        if edge_props is None:
            edge_props = self.e_p.keys()
        props = {}
        for key in edge_props:
            props[key] = self.e_p[key][n1, n2]
        return props

    def __repr__(self):
        """
        Printable representation of a graph.

        Inputs:
            None

        Outputs:
            rep (str): TinyGraph Representation.
        """
        rep = "TinyGraph dtype=" + str(self.adjacency.dtype) + ", vert_N=" + \
            str(self.vert_N) + ", edge_N=" + str(self.edge_N) + "\n"
        return rep

    def print_full_graph(self):
        """
        Full representation of a graph. Includes all global, vertex and edge
        properties.

        Inputs:
            None

        Outputs:
            None - prints representation.
        """
        rep = "Global Properties:\n"
        for name, prop in self.props.items():
            rep += str(name) + ": " + str(prop) + "\n"

        rep += "\nVertices:\n"
        for i, props in self.vertices(vert_props=self.v.keys()):
            rep += str(i) + ": " + str(props) + "\n"

        rep += "\nEdges:\n"
        for i, j, w, props in self.edges(weight=True, edge_props=self.e.keys()):
            rep += "(" + str(i) + ", " + str(j) + "): Weight - " + str(w) +\
                ", Props - " + str(props) + "\n"

        print(rep[:-1])  # strip last newline

    def get_neighbors(self, n):
        """
        Get the neighbors of a vertex.

        Inputs:
            n (int): The vertex to get the neighbors of.

        Outputs:
            neighbors ([int]): A list of the neighbor vertices.
        """
        # A neighbor is any vertex whose adjacency entry differs from the
        # dtype's zero. Self-loops cannot occur (enforced by __setitem__).
        neighbors = np.argwhere(self.adjacency[n] !=
                                default_zero(self.adjacency.dtype)).flatten()
        return neighbors

    def edges(self, weight=False, edge_props=None):
        """
        Get a list of the edges by endpoint vertices, optionally with their
        weight and some properties.

        Inputs:
            weight (bool): Whether to return the weight of each edge. By default
                this is false and the weight is not returned.
            edge_props ([str]): A list of edge properties to return, by name.
                By default this is None and no properties are returned. Must be
                a list of existing properties.

        Outputs:
            edges ([edge]): A list of edges, where each edge is represented by a
                tuple. The first two elements of the tuple are the endpoints of
                the edge. If weight is true, the third element is the weight of
                the edge. If edge_props is given, a dictionary mapping the
                properties provided to the value for the edge is the final
                element of the tuple.
        """
        edges = []
        for i, j in np.argwhere(self.adjacency !=
                                default_zero(self.adjacency.dtype)):
            # Each undirected edge appears twice; keep only i < j.
            if i < j:
                e = (i, j)
                if weight:
                    e += (self[i, j],)
                if edge_props is not None:
                    d = self.get_edge_props(i, j, edge_props)
                    e += (d,)
                edges.append(e)
        return edges

    def vertices(self, vert_props=None):
        """
        Get a list of the vertices with some of their properties.

        Inputs:
            vert_props ([str]): A list of vertex properties to return, by name.
                By default an empty map is returned. Must be a list of existing
                properties.

        Outputs:
            vertices ([vertex]): A list of vertices, where each vertex is
                represented by a tuple. The first element of the tuple is the
                vertex index. The second element is a map from the provided
                vertex properties to the values at the vertex. Even when no
                properties are provided, a map is returned, since the list of
                vertices is simply 0...N-1, which can be retrieved more
                efficiently in other ways.
        """
        # None (not []) as default avoids a mutable default argument; it
        # preserves the old behavior of returning an empty property map.
        if vert_props is None:
            vert_props = ()
        vertices = []
        for i in range(self.__vert_N):
            vertices.append((i, self.get_vert_props(i, vert_props)))
        return vertices
| [
"copy.deepcopy",
"numpy.count_nonzero",
"numpy.zeros",
"numpy.insert",
"numpy.delete",
"numpy.issubdtype"
] | [((417, 448), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.number'], {}), '(dtype, np.number)\n', (430, 448), True, 'import numpy as np\n'), ((978, 1009), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.number'], {}), '(dtype, np.number)\n', (991, 1009), True, 'import numpy as np\n'), ((5716, 5758), 'numpy.zeros', 'np.zeros', (['(vert_N, vert_N)'], {'dtype': 'adj_type'}), '((vert_N, vert_N), dtype=adj_type)\n', (5724, 5758), True, 'import numpy as np\n'), ((6157, 6189), 'numpy.count_nonzero', 'np.count_nonzero', (['self.adjacency'], {}), '(self.adjacency)\n', (6173, 6189), True, 'import numpy as np\n'), ((6769, 6805), 'numpy.zeros', 'np.zeros', (['self.__vert_N'], {'dtype': 'dtype'}), '(self.__vert_N, dtype=dtype)\n', (6777, 6805), True, 'import numpy as np\n'), ((7228, 7281), 'numpy.zeros', 'np.zeros', (['(self.__vert_N, self.__vert_N)'], {'dtype': 'dtype'}), '((self.__vert_N, self.__vert_N), dtype=dtype)\n', (7236, 7281), True, 'import numpy as np\n'), ((8477, 8528), 'numpy.insert', 'np.insert', (['self.adjacency', 'self.__vert_N', '(0)'], {'axis': '(0)'}), '(self.adjacency, self.__vert_N, 0, axis=0)\n', (8486, 8528), True, 'import numpy as np\n'), ((8554, 8605), 'numpy.insert', 'np.insert', (['self.adjacency', 'self.__vert_N', '(0)'], {'axis': '(1)'}), '(self.adjacency, self.__vert_N, 0, axis=1)\n', (8563, 8605), True, 'import numpy as np\n'), ((9788, 9824), 'numpy.delete', 'np.delete', (['self.adjacency', 'n'], {'axis': '(0)'}), '(self.adjacency, n, axis=0)\n', (9797, 9824), True, 'import numpy as np\n'), ((9850, 9886), 'numpy.delete', 'np.delete', (['self.adjacency', 'n'], {'axis': '(1)'}), '(self.adjacency, n, axis=1)\n', (9859, 9886), True, 'import numpy as np\n'), ((476, 505), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (489, 505), True, 'import numpy as np\n'), ((9058, 9108), 'numpy.insert', 'np.insert', (['self.e_p[key]', 'self.__vert_N', '(0)'], {'axis': '(0)'}), '(self.e_p[key], self.__vert_N, 0, 
axis=0)\n', (9067, 9108), True, 'import numpy as np\n'), ((9137, 9187), 'numpy.insert', 'np.insert', (['self.e_p[key]', 'self.__vert_N', '(0)'], {'axis': '(1)'}), '(self.e_p[key], self.__vert_N, 0, axis=1)\n', (9146, 9187), True, 'import numpy as np\n'), ((9990, 10015), 'numpy.delete', 'np.delete', (['self.v[key]', 'n'], {}), '(self.v[key], n)\n', (9999, 10015), True, 'import numpy as np\n'), ((10121, 10156), 'numpy.delete', 'np.delete', (['self.e_p[key]', 'n'], {'axis': '(0)'}), '(self.e_p[key], n, axis=0)\n', (10130, 10156), True, 'import numpy as np\n'), ((10187, 10222), 'numpy.delete', 'np.delete', (['self.e_p[key]', 'n'], {'axis': '(1)'}), '(self.e_p[key], n, axis=1)\n', (10196, 10222), True, 'import numpy as np\n'), ((12673, 12684), 'copy.deepcopy', 'deepcopy', (['v'], {}), '(v)\n', (12681, 12684), False, 'from copy import deepcopy\n')] |
from decimal import Decimal
from binance.client import Client
from binance.enums import *
from binance import ThreadedWebsocketManager
import config as Config
from datetime import datetime
import numpy
import talib
class Trade:
    """RSI-driven spot trading bot for a single Binance symbol.

    Keeps a rolling window of 1-minute closing prices, recomputes the RSI on
    every closed candle, market-buys when the RSI drops below the oversold
    threshold and market-sells when it rises above the overbought threshold.
    A fixed-percentage stop-loss can force a sell at a loss; after that the
    bot stays flat until the RSI recovers above the overbought level.
    """
    # Standard 14-period RSI with the conventional 30/70 thresholds.
    RSI_PERIOD = 14
    RSI_OVERSOLD = 30
    RSI_OVERBOUGHT = 70
    def __init__(self, twm: ThreadedWebsocketManager, client: Client) -> None:
        """Store the websocket manager / REST client and reset all state."""
        self.twm = twm
        self.client = client
        self.closes = []          # rolling window of closed candle prices
        self.close = 0            # most recent price seen on the stream
        self.buy_price = 0        # price at which the last buy was placed
        self.last_rsi = 0         # RSI computed from the last closed candle
        self.bail_out_at = 0.02   # stop-loss threshold: 2% below buy price
        self.at_loss = False      # True after a stop-loss sell fired
        self.BOUGHT = False       # True while holding a position
        self.SOLD = True          # True while flat
        self.minQty = 0           # LOT_SIZE filter values (filled lazily
        self.maxQty = 0           # by get_round_step_quantity)
        self.stepSize = 0
    def get_first_set_of_closes(self) -> None:
        """Seed the close-price window with the last hour of 1m candles."""
        for kline in self.client.get_historical_klines(Config.TRADESYMBOL, Client.KLINE_INTERVAL_1MINUTE, "1 hour ago UTC"):
            # kline[4] is the candle's close price
            self.closes.append(float(kline[4]))
    def start(self) -> None:
        """Backfill history and subscribe to the live 1m kline stream."""
        self.get_first_set_of_closes()
        self.twm.start()
        self.twm.start_kline_socket(callback=self.handle_socket_message,
                                    symbol=Config.TRADESYMBOL, interval=Client.KLINE_INTERVAL_1MINUTE)
    def get_round_step_quantity(self, qty):
        """Clamp qty to the symbol's LOT_SIZE minimum and round to stepSize."""
        info = self.client.get_symbol_info(Config.TRADESYMBOL)
        for x in info["filters"]:
            if x["filterType"] == "LOT_SIZE":
                self.minQty = float(x["minQty"])
                self.maxQty = float(x["maxQty"])
                self.stepSize = x["stepSize"]
        if qty < self.minQty:
            qty = self.minQty
        return self.floor_step_size(qty)
    def get_quantity(self, asset):
        """Return the tradeable (step-rounded) free balance of *asset*."""
        balance = self.get_balance(asset=asset)
        quantity = self.get_round_step_quantity(float(balance))
        return quantity
    def floor_step_size(self, quantity):
        """Floor *quantity* to an integer multiple of the LOT_SIZE step."""
        # Decimal avoids binary-float rounding artifacts in the division.
        step_size_dec = Decimal(str(self.stepSize))
        return float(int(Decimal(str(quantity)) / step_size_dec) * step_size_dec)
    def get_balance(self, asset) -> str:
        """Return the free balance of *asset* as reported by Binance."""
        balance = self.client.get_asset_balance(asset=asset)
        return balance['free']
    def buy(self):
        """Market-buy using the full free quote-asset balance."""
        # NOTE(review): quoteOrderQty is rounded via the LOT_SIZE filter,
        # which applies to the base asset — confirm this is intended.
        self.client.order_market_buy(
            symbol=Config.TRADESYMBOL,
            quoteOrderQty=self.get_quantity(Config.QUOTE_ASSET))
    def sell(self):
        """Market-sell the full free base-asset balance."""
        self.client.order_market_sell(
            symbol=Config.TRADESYMBOL,
            quantity=self.get_quantity(Config.BASE_ASSET))
    def order(self, side: str) -> bool:
        """Place a market order and update the position flags.

        Returns True on success, False if the exchange call raised.
        """
        try:
            if side == SIDE_BUY:
                self.buy()
                self.buy_price = self.close
                self.at_loss = False
                self.BOUGHT = True
                self.SOLD = False
            else:
                self.sell()
                self.SOLD = True
                self.BOUGHT = False
        except Exception as e:
            print(
                "Error placing order - price: {} - rsi: {}".format(self.close, self.last_rsi))
            print(e)
            return False
        return True
    def should_buy(self) -> bool:
        """Buy when RSI is oversold and we are flat (never after stop-loss)."""
        if self.at_loss:
            # stay out of the market after a stop-loss sell
            return False
        if(self.last_rsi < Trade.RSI_OVERSOLD and not self.BOUGHT):
            return True
        else:
            return False
    def should_sell(self) -> bool:
        """Sell on overbought RSI, or immediately when the stop-loss fires."""
        if self.at_loss:
            # already flat after a stop-loss: clear the flag once RSI recovers
            if self.last_rsi >= Trade.RSI_OVERBOUGHT:
                self.at_loss = False
            return False
        if self.shouldStopLoss():
            self.at_loss = True
            return True
        if(self.last_rsi > Trade.RSI_OVERBOUGHT and not self.SOLD):
            return True
        else:
            return False
    def shouldStopLoss(self) -> bool:
        """True when price fell bail_out_at (2%) below the last buy price."""
        stop_loss_price = self.buy_price - (self.buy_price * self.bail_out_at)
        if(self.close <= stop_loss_price):
            print("At Loss -- BUY PRICE - {}".format(self.buy_price))
            return True
        else:
            return False
    def buy_or_sell(self) -> None:
        """Evaluate both signals and place at most one buy and one sell."""
        if self.should_buy():
            print(
                "Placing buy order - price: {} - rsi: {}".format(self.close, self.last_rsi))
            self.order(SIDE_BUY)
        if self.should_sell():
            print(
                "Placing sell order - price: {} - rsi: {}".format(self.close, self.last_rsi))
            self.order(SIDE_SELL)
    def handle_socket_message(self, msg) -> None:
        """Websocket callback: track price, and trade on each closed candle."""
        candle = msg['k']
        self.close = float(candle['c'])
        is_candle_closed = candle['x']
        if is_candle_closed:
            self.closes.append(self.close)
            if len(self.closes) > 30:
                # We don't want the array to get too big for the RAM
                self.closes.pop(0)
            np_closes = numpy.array(self.closes)
            rsi = talib.RSI(np_closes, Trade.RSI_PERIOD)
            self.last_rsi = rsi[-1]
            self.buy_or_sell()
            print("PRICE - {} -- RSI - {} -- TIME - {}".format(self.close, self.last_rsi,
                                                               datetime.now().strftime('%H:%M:%S')))
# Start The Trade
# Module-level side effects: a websocket manager and an authenticated REST
# client are created at import time using credentials from config.py.
twm = ThreadedWebsocketManager(
    api_key=Config.API_KEY, api_secret=Config.API_SECRET)
client = Client(Config.API_KEY, Config.API_SECRET)
trade = Trade(twm, client)
if __name__ == '__main__':
    # Backfill history and begin trading on the live kline stream.
    trade.start()
| [
"numpy.array",
"talib.RSI",
"binance.client.Client",
"datetime.datetime.now",
"binance.ThreadedWebsocketManager"
] | [((5069, 5147), 'binance.ThreadedWebsocketManager', 'ThreadedWebsocketManager', ([], {'api_key': 'Config.API_KEY', 'api_secret': 'Config.API_SECRET'}), '(api_key=Config.API_KEY, api_secret=Config.API_SECRET)\n', (5093, 5147), False, 'from binance import ThreadedWebsocketManager\n'), ((5163, 5204), 'binance.client.Client', 'Client', (['Config.API_KEY', 'Config.API_SECRET'], {}), '(Config.API_KEY, Config.API_SECRET)\n', (5169, 5204), False, 'from binance.client import Client\n'), ((4728, 4752), 'numpy.array', 'numpy.array', (['self.closes'], {}), '(self.closes)\n', (4739, 4752), False, 'import numpy\n'), ((4775, 4813), 'talib.RSI', 'talib.RSI', (['np_closes', 'Trade.RSI_PERIOD'], {}), '(np_closes, Trade.RSI_PERIOD)\n', (4784, 4813), False, 'import talib\n'), ((5005, 5019), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5017, 5019), False, 'from datetime import datetime\n')] |
import numpy as np
import torch
class _NumpyTransducer(torch.autograd.Function):
    """Reference RNN-T (transducer) loss implemented in pure NumPy.

    forward() returns per-sequence negative log-likelihoods computed with
    the alpha (forward) recursion; analytic gradients w.r.t. the
    log-probabilities are derived from the alpha/beta lattices during the
    forward pass and replayed in backward().
    """
    @staticmethod
    def forward(
        ctx,
        log_probs,
        logit_lengths,
        target_lengths,
        targets,
        blank=-1,
    ):
        """Compute transducer costs; stash gradients on ctx for backward.

        log_probs: (B_tgt, max_T, max_U, D) log-softmax outputs.
        Returns a FloatTensor of per-sequence costs on the input device.
        """
        device = log_probs.device
        # the whole computation runs on CPU in NumPy
        log_probs = log_probs.cpu().data.numpy()
        logit_lengths = logit_lengths.cpu().data.numpy()
        target_lengths = target_lengths.cpu().data.numpy()
        targets = targets.cpu().data.numpy()
        gradients, costs, _, _ = __class__.compute(
            log_probs=log_probs,
            logit_lengths=logit_lengths,
            target_lengths=target_lengths,
            targets=targets,
            blank=blank,
        )
        costs = torch.FloatTensor(costs).to(device=device)
        gradients = torch.FloatTensor(gradients).to(device=device)
        # precomputed analytic gradient; backward only replays it
        ctx.grads = torch.autograd.Variable(gradients)
        return costs
    @staticmethod
    def backward(ctx, output_gradients):
        # one None per non-differentiable forward argument
        return ctx.grads, None, None, None, None, None, None, None, None
    @staticmethod
    def compute_alpha_one_sequence(log_probs, targets, blank=-1):
        """Forward (alpha) lattice and cost for a single sequence.

        alpha[t, u] is the log-probability of consuming t frames and
        emitting the first u target labels.
        """
        max_T, max_U, D = log_probs.shape
        alpha = np.zeros((max_T, max_U), dtype=np.float32)
        # first column: only blanks are possible
        for t in range(1, max_T):
            alpha[t, 0] = alpha[t - 1, 0] + log_probs[t - 1, 0, blank]
        # first row: only label emissions are possible
        for u in range(1, max_U):
            alpha[0, u] = alpha[0, u - 1] + log_probs[0, u - 1, targets[u - 1]]
        for t in range(1, max_T):
            for u in range(1, max_U):
                # combine "advance time with blank" and "emit label" paths
                skip = alpha[t - 1, u] + log_probs[t - 1, u, blank]
                emit = alpha[t, u - 1] + log_probs[t, u - 1, targets[u - 1]]
                alpha[t, u] = np.logaddexp(skip, emit)
        # final blank transition terminates the lattice
        cost = -(alpha[-1, -1] + log_probs[-1, -1, blank])
        return alpha, cost
    @staticmethod
    def compute_beta_one_sequence(log_probs, targets, blank=-1):
        """Backward (beta) lattice and cost for a single sequence.

        beta[t, u] is the log-probability of completing the sequence from
        state (t, u); beta[0, 0] equals the total log-likelihood.
        """
        max_T, max_U, D = log_probs.shape
        beta = np.zeros((max_T, max_U), dtype=np.float32)
        beta[-1, -1] = log_probs[-1, -1, blank]
        for t in reversed(range(max_T - 1)):
            beta[t, -1] = beta[t + 1, -1] + log_probs[t, -1, blank]
        for u in reversed(range(max_U - 1)):
            beta[-1, u] = beta[-1, u + 1] + log_probs[-1, u, targets[u]]
        for t in reversed(range(max_T - 1)):
            for u in reversed(range(max_U - 1)):
                skip = beta[t + 1, u] + log_probs[t, u, blank]
                emit = beta[t, u + 1] + log_probs[t, u, targets[u]]
                beta[t, u] = np.logaddexp(skip, emit)
        cost = -beta[0, 0]
        return beta, cost
    @staticmethod
    def compute_gradients_one_sequence(
        log_probs, alpha, beta, targets, blank=-1
    ):
        """Analytic gradient of the cost w.r.t. log_probs for one sequence.

        Built from state-occupancy terms alpha[t, u] + beta[t', u'] in the
        log domain, then exponentiated and negated.
        """
        max_T, max_U, D = log_probs.shape
        # -inf means "transition not on any valid path" (zero gradient)
        gradients = np.full(log_probs.shape, float("-inf"))
        cost = -beta[0, 0]
        gradients[-1, -1, blank] = alpha[-1, -1]
        gradients[:-1, :, blank] = alpha[:-1, :] + beta[1:, :]
        for u, l in enumerate(targets):
            gradients[:, u, l] = alpha[:, u] + beta[:, u + 1]
        # adding cost (= -log-likelihood) normalizes the occupancies
        gradients = -(np.exp(gradients + log_probs + cost))
        return gradients
    @staticmethod
    def compute(
        log_probs,
        logit_lengths,
        target_lengths,
        targets,
        blank=-1,
    ):
        """Run alpha/beta/gradient for every sequence in the batch.

        Supports H hypotheses per source sequence (B_tgt = H * B_src).
        Returns (gradients, costs, alphas, betas), zero-padded to the
        batch-wide max_T/max_U.
        """
        gradients = np.zeros_like(log_probs)
        B_tgt, max_T, max_U, D = log_probs.shape
        B_src = logit_lengths.shape[0]
        H = int(B_tgt / B_src)
        alphas = np.zeros((B_tgt, max_T, max_U))
        betas = np.zeros((B_tgt, max_T, max_U))
        betas.fill(float("-inf"))
        alphas.fill(float("-inf"))
        costs = np.zeros(B_tgt)
        for b_tgt in range(B_tgt):
            b_src = int(b_tgt / H)
            T = int(logit_lengths[b_src])
            # NOTE: see https://arxiv.org/pdf/1211.3711.pdf Section 2.1
            U = int(target_lengths[b_tgt]) + 1
            seq_log_probs = log_probs[b_tgt, :T, :U, :]
            seq_targets = targets[b_tgt, : int(target_lengths[b_tgt])]
            alpha, alpha_cost = __class__.compute_alpha_one_sequence(
                log_probs=seq_log_probs, targets=seq_targets, blank=blank
            )
            beta, beta_cost = __class__.compute_beta_one_sequence(
                log_probs=seq_log_probs, targets=seq_targets, blank=blank
            )
            seq_gradients = __class__.compute_gradients_one_sequence(
                log_probs=seq_log_probs,
                alpha=alpha,
                beta=beta,
                targets=seq_targets,
                blank=blank,
            )
            # sanity check: forward and backward recursions must agree
            np.testing.assert_almost_equal(alpha_cost, beta_cost, decimal=2)
            gradients[b_tgt, :T, :U, :] = seq_gradients
            costs[b_tgt] = beta_cost
            alphas[b_tgt, :T, :U] = alpha
            betas[b_tgt, :T, :U] = beta
        return gradients, costs, alphas, betas
class NumpyTransducerLoss(torch.nn.Module):
    """RNN-T loss module backed by the pure-NumPy reference transducer.

    Applies a log-softmax over the last logit dimension and delegates the
    cost/gradient computation to _NumpyTransducer.
    """

    def __init__(self, blank=-1):
        super().__init__()
        self.blank = blank  # index of the blank label

    def forward(self, logits, logit_lengths, target_lengths, targets):
        """Return per-sequence transducer costs for a batch of logits."""
        log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
        return _NumpyTransducer.apply(
            log_probs, logit_lengths, target_lengths, targets, self.blank
        )
| [
"numpy.zeros_like",
"torch.autograd.Variable",
"numpy.testing.assert_almost_equal",
"numpy.zeros",
"torch.FloatTensor",
"numpy.logaddexp",
"numpy.exp",
"torch.nn.functional.log_softmax"
] | [((864, 898), 'torch.autograd.Variable', 'torch.autograd.Variable', (['gradients'], {}), '(gradients)\n', (887, 898), False, 'import torch\n'), ((1197, 1239), 'numpy.zeros', 'np.zeros', (['(max_T, max_U)'], {'dtype': 'np.float32'}), '((max_T, max_U), dtype=np.float32)\n', (1205, 1239), True, 'import numpy as np\n'), ((1961, 2003), 'numpy.zeros', 'np.zeros', (['(max_T, max_U)'], {'dtype': 'np.float32'}), '((max_T, max_U), dtype=np.float32)\n', (1969, 2003), True, 'import numpy as np\n'), ((3331, 3355), 'numpy.zeros_like', 'np.zeros_like', (['log_probs'], {}), '(log_probs)\n', (3344, 3355), True, 'import numpy as np\n'), ((3494, 3525), 'numpy.zeros', 'np.zeros', (['(B_tgt, max_T, max_U)'], {}), '((B_tgt, max_T, max_U))\n', (3502, 3525), True, 'import numpy as np\n'), ((3542, 3573), 'numpy.zeros', 'np.zeros', (['(B_tgt, max_T, max_U)'], {}), '((B_tgt, max_T, max_U))\n', (3550, 3573), True, 'import numpy as np\n'), ((3659, 3674), 'numpy.zeros', 'np.zeros', (['B_tgt'], {}), '(B_tgt)\n', (3667, 3674), True, 'import numpy as np\n'), ((5169, 5216), 'torch.nn.functional.log_softmax', 'torch.nn.functional.log_softmax', (['logits'], {'dim': '(-1)'}), '(logits, dim=-1)\n', (5200, 5216), False, 'import torch\n'), ((3104, 3140), 'numpy.exp', 'np.exp', (['(gradients + log_probs + cost)'], {}), '(gradients + log_probs + cost)\n', (3110, 3140), True, 'import numpy as np\n'), ((4608, 4672), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['alpha_cost', 'beta_cost'], {'decimal': '(2)'}), '(alpha_cost, beta_cost, decimal=2)\n', (4638, 4672), True, 'import numpy as np\n'), ((734, 758), 'torch.FloatTensor', 'torch.FloatTensor', (['costs'], {}), '(costs)\n', (751, 758), False, 'import torch\n'), ((797, 825), 'torch.FloatTensor', 'torch.FloatTensor', (['gradients'], {}), '(gradients)\n', (814, 825), False, 'import torch\n'), ((1708, 1732), 'numpy.logaddexp', 'np.logaddexp', (['skip', 'emit'], {}), '(skip, emit)\n', (1720, 1732), True, 'import numpy as np\n'), 
((2540, 2564), 'numpy.logaddexp', 'np.logaddexp', (['skip', 'emit'], {}), '(skip, emit)\n', (2552, 2564), True, 'import numpy as np\n')] |
from .grasp_sampling import GraspSampler
from .wholebody_planning import WholeBodyPlanner
from mp.const import VIRTUAL_CUBOID_HALF_SIZE
from mp.utils import keep_state
import mp.align_rotation as rot_util
from scipy.spatial.transform import Rotation as R
import copy
import numpy as np
def get_heuristic_grasp(env, pos, quat):
    """Return the first feasible heuristic grasp for the given cube pose.

    Falls back to drawing a random feasible grasp when the heuristic
    generator is exhausted (signalled by StopIteration).
    """
    grasp_sampler = GraspSampler(env, pos, quat)
    try:
        heuristic_grasps = grasp_sampler.get_heuristic_grasps()
        return heuristic_grasps[0]
    except StopIteration:
        return grasp_sampler()
def get_all_heuristic_grasps(env, pos, quat, avoid_edge_faces=True):
    """Return every heuristic grasp for the pose (no sampling fallback)."""
    grasp_sampler = GraspSampler(env, pos, quat,
                                 avoid_edge_faces=avoid_edge_faces)
    return grasp_sampler.get_heuristic_grasps()
def sample_grasp(env, pos, quat):
    """Sample a random feasible grasp for the given cube pose."""
    grasp_sampler = GraspSampler(env, pos, quat)
    return grasp_sampler()
def sample_partial_grasp(env, pos, quat):
    """Sample a grasp, allowing solutions that use only a subset of tips."""
    grasp_sampler = GraspSampler(env, pos, quat, allow_partial_sol=True)
    return grasp_sampler()
def get_planned_grasp(env, pos, quat, goal_pos, goal_quat, tight=False,
                      **kwargs):
    """Plan a whole-body cube trajectory and return (grasp, path).

    The grasp is deep-copied before the optional tightening step so the
    caller receives the grasp originally used for planning. The planned
    trajectory is persisted through the environment's custom logs.
    """
    path = WholeBodyPlanner(env).plan(pos, quat, goal_pos, goal_quat,
                                      **kwargs)
    grasp = copy.deepcopy(path.grasp)
    if tight:
        path = path.tighten(env, path, coef=0.5)
    # save planned trajectory
    env.unwrapped.register_custom_log(
        'wholebody_path', {'cube': path.cube, 'tip_path': path.tip_path})
    env.unwrapped.save_custom_logs()
    return grasp, path
def get_pitching_grasp(env, pos, quat, goal_quat):
    """Find a grasp suitable for pitching/rolling the cube toward goal_quat.

    Returns (grasp, axis, angle): the rotation axis and signed angle come
    from the roll/pitch decomposition between the current and goal
    orientations. Tip positions are placed on the rotation axis and on the
    turning direction; collisions are ignored as a last resort.
    """
    axis, angle = rot_util.get_roll_pitch_axis_and_angle(quat, goal_quat)
    vert, _ = rot_util.get_most_vertical_axis(quat)
    # direction the cube surface moves while rotating about `axis`
    turning_axis = np.cross(vert, axis)
    cube_tip_positions = np.asarray([
        VIRTUAL_CUBOID_HALF_SIZE * axis,
        VIRTUAL_CUBOID_HALF_SIZE * turning_axis * np.sign(angle),
        VIRTUAL_CUBOID_HALF_SIZE * -axis,
    ])
    grasp_sampler = GraspSampler(env, pos, quat)
    # keep_state restores the simulation after feasibility probing
    with keep_state(env):
        try:
            return grasp_sampler.get_feasible_grasps_from_tips(
                grasp_sampler.T_cube_to_base(cube_tip_positions)
            ).__next__(), axis, angle
        except StopIteration:
            # no collision-free grasp: retry with collisions ignored
            grasp_sampler = GraspSampler(env, pos, quat, ignore_collision=True)
            return grasp_sampler.get_feasible_grasps_from_tips(
                grasp_sampler.T_cube_to_base(cube_tip_positions)
            ).__next__(), axis, angle
def get_yawing_grasp(env, pos, quat, goal_quat, step_angle=np.pi / 2):
    """Plan a grasp and in-place yaw rotation toward goal_quat.

    The yaw difference is clipped to +/- step_angle and a whole-body plan
    is attempted for that partial rotation. Returns (grasp, path), or
    (None, None) when planning fails.
    """
    from mp.align_rotation import get_yaw_diff
    from mp.const import COLLISION_TOLERANCE
    print("[get_yawing_grasp] step_angle:", step_angle * 180 / np.pi)
    angle = get_yaw_diff(quat, goal_quat)
    print('[get_yawing_grasp] get_yaw_diff:', angle * 180 / np.pi)
    # rotate at most step_angle per call; callers iterate to the goal
    angle_clip = np.clip(angle, -step_angle, step_angle)
    print('[get_yawing_grasp] clipped angle:', angle_clip * 180 / np.pi)
    goal_quat = (R.from_euler('Z', angle_clip) * R.from_quat(quat)).as_quat()
    planner = WholeBodyPlanner(env)
    try:
        path = planner.plan(pos, quat, pos, goal_quat, use_ori=True, avoid_edge_faces=True, yawing_grasp=True,
                            collision_tolerance=-COLLISION_TOLERANCE * 3, retry_grasp=0, direct_path=True)
    except RuntimeError as e:
        # NOTE(review): 'e' is caught but never reported; consider logging it
        print(f'[get_yawing_grasp] wholebody planning failed for step_angle: {step_angle}')
        return None, None
    grasp = copy.deepcopy(path.grasp)
    # save planned trajectory
    env.unwrapped.register_custom_log('wholebody_path', {'cube': path.cube, 'tip_path': path.tip_path})
    env.unwrapped.save_custom_logs()
    return grasp, path
| [
"copy.deepcopy",
"mp.align_rotation.get_roll_pitch_axis_and_angle",
"mp.align_rotation.get_yaw_diff",
"scipy.spatial.transform.Rotation.from_euler",
"numpy.cross",
"numpy.clip",
"scipy.spatial.transform.Rotation.from_quat",
"numpy.sign",
"mp.utils.keep_state",
"mp.align_rotation.get_most_vertical_... | [((1073, 1098), 'copy.deepcopy', 'copy.deepcopy', (['path.grasp'], {}), '(path.grasp)\n', (1086, 1098), False, 'import copy\n'), ((1428, 1483), 'mp.align_rotation.get_roll_pitch_axis_and_angle', 'rot_util.get_roll_pitch_axis_and_angle', (['quat', 'goal_quat'], {}), '(quat, goal_quat)\n', (1466, 1483), True, 'import mp.align_rotation as rot_util\n'), ((1498, 1535), 'mp.align_rotation.get_most_vertical_axis', 'rot_util.get_most_vertical_axis', (['quat'], {}), '(quat)\n', (1529, 1535), True, 'import mp.align_rotation as rot_util\n'), ((1555, 1575), 'numpy.cross', 'np.cross', (['vert', 'axis'], {}), '(vert, axis)\n', (1563, 1575), True, 'import numpy as np\n'), ((2552, 2581), 'mp.align_rotation.get_yaw_diff', 'get_yaw_diff', (['quat', 'goal_quat'], {}), '(quat, goal_quat)\n', (2564, 2581), False, 'from mp.align_rotation import get_yaw_diff\n'), ((2666, 2705), 'numpy.clip', 'np.clip', (['angle', '(-step_angle)', 'step_angle'], {}), '(angle, -step_angle, step_angle)\n', (2673, 2705), True, 'import numpy as np\n'), ((3281, 3306), 'copy.deepcopy', 'copy.deepcopy', (['path.grasp'], {}), '(path.grasp)\n', (3294, 3306), False, 'import copy\n'), ((1830, 1845), 'mp.utils.keep_state', 'keep_state', (['env'], {}), '(env)\n', (1840, 1845), False, 'from mp.utils import keep_state\n'), ((1706, 1720), 'numpy.sign', 'np.sign', (['angle'], {}), '(angle)\n', (1713, 1720), True, 'import numpy as np\n'), ((2796, 2825), 'scipy.spatial.transform.Rotation.from_euler', 'R.from_euler', (['"""Z"""', 'angle_clip'], {}), "('Z', angle_clip)\n", (2808, 2825), True, 'from scipy.spatial.transform import Rotation as R\n'), ((2828, 2845), 'scipy.spatial.transform.Rotation.from_quat', 'R.from_quat', (['quat'], {}), '(quat)\n', (2839, 2845), True, 'from scipy.spatial.transform import Rotation as R\n')] |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
"""
Version 2.0
"""
import numpy as np
class CActivator(object):
    """Base interface for layer activation functions."""

    def forward(self, z):
        """Apply the activation. z: pre-activation matrix (w.x + b)."""
        pass

    def backward(self, z, a, delta):
        """Back-propagate through the activation.

        z: pre-activation matrix (w.x + b) of this layer
        a: activation output matrix of this layer
        delta: gradient matrix propagated back from the next layer
        """
        pass
# Pass-through function, equivalent to having no activation
class Identity(CActivator):
    """Identity activation: a = z."""

    def forward(self, z):
        """Return the input unchanged."""
        return z

    def backward(self, z, a, delta):
        """The upstream gradient passes through untouched."""
        return delta, a
class Sigmoid(CActivator):
    """Logistic sigmoid activation: a = 1 / (1 + exp(-z))."""

    def forward(self, z):
        return 1.0 / (1.0 + np.exp(-z))

    def backward(self, z, a, delta):
        # local derivative: d(sigmoid)/dz = a * (1 - a)
        local_grad = np.multiply(a, 1 - a)
        return np.multiply(delta, local_grad), local_grad
class Tanh(CActivator):
    """Hyperbolic tangent activation, expressed via the logistic function."""

    def forward(self, z):
        # tanh(z) == 2 * sigmoid(2z) - 1
        return 2.0 / (1.0 + np.exp(-2*z)) - 1.0

    def backward(self, z, a, delta):
        # local derivative: d(tanh)/dz = 1 - a^2
        local_grad = 1 - np.multiply(a, a)
        return np.multiply(delta, local_grad), local_grad
class Relu(CActivator):
    """Rectified linear unit: a = max(z, 0)."""

    def forward(self, z):
        return np.maximum(z, 0)

    def backward(self, z, a, delta):
        # The gate is decided by the sign of the pre-activation z
        # (not by a): gradient flows only where z > 0.
        local_grad = np.zeros(z.shape)
        local_grad[z > 0] = 1
        return local_grad * delta, local_grad
| [
"numpy.zeros",
"numpy.exp",
"numpy.multiply",
"numpy.maximum"
] | [((715, 736), 'numpy.multiply', 'np.multiply', (['a', '(1 - a)'], {}), '(a, 1 - a)\n', (726, 736), True, 'import numpy as np\n'), ((748, 770), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (759, 770), True, 'import numpy as np\n'), ((992, 1014), 'numpy.multiply', 'np.multiply', (['delta', 'da'], {}), '(delta, da)\n', (1003, 1014), True, 'import numpy as np\n'), ((1101, 1117), 'numpy.maximum', 'np.maximum', (['z', '(0)'], {}), '(z, 0)\n', (1111, 1117), True, 'import numpy as np\n'), ((1229, 1246), 'numpy.zeros', 'np.zeros', (['z.shape'], {}), '(z.shape)\n', (1237, 1246), True, 'import numpy as np\n'), ((961, 978), 'numpy.multiply', 'np.multiply', (['a', 'a'], {}), '(a, a)\n', (972, 978), True, 'import numpy as np\n'), ((635, 645), 'numpy.exp', 'np.exp', (['(-z)'], {}), '(-z)\n', (641, 645), True, 'import numpy as np\n'), ((869, 883), 'numpy.exp', 'np.exp', (['(-2 * z)'], {}), '(-2 * z)\n', (875, 883), True, 'import numpy as np\n')] |
import time
import numpy as np
import math
from scipy import special
from scipy import optimize
# Numerical tolerance used throughout this module (note: 10e-15 == 1e-14).
EPS = 10e-15
class TruncatedMVN:
"""
Create a normal distribution :math:`X \sim N ({\mu}, {\Sigma})` subject to linear inequality constraints
:math:`lb < X < ub` and sample from it using minimax tilting. Based on the MATLAB implemention by the authors
(reference below).
:param np.ndarray mu: (size D) mean of the normal distribution :math:`\mathbf {\mu}`.
:param np.ndarray cov: (size D x D) covariance of the normal distribution :math:`\mathbf {\Sigma}`.
:param np.ndarray lb: (size D) lower bound constrain of the multivariate normal distribution :math:`\mathbf lb`.
:param np.ndarray ub: (size D) upper bound constrain of the multivariate normal distribution :math:`\mathbf ub`.
Note that the algorithm may not work if 'cov' is close to being rank deficient.
Reference:
<NAME>., (2016), The normal law under linear restrictions: simulation and estimation via minimax tilting,
Journal of the Royal Statistical Society Series B, 79, issue 1, p. 125-148,
Example:
>>> d = 10 # dimensions
>>>
>>> # random mu and cov
>>> mu = np.random.rand(d)
>>> cov = 0.5 - np.random.rand(d ** 2).reshape((d, d))
>>> cov = np.triu(cov)
>>> cov += cov.T - np.diag(cov.diagonal())
>>> cov = np.dot(cov, cov)
>>>
>>> # constraints
>>> lb = np.zeros_like(mu) - 2
>>> ub = np.ones_like(mu) * np.inf
>>>
>>> # create truncated normal and sample from it
>>> n_samples = 100000
>>> samples = TruncatedMVN(mu, cov, lb, ub).sample(n_samples)
Reimplementation by <NAME> --> https://github.com/brunzema/truncated-mvn-sampler
"""
    def __init__(self, mu, cov, lb, ub):
        """Validate inputs and set up state; heavy factorization is deferred
        to compute_factors(), which runs lazily on the first sample() call.
        """
        self.dim = len(mu)
        if not cov.shape[0] == cov.shape[1]:
            raise RuntimeError("Covariance matrix must be of shape DxD!")
        if not (self.dim == cov.shape[0] and self.dim == len(lb) and self.dim == len(ub)):
            raise RuntimeError("Dimensions D of mean (mu), covariance matric (cov), lower bound (lb) "
                               "and upper bound (ub) must be the same!")
        self.cov = cov
        self.orig_mu = mu
        self.orig_lb = lb
        self.orig_ub = ub
        # zero-mean bounds in the original (unpermuted) coordinates,
        # used by the Gibbs fallback
        self.moved_lb = lb - mu
        self.moved_ub = ub - mu
        # permutated
        self.lb = lb - mu  # move distr./bounds to have zero mean
        self.ub = ub - mu  # move distr./bounds to have zero mean
        if np.any(self.ub <= self.lb):
            raise RuntimeError("Upper bound (ub) must be strictly greater than lower bound (lb) for all D dimensions!")
        # scaled Cholesky with zero diagonal, permutated
        self.L = np.empty_like(cov)
        self.unscaled_L = np.empty_like(cov)
        self.scaled_L = np.empty_like(cov)
        # placeholder for optimization (filled by compute_factors)
        self.perm = None
        self.x = None
        self.mu = None       # NOTE: tilting parameter; shadows the input mean
        self.psistar = None
        # options for Gibbs sampling
        self.switch_to_gibbs_sampling = None
        self.start_gibbs_at_iteration = None
        # for numerics
        self.eps = EPS
    def sample(self, n, max_iterations=10 ** 5, switch_to_gibbs_sampling=True, iteration_gibbs_sampling=1000):
        """
        Create n samples from the truncated normal distribution.
        Uses minimax-tilting acceptance-rejection sampling; when acceptance
        is too slow, optionally falls back to (non-iid) Gibbs sampling.
        :param int n: Number of samples to create.
        :param int max_iterations: max number of simulation iterations.
        :param boolean switch_to_gibbs_sampling: switch to Gibbs sampling, if iteration > iteration_gibbs_sampling.
        :param int iteration_gibbs_sampling: iteration at which Gibbs sampling should start.
        :return: D x n array with the samples.
        :rtype: np.ndarray
        """
        if not isinstance(n, int):
            raise RuntimeError("Number of samples must be an integer!")
        # factors (Cholesky, etc.) only need to be computed once!
        if self.psistar is None:
            self.compute_factors()
        # start acceptance rejection sampling
        rv = np.array([], dtype=np.float32).reshape(self.dim, 0)
        accept, iteration = 0, 0
        t0 = time.time()
        while accept < n:
            logpr, Z = self.mvnrnd(n, self.mu)  # simulate n proposals
            idx = -np.log(np.random.rand(n)) > (self.psistar - logpr)  # acceptance tests
            rv = np.concatenate((rv, Z[:, idx]), axis=1)  # accumulate accepted
            accept = rv.shape[1]  # keep track of # of accepted
            iteration += 1
            if iteration % 200 == 0:
                print(f'Iteration: {iteration}, Accepted samples: {accept}. Time taken: {time.time() - t0:10.3f}s.')
                t0 = time.time()
            if iteration == 10 ** 3:
                print('Warning: Acceptance prob. smaller than 0.001.')
            if switch_to_gibbs_sampling and iteration > iteration_gibbs_sampling:
                if accept < 2:  # create initialization, if no samples where accepted so far
                    accept = 0
                    # draw one unconstrained sample and clip it into the box
                    rv = np.random.multivariate_normal(np.zeros_like(self.lb), self.cov).reshape(-1, 1)
                    rv = np.where(rv < self.moved_lb, self.moved_lb + EPS * np.random.rand(), rv)
                    rv = np.where(rv > self.moved_ub, self.moved_ub - EPS * np.random.rand(), rv)
                else:
                    # rescale minimax samples - Gibbs sampling works on the unscaled covariance matrix
                    rv = rv[:, :-1]
                    rv = self.unscaled_L @ rv
                n_remaining = n - accept
                print(f'Switching to Gibbs sampling. Remaining: {n_remaining}. SAMPLES ARE NO LONGER IID!')
                t0 = time.time()
                # print('Compile Gibbs sampler...')
                # test_L, test_rv = np.array([[1, 0], [0.2, 1]]), np.array([[0.1], [0.1]])
                # _ = gibbs_sampling(test_L.astype('float32'), test_rv.astype('float32'), 2)
                # print('Finished.')
                # Z = gibbs_sampling(self.scaled_L.astype('float32'), rv.astype('float32'), n_remaining)
                Z_gibbs, overall_time, calc_time, sample_time = self.gibbs_sampling(rv.astype('float32'), n_remaining)
                print(f'Time taken for Gibbs sampling: {time.time() - t0:10.3f}s.'
                      f'Overall time: {overall_time:10.3f}s. Calculation time: {calc_time:10.3f}s.'
                      f'Sampling time: {sample_time:10.3f}s.')
                # combine, reorder, and move to original mean
                rv = np.concatenate((rv, Z_gibbs), axis=1)
                rv = rv[:, :n]
                order = self.perm.argsort(axis=0)
                rv = rv[order, :]
                rv += np.tile(self.orig_mu.reshape(self.dim, 1), (1, rv.shape[-1]))
                return rv
            elif iteration > max_iterations:
                # give up on exactness: pad with the latest proposals
                accept = n
                rv = np.concatenate((rv, Z), axis=1)
                print('Warning: Sample is only approximately distributed.')
        # finish sampling and postprocess the samples!
        order = self.perm.argsort(axis=0)
        rv = rv[:, :n]
        rv = self.unscaled_L @ rv       # undo the Cholesky scaling
        rv = rv[order, :]               # undo the permutation
        # retransfer to original mean
        rv += np.tile(self.orig_mu.reshape(self.dim, 1), (1, rv.shape[-1]))  # Z = X + mu
        return rv
    def compute_factors(self):
        """Compute the permuted Cholesky factor and the optimal minimax
        tilting parameters (self.x, self.mu, self.psistar).

        Called lazily by sample(); also rescales self.lb/self.ub by the
        Cholesky diagonal as a side effect.
        """
        # compute permutated Cholesky factor and solve optimization
        # Cholesky decomposition of matrix with permuation
        self.unscaled_L, self.perm = self.colperm()
        D = np.diag(self.unscaled_L)
        if np.any(D < self.eps):
            print('Warning: Method might fail as covariance matrix is singular!')
        # rescale rows so the Cholesky diagonal becomes 1
        self.scaled_L = self.unscaled_L / np.tile(D.reshape(self.dim, 1), (1, self.dim))
        self.lb = self.lb / D
        self.ub = self.ub / D
        # remove diagonal
        self.L = self.scaled_L - np.eye(self.dim)
        # get gradient/Jacobian function
        gradpsi = self.get_gradient_function()
        x0 = np.zeros(2 * (self.dim - 1))
        # find optimal tilting parameter non-linear equation solver
        sol = optimize.root(gradpsi, x0, args=(self.L, self.lb, self.ub), method='hybr', jac=True)
        if not sol.success:
            print('Warning: Method may fail as covariance matrix is close to singular!')
        # solution vector packs [x, mu] back to back
        self.x = sol.x[:self.dim - 1]
        self.mu = sol.x[self.dim - 1:]
        # compute psi star
        self.psistar = self.psy(self.x, self.mu)
def reset(self):
# reset factors -> when sampling, optimization for optimal tilting parameters is performed again
# permutated
self.lb = self.orig_lb - self.orig_mu # move distr./bounds to have zero mean
self.ub = self.orig_ub - self.orig_mu
# scaled Cholesky with zero diagonal, permutated
self.L = np.empty_like(self.cov)
self.unscaled_L = np.empty_like(self.cov)
# placeholder for optimization
self.perm = None
self.x = None
self.mu = None
self.psistar = None
    def gibbs_sampling(self, samples, n_remaining):
        """Generate n_remaining further samples by single-site Gibbs sweeps.

        samples: D x k array of existing samples (unscaled, zero-mean
        coordinates); the last column seeds the chain. Each sweep draws
        every coordinate from its truncated conditional normal. The
        resulting draws are correlated (NOT iid).

        Returns (Z, overall_time, calc_time, sample_time) where Z contains
        the seed columns followed by the new draws and the times are
        wall-clock diagnostics.
        """
        cov_s = self.cov
        t0_overall = time.time()
        Z = samples
        # conditional variance for the ith element (constant across sweeps,
        # so it is precomputed once)
        t0 = time.time()
        var = np.zeros_like(self.lb)
        for i in range(self.dim):
            mask = np.ones(self.dim, dtype=bool)
            mask[i] = 0
            var[i] = cov_s[i, i] - cov_s[mask, i].T @ np.linalg.solve(cov_s[mask][:, mask], cov_s[mask, i])
        calc_time = time.time() - t0
        sample_time = 0
        t0_interval = time.time()
        for j in range(n_remaining):
            last_Z = Z[:, -1].copy()
            new_Z = np.empty_like(last_Z)
            for i in range(self.dim):
                # create mask selecting all coordinates except i
                mask = np.ones(self.dim, dtype=bool)
                mask[i] = 0
                # conditional mean for ith element given the others
                t0 = time.time()
                mu_i = cov_s[mask, i] @ np.linalg.solve(cov_s[mask][:, mask], last_Z[mask])
                calc_time += time.time() - t0
                # stdv
                s = np.sqrt(var[i])
                # bounds standardized to the conditional N(mu_i, s^2)
                lb = np.array([(self.moved_lb[i] - mu_i) / s])
                ub = np.array([(self.moved_ub[i] - mu_i) / s])
                t0 = time.time()
                X = TruncatedMVN.trandn(lb, ub)
                new_Z[i] = mu_i + X * s
                # print(new_Z[i] > -1)
                # update dimension of last Z^j = Z^{j+1}_1 ... Z^{j+1}_{i-1}, Z^{j}_{i+1}, Z^{j}_{m}
                last_Z[i] = new_Z[i]
                sample_time += time.time() - t0
            # print('Package:', time.time() - t0)
            Z = np.concatenate((Z, new_Z.reshape(-1, 1)), axis=1)
            if j % 300 == 0 and j > 0:
                print(f'Created {j} samples. Time for Gibbs sampling so far: {time.time() - t0_interval:10.3f}s.'
                      f' Continue...')
                t0_interval = time.time()
        overall_time = time.time() - t0_overall
        return Z, overall_time, calc_time, sample_time
def mvnrnd(self, n, mu):
    """Draw `n` proposals from the exponentially tilted sequential importance sampling pdf.

    :param n: number of proposal samples (columns of Z).
    :param mu: tilting parameters of length dim-1; a trailing 0 is appended.
    :return: (logpr, Z) — per-sample log-likelihood ratios (length-n array)
             and the (dim x n) proposal matrix.
    """
    # generates the proposals from the exponentially tilted sequential importance sampling pdf
    # output:     logpr, log-likelihood of sample
    #             Z, random sample
    mu = np.append(mu, [0.])
    Z = np.zeros((self.dim, n))
    logpr = 0
    for k in range(self.dim):
        # compute matrix multiplication L @ Z (row k against the rows already sampled)
        col = self.L[k, :k] @ Z[:k, :]
        # compute limits of truncation for coordinate k
        tl = self.lb[k] - mu[k] - col
        tu = self.ub[k] - mu[k] - col
        # simulate N(mu,1) conditional on [tl,tu]
        Z[k, :] = mu[k] + TruncatedMVN.trandn(tl, tu)
        # update likelihood ratio (importance weight accumulated in log space)
        logpr += lnNormalProb(tl, tu) + .5 * mu[k] ** 2 - mu[k] * Z[k, :]
    return logpr, Z
@staticmethod
def trandn(lb, ub):
"""
Sample generator for the truncated standard multivariate normal distribution :math:`X \sim N(0,I)` s.t.
:math:`lb<X<ub`.
If you wish to simulate a random variable 'Z' from the non-standard Gaussian :math:`N(m,s^2)`
conditional on :math:`lb<Z<ub`, then first simulate x=TruncatedMVN.trandn((l-m)/s,(u-m)/s) and set
Z=m+s*x.
Infinite values for 'ub' and 'lb' are accepted.
:param np.ndarray lb: (size D) lower bound constrain of the normal distribution :math:`\mathbf lb`.
:param np.ndarray ub: (size D) upper bound constrain of the normal distribution :math:`\mathbf lb`.
:return: D samples if the truncated normal distribition x ~ N(0, I) subject to lb < x < ub.
:rtype: np.ndarray
"""
if not len(lb) == len(ub):
raise RuntimeError("Lower bound (lb) and upper bound (ub) must be of the same length!")
x = np.empty_like(lb)
a = 0.66 # threshold used in MATLAB implementation, other threshold might speed up python3 implementation
# three cases to consider
# case 1: a<lb<ub
I = lb > a
if np.any(I):
tl = lb[I]
tu = ub[I]
x[I] = TruncatedMVN.ntail(tl, tu)
# case 2: lb<ub<-a
J = ub < -a
if np.any(J):
tl = -ub[J]
tu = -lb[J]
x[J] = - TruncatedMVN.ntail(tl, tu)
# case 3: otherwise use inverse transform or accept-reject
I = ~(I | J)
if np.any(I):
tl = lb[I]
tu = ub[I]
x[I] = TruncatedMVN.tn(tl, tu)
return x
@staticmethod
def tn(lb, ub, tol=2):
# samples a column vector of length=len(lb)=len(ub) from the standard multivariate normal distribution
# truncated over the region [lb,ub], where -a<lb<ub<a for some 'a' and lb and ub are column vectors
# uses acceptance rejection and inverse-transform method
sw = tol # controls switch between methods, threshold can be tuned for maximum speed for each platform
x = np.empty_like(lb)
# case 1: abs(ub-lb)>tol, uses accept-reject from randn
I = abs(ub - lb) > sw
if np.any(I):
tl = lb[I]
tu = ub[I]
x[I] = TruncatedMVN.trnd(tl, tu)
# case 2: abs(u-l)<tol, uses inverse-transform
I = ~I
if np.any(I):
tl = lb[I]
tu = ub[I]
pl = special.erfc(tl / np.sqrt(2)) / 2
pu = special.erfc(tu / np.sqrt(2)) / 2
x[I] = np.sqrt(2) * special.erfcinv(2 * (pl - (pl - pu) * np.random.rand(len(tl))))
return x
@staticmethod
def trnd(lb, ub):
# uses acceptance rejection to simulate from truncated normal
x = np.random.randn(len(lb)) # sample normal
test = (x < lb) | (x > ub)
I = np.where(test)[0]
d = len(I)
while d > 0: # while there are rejections
ly = lb[I]
uy = ub[I]
y = np.random.randn(len(uy)) # resample
idx = (y > ly) & (y < uy) # accepted
x[I[idx]] = y[idx]
I = I[~idx]
d = len(I)
return x
@staticmethod
def ntail(lb, ub):
# samples a column vector of length=len(lb)=len(ub) from the standard multivariate normal distribution
# truncated over the region [lb,ub], where lb>0 and lb and ub are column vectors
# uses acceptance-rejection from Rayleigh distr. similar to Marsaglia (1964)
if not len(lb) == len(ub):
raise RuntimeError("Lower bound (lb) and upper bound (ub) must be of the same length!")
c = (lb ** 2) / 2
n = len(lb)
f = np.expm1(c - ub ** 2 / 2)
x = c - np.log(1 + np.random.rand(n) * f) # sample using Rayleigh
# keep list of rejected
I = np.where(np.random.rand(n) ** 2 * x > c)[0]
d = len(I)
while d > 0: # while there are rejections
cy = c[I]
y = cy - np.log(1 + np.random.rand(d) * f[I])
idx = (np.random.rand(d) ** 2 * y) < cy # accepted
x[I[idx]] = y[idx] # store the accepted
I = I[~idx] # remove accepted from the list
d = len(I)
return np.sqrt(2 * x) # this Rayleigh transform can be delayed till the end
def psy(self, x, mu):
    """Evaluate psi(x, mu) for the tilting optimization.

    Assumes self.L is the scaled Cholesky factor with zero diagonal; a
    trailing zero is appended to both x and mu before evaluation.
    """
    x_full = np.append(x, [0.])
    mu_full = np.append(mu, [0.])
    shift = self.L @ x_full
    lower = self.lb - mu_full - shift
    upper = self.ub - mu_full - shift
    terms = lnNormalProb(lower, upper) + 0.5 * mu_full ** 2 - x_full * mu_full
    return np.sum(terms)
def get_gradient_function(self):
    """Return the gradient/Jacobian function of psi for the tilting optimization.

    The returned `gradpsi(y, L, l, u)` is self-contained (no dependency on
    `self`) so it can be handed directly to `scipy.optimize.root` with jac=True.
    """
    # wrapper to avoid dependency on 'self'
    def gradpsi(y, L, l, u):
        # implements gradient of psi(x) to find optimal exponential twisting, returns also the Jacobian
        # NOTE: assumes scaled 'L' with zero diagonal
        d = len(u)
        c = np.zeros(d)
        mu, x = c.copy(), c.copy()
        # y packs [x_1..x_{d-1}, mu_1..mu_{d-1}]; the last component of each is fixed to 0.
        x[0:d - 1] = y[0:d - 1]
        mu[0:d - 1] = y[d - 1:]
        # compute now ~l and ~u (truncation limits shifted by mu and L @ x)
        c[1:d] = L[1:d, :] @ x
        lt = l - mu - c
        ut = u - mu - c
        # compute gradients avoiding catastrophic cancellation
        w = lnNormalProb(lt, ut)
        pl = np.exp(-0.5 * lt ** 2 - w) / np.sqrt(2 * math.pi)
        pu = np.exp(-0.5 * ut ** 2 - w) / np.sqrt(2 * math.pi)
        P = pl - pu
        # output the gradient
        dfdx = - mu[0:d - 1] + (P.T @ L[:, 0:d - 1]).T
        dfdm = mu - x + P
        grad = np.concatenate((dfdx, dfdm[:-1]), axis=0)
        # construct jacobian; zero out infinite limits so the products below stay finite
        lt[np.isinf(lt)] = 0
        ut[np.isinf(ut)] = 0
        dP = - P ** 2 + lt * pl - ut * pu
        DL = np.tile(dP.reshape(d, 1), (1, d)) * L
        mx = DL - np.eye(d)
        xx = L.T @ DL
        # drop the last (fixed) coordinate from each Jacobian block
        mx = mx[:-1, :-1]
        xx = xx[:-1, :-1]
        J = np.block([[xx, mx.T],
                      [mx, np.diag(1 + dP[:-1])]])
        return (grad, J)
    return gradpsi
def colperm(self):
    """Compute a variable reordering and the Cholesky factor of the permuted covariance.

    Greedily picks, at each step, the remaining dimension with the smallest
    marginal probability (hardest constraint first), swaps it into position j,
    and extends the Cholesky factor L one column at a time. Mutates self.cov,
    self.lb and self.ub in place to match the permutation.

    :return: (L, perm) — lower-triangular factor and the permutation applied.
    """
    perm = np.arange(self.dim)
    L = np.zeros_like(self.cov)
    z = np.zeros_like(self.orig_mu)
    for j in perm.copy():
        pr = np.ones_like(z) * np.inf  # compute marginal prob.
        I = np.arange(j, self.dim)  # search remaining dimensions
        D = np.diag(self.cov)
        # Residual conditional variances given the j already-factored dimensions.
        s = D[I] - np.sum(L[I, 0:j] ** 2, axis=1)
        s[s < 0] = self.eps  # clamp tiny negatives from roundoff
        s = np.sqrt(s)
        tl = (self.lb[I] - L[I, 0:j] @ z[0:j]) / s
        tu = (self.ub[I] - L[I, 0:j] @ z[0:j]) / s
        pr[I] = lnNormalProb(tl, tu)
        # find smallest marginal dimension
        k = np.argmin(pr)
        # flip dimensions k-->j
        jk = [j, k]
        kj = [k, j]
        self.cov[jk, :] = self.cov[kj, :]  # update rows of cov
        self.cov[:, jk] = self.cov[:, kj]  # update cols of cov
        L[jk, :] = L[kj, :]  # update only rows of L
        self.lb[jk] = self.lb[kj]  # update integration limits
        self.ub[jk] = self.ub[kj]  # update integration limits
        perm[jk] = perm[kj]  # keep track of permutation
        # construct L sequentially via Cholesky computation
        s = self.cov[j, j] - np.sum(L[j, 0:j] ** 2, axis=0)
        if s < -0.1:
            raise RuntimeError("Sigma is not positive semi-definite")
        elif s < 0:
            s = self.eps
        L[j, j] = np.sqrt(s)
        new_L = self.cov[j + 1:self.dim, j] - L[j + 1:self.dim, 0:j] @ L[j, 0:j].T
        L[j + 1:self.dim, j] = new_L / L[j, j]
        # find mean value, z(j), of truncated normal
        # NOTE(review): the slices below use 0:j-1 while every other step in this
        # loop conditions on the first j factored dimensions (slice 0:j); this
        # looks like an off-by-one in the ordering heuristic — confirm against
        # the reference implementation before changing.
        tl = (self.lb[j] - L[j, 0:j - 1] @ z[0:j - 1]) / L[j, j]
        tu = (self.ub[j] - L[j, 0:j - 1] @ z[0:j - 1]) / L[j, j]
        w = lnNormalProb(tl, tu)  # aids in computing expected value of trunc. normal
        z[j] = (np.exp(-.5 * tl ** 2 - w) - np.exp(-.5 * tu ** 2 - w)) / np.sqrt(2 * math.pi)
    return L, perm
def lnNormalProb(a, b):
    """Compute ln P(a < Z < b) for Z ~ N(0,1), elementwise and accurately for any bounds."""
    p = np.zeros_like(a)
    # Both bounds in the right tail (0 < a < b): work with upper-tail logs.
    right = a > 0
    if np.any(right):
        pa = lnPhi(a[right])
        pb = lnPhi(b[right])
        p[right] = pa + np.log1p(-np.exp(pb - pa))
    # Both bounds in the left tail (a < b < 0): mirror into the right tail.
    left = b < 0
    if np.any(left):
        pa = lnPhi(-a[left])  # log of lower tail
        pb = lnPhi(-b[left])
        p[left] = pb + np.log1p(-np.exp(pa - pb))
    # Interval straddling zero (a <= 0 <= b): direct evaluation is stable here.
    middle = ~right & ~left
    if np.any(middle):
        pa = special.erfc(-a[middle] / np.sqrt(2)) / 2  # lower tail
        pb = special.erfc(b[middle] / np.sqrt(2)) / 2  # upper tail
        p[middle] = np.log1p(-pa - pb)
    return p
def lnPhi(x):
    # computes logarithm of tail of Z~N(0,1) mitigating numerical roundoff errors
    # erfcx(x) = exp(x^2) * erfc(x), so recombining in log space keeps precision
    # deep in the tail; EPS (presumably a tiny constant defined elsewhere — TODO
    # confirm) guards against log(0).
    out = -0.5 * x ** 2 - np.log(2) + np.log(special.erfcx(x / np.sqrt(2)) + EPS)  # divide by zeros error -> add eps
    return out | [
"numpy.sum",
"numpy.ones",
"numpy.argmin",
"numpy.arange",
"numpy.exp",
"numpy.diag",
"numpy.linalg.solve",
"numpy.zeros_like",
"numpy.empty_like",
"numpy.append",
"numpy.expm1",
"numpy.log1p",
"numpy.ones_like",
"numpy.isinf",
"scipy.optimize.root",
"numpy.concatenate",
"numpy.log",... | [((20362, 20378), 'numpy.zeros_like', 'np.zeros_like', (['a'], {}), '(a)\n', (20375, 20378), True, 'import numpy as np\n'), ((20417, 20426), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (20423, 20426), True, 'import numpy as np\n'), ((20565, 20576), 'numpy.any', 'np.any', (['idx'], {}), '(idx)\n', (20571, 20576), True, 'import numpy as np\n'), ((20754, 20763), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (20760, 20763), True, 'import numpy as np\n'), ((2585, 2611), 'numpy.any', 'np.any', (['(self.ub <= self.lb)'], {}), '(self.ub <= self.lb)\n', (2591, 2611), True, 'import numpy as np\n'), ((2808, 2826), 'numpy.empty_like', 'np.empty_like', (['cov'], {}), '(cov)\n', (2821, 2826), True, 'import numpy as np\n'), ((2853, 2871), 'numpy.empty_like', 'np.empty_like', (['cov'], {}), '(cov)\n', (2866, 2871), True, 'import numpy as np\n'), ((2896, 2914), 'numpy.empty_like', 'np.empty_like', (['cov'], {}), '(cov)\n', (2909, 2914), True, 'import numpy as np\n'), ((4236, 4247), 'time.time', 'time.time', ([], {}), '()\n', (4245, 4247), False, 'import time\n'), ((7642, 7666), 'numpy.diag', 'np.diag', (['self.unscaled_L'], {}), '(self.unscaled_L)\n', (7649, 7666), True, 'import numpy as np\n'), ((7678, 7698), 'numpy.any', 'np.any', (['(D < self.eps)'], {}), '(D < self.eps)\n', (7684, 7698), True, 'import numpy as np\n'), ((8129, 8157), 'numpy.zeros', 'np.zeros', (['(2 * (self.dim - 1))'], {}), '(2 * (self.dim - 1))\n', (8137, 8157), True, 'import numpy as np\n'), ((8241, 8329), 'scipy.optimize.root', 'optimize.root', (['gradpsi', 'x0'], {'args': '(self.L, self.lb, self.ub)', 'method': '"""hybr"""', 'jac': '(True)'}), "(gradpsi, x0, args=(self.L, self.lb, self.ub), method='hybr',\n jac=True)\n", (8254, 8329), False, 'from scipy import optimize\n'), ((8953, 8976), 'numpy.empty_like', 'np.empty_like', (['self.cov'], {}), '(self.cov)\n', (8966, 8976), True, 'import numpy as np\n'), ((9003, 9026), 'numpy.empty_like', 'np.empty_like', (['self.cov'], {}), 
'(self.cov)\n', (9016, 9026), True, 'import numpy as np\n'), ((9265, 9276), 'time.time', 'time.time', ([], {}), '()\n', (9274, 9276), False, 'import time\n'), ((9346, 9357), 'time.time', 'time.time', ([], {}), '()\n', (9355, 9357), False, 'import time\n'), ((9372, 9394), 'numpy.zeros_like', 'np.zeros_like', (['self.lb'], {}), '(self.lb)\n', (9385, 9394), True, 'import numpy as np\n'), ((9694, 9705), 'time.time', 'time.time', ([], {}), '()\n', (9703, 9705), False, 'import time\n'), ((11440, 11460), 'numpy.append', 'np.append', (['mu', '[0.0]'], {}), '(mu, [0.0])\n', (11449, 11460), True, 'import numpy as np\n'), ((11472, 11495), 'numpy.zeros', 'np.zeros', (['(self.dim, n)'], {}), '((self.dim, n))\n', (11480, 11495), True, 'import numpy as np\n'), ((13000, 13017), 'numpy.empty_like', 'np.empty_like', (['lb'], {}), '(lb)\n', (13013, 13017), True, 'import numpy as np\n'), ((13223, 13232), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (13229, 13232), True, 'import numpy as np\n'), ((13384, 13393), 'numpy.any', 'np.any', (['J'], {}), '(J)\n', (13390, 13393), True, 'import numpy as np\n'), ((13590, 13599), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (13596, 13599), True, 'import numpy as np\n'), ((14162, 14179), 'numpy.empty_like', 'np.empty_like', (['lb'], {}), '(lb)\n', (14175, 14179), True, 'import numpy as np\n'), ((14285, 14294), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (14291, 14294), True, 'import numpy as np\n'), ((14469, 14478), 'numpy.any', 'np.any', (['I'], {}), '(I)\n', (14475, 14478), True, 'import numpy as np\n'), ((15805, 15830), 'numpy.expm1', 'np.expm1', (['(c - ub ** 2 / 2)'], {}), '(c - ub ** 2 / 2)\n', (15813, 15830), True, 'import numpy as np\n'), ((16356, 16370), 'numpy.sqrt', 'np.sqrt', (['(2 * x)'], {}), '(2 * x)\n', (16363, 16370), True, 'import numpy as np\n'), ((16533, 16552), 'numpy.append', 'np.append', (['x', '[0.0]'], {}), '(x, [0.0])\n', (16542, 16552), True, 'import numpy as np\n'), ((16565, 16585), 'numpy.append', 'np.append', (['mu', 
'[0.0]'], {}), '(mu, [0.0])\n', (16574, 16585), True, 'import numpy as np\n'), ((18280, 18299), 'numpy.arange', 'np.arange', (['self.dim'], {}), '(self.dim)\n', (18289, 18299), True, 'import numpy as np\n'), ((18312, 18335), 'numpy.zeros_like', 'np.zeros_like', (['self.cov'], {}), '(self.cov)\n', (18325, 18335), True, 'import numpy as np\n'), ((18348, 18375), 'numpy.zeros_like', 'np.zeros_like', (['self.orig_mu'], {}), '(self.orig_mu)\n', (18361, 18375), True, 'import numpy as np\n'), ((20907, 20925), 'numpy.log1p', 'np.log1p', (['(-pa - pb)'], {}), '(-pa - pb)\n', (20915, 20925), True, 'import numpy as np\n'), ((4452, 4491), 'numpy.concatenate', 'np.concatenate', (['(rv, Z[:, idx])'], {'axis': '(1)'}), '((rv, Z[:, idx]), axis=1)\n', (4466, 4491), True, 'import numpy as np\n'), ((8010, 8026), 'numpy.eye', 'np.eye', (['self.dim'], {}), '(self.dim)\n', (8016, 8026), True, 'import numpy as np\n'), ((9448, 9477), 'numpy.ones', 'np.ones', (['self.dim'], {'dtype': 'bool'}), '(self.dim, dtype=bool)\n', (9455, 9477), True, 'import numpy as np\n'), ((9631, 9642), 'time.time', 'time.time', ([], {}), '()\n', (9640, 9642), False, 'import time\n'), ((9800, 9821), 'numpy.empty_like', 'np.empty_like', (['last_Z'], {}), '(last_Z)\n', (9813, 9821), True, 'import numpy as np\n'), ((11125, 11136), 'time.time', 'time.time', ([], {}), '()\n', (11134, 11136), False, 'import time\n'), ((14953, 14967), 'numpy.where', 'np.where', (['test'], {}), '(test)\n', (14961, 14967), True, 'import numpy as np\n'), ((17076, 17087), 'numpy.zeros', 'np.zeros', (['d'], {}), '(d)\n', (17084, 17087), True, 'import numpy as np\n'), ((17733, 17774), 'numpy.concatenate', 'np.concatenate', (['(dfdx, dfdm[:-1])'], {'axis': '(0)'}), '((dfdx, dfdm[:-1]), axis=0)\n', (17747, 17774), True, 'import numpy as np\n'), ((18491, 18513), 'numpy.arange', 'np.arange', (['j', 'self.dim'], {}), '(j, self.dim)\n', (18500, 18513), True, 'import numpy as np\n'), ((18561, 18578), 'numpy.diag', 'np.diag', (['self.cov'], {}), 
'(self.cov)\n', (18568, 18578), True, 'import numpy as np\n'), ((18681, 18691), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (18688, 18691), True, 'import numpy as np\n'), ((18906, 18919), 'numpy.argmin', 'np.argmin', (['pr'], {}), '(pr)\n', (18915, 18919), True, 'import numpy as np\n'), ((19696, 19706), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (19703, 19706), True, 'import numpy as np\n'), ((21064, 21073), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21070, 21073), True, 'import numpy as np\n'), ((4138, 4168), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.float32'}), '([], dtype=np.float32)\n', (4146, 4168), True, 'import numpy as np\n'), ((4781, 4792), 'time.time', 'time.time', ([], {}), '()\n', (4790, 4792), False, 'import time\n'), ((5786, 5797), 'time.time', 'time.time', ([], {}), '()\n', (5795, 5797), False, 'import time\n'), ((6625, 6662), 'numpy.concatenate', 'np.concatenate', (['(rv, Z_gibbs)'], {'axis': '(1)'}), '((rv, Z_gibbs), axis=1)\n', (6639, 6662), True, 'import numpy as np\n'), ((9913, 9942), 'numpy.ones', 'np.ones', (['self.dim'], {'dtype': 'bool'}), '(self.dim, dtype=bool)\n', (9920, 9942), True, 'import numpy as np\n'), ((10032, 10043), 'time.time', 'time.time', ([], {}), '()\n', (10041, 10043), False, 'import time\n'), ((10226, 10241), 'numpy.sqrt', 'np.sqrt', (['var[i]'], {}), '(var[i])\n', (10233, 10241), True, 'import numpy as np\n'), ((10296, 10337), 'numpy.array', 'np.array', (['[(self.moved_lb[i] - mu_i) / s]'], {}), '([(self.moved_lb[i] - mu_i) / s])\n', (10304, 10337), True, 'import numpy as np\n'), ((10359, 10400), 'numpy.array', 'np.array', (['[(self.moved_ub[i] - mu_i) / s]'], {}), '([(self.moved_ub[i] - mu_i) / s])\n', (10367, 10400), True, 'import numpy as np\n'), ((10423, 10434), 'time.time', 'time.time', ([], {}), '()\n', (10432, 10434), False, 'import time\n'), ((11090, 11101), 'time.time', 'time.time', ([], {}), '()\n', (11099, 11101), False, 'import time\n'), ((14647, 14657), 'numpy.sqrt', 'np.sqrt', (['(2)'], 
{}), '(2)\n', (14654, 14657), True, 'import numpy as np\n'), ((17449, 17475), 'numpy.exp', 'np.exp', (['(-0.5 * lt ** 2 - w)'], {}), '(-0.5 * lt ** 2 - w)\n', (17455, 17475), True, 'import numpy as np\n'), ((17478, 17498), 'numpy.sqrt', 'np.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (17485, 17498), True, 'import numpy as np\n'), ((17516, 17542), 'numpy.exp', 'np.exp', (['(-0.5 * ut ** 2 - w)'], {}), '(-0.5 * ut ** 2 - w)\n', (17522, 17542), True, 'import numpy as np\n'), ((17545, 17565), 'numpy.sqrt', 'np.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (17552, 17565), True, 'import numpy as np\n'), ((17824, 17836), 'numpy.isinf', 'np.isinf', (['lt'], {}), '(lt)\n', (17832, 17836), True, 'import numpy as np\n'), ((17857, 17869), 'numpy.isinf', 'np.isinf', (['ut'], {}), '(ut)\n', (17865, 17869), True, 'import numpy as np\n'), ((17999, 18008), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (18005, 18008), True, 'import numpy as np\n'), ((18424, 18439), 'numpy.ones_like', 'np.ones_like', (['z'], {}), '(z)\n', (18436, 18439), True, 'import numpy as np\n'), ((18602, 18632), 'numpy.sum', 'np.sum', (['(L[I, 0:j] ** 2)'], {'axis': '(1)'}), '(L[I, 0:j] ** 2, axis=1)\n', (18608, 18632), True, 'import numpy as np\n'), ((19491, 19521), 'numpy.sum', 'np.sum', (['(L[j, 0:j] ** 2)'], {'axis': '(0)'}), '(L[j, 0:j] ** 2, axis=0)\n', (19497, 19521), True, 'import numpy as np\n'), ((20208, 20228), 'numpy.sqrt', 'np.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (20215, 20228), True, 'import numpy as np\n'), ((6982, 7013), 'numpy.concatenate', 'np.concatenate', (['(rv, Z)'], {'axis': '(1)'}), '((rv, Z), axis=1)\n', (6996, 7013), True, 'import numpy as np\n'), ((9556, 9609), 'numpy.linalg.solve', 'np.linalg.solve', (['cov_s[mask][:, mask]', 'cov_s[mask, i]'], {}), '(cov_s[mask][:, mask], cov_s[mask, i])\n', (9571, 9609), True, 'import numpy as np\n'), ((10084, 10135), 'numpy.linalg.solve', 'np.linalg.solve', (['cov_s[mask][:, mask]', 'last_Z[mask]'], {}), '(cov_s[mask][:, 
mask], last_Z[mask])\n', (10099, 10135), True, 'import numpy as np\n'), ((10165, 10176), 'time.time', 'time.time', ([], {}), '()\n', (10174, 10176), False, 'import time\n'), ((10731, 10742), 'time.time', 'time.time', ([], {}), '()\n', (10740, 10742), False, 'import time\n'), ((20151, 20177), 'numpy.exp', 'np.exp', (['(-0.5 * tl ** 2 - w)'], {}), '(-0.5 * tl ** 2 - w)\n', (20157, 20177), True, 'import numpy as np\n'), ((20179, 20205), 'numpy.exp', 'np.exp', (['(-0.5 * tu ** 2 - w)'], {}), '(-0.5 * tu ** 2 - w)\n', (20185, 20205), True, 'import numpy as np\n'), ((20508, 20523), 'numpy.exp', 'np.exp', (['(pb - pa)'], {}), '(pb - pa)\n', (20514, 20523), True, 'import numpy as np\n'), ((20687, 20702), 'numpy.exp', 'np.exp', (['(pa - pb)'], {}), '(pa - pb)\n', (20693, 20702), True, 'import numpy as np\n'), ((20799, 20809), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20806, 20809), True, 'import numpy as np\n'), ((20862, 20872), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (20869, 20872), True, 'import numpy as np\n'), ((4371, 4388), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (4385, 4388), True, 'import numpy as np\n'), ((14561, 14571), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14568, 14571), True, 'import numpy as np\n'), ((14612, 14622), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (14619, 14622), True, 'import numpy as np\n'), ((15858, 15875), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (15872, 15875), True, 'import numpy as np\n'), ((16163, 16180), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (16177, 16180), True, 'import numpy as np\n'), ((18164, 18184), 'numpy.diag', 'np.diag', (['(1 + dP[:-1])'], {}), '(1 + dP[:-1])\n', (18171, 18184), True, 'import numpy as np\n'), ((21101, 21111), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (21108, 21111), True, 'import numpy as np\n'), ((15959, 15976), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (15973, 15976), True, 'import numpy as 
np\n'), ((16118, 16135), 'numpy.random.rand', 'np.random.rand', (['d'], {}), '(d)\n', (16132, 16135), True, 'import numpy as np\n'), ((4732, 4743), 'time.time', 'time.time', ([], {}), '()\n', (4741, 4743), False, 'import time\n'), ((5163, 5185), 'numpy.zeros_like', 'np.zeros_like', (['self.lb'], {}), '(self.lb)\n', (5176, 5185), True, 'import numpy as np\n'), ((5288, 5304), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5302, 5304), True, 'import numpy as np\n'), ((5386, 5402), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5400, 5402), True, 'import numpy as np\n'), ((6351, 6362), 'time.time', 'time.time', ([], {}), '()\n', (6360, 6362), False, 'import time\n'), ((10985, 10996), 'time.time', 'time.time', ([], {}), '()\n', (10994, 10996), False, 'import time\n')] |
import numpy as np
import json
from ofdft_ml.statslib import Forward_PCA_transform, Backward_PCA_transform, ScalarGP, MultitaskGP, SeqModel
class Model_loader(object):
    """Load training arrays plus tuned hyperparameters and build a chained model.

    Reads `features.npy`/`targets.npy` from a directory and a JSON file holding
    `n_components` and the mean-square-error hyperparameters (gamma, noise).
    """

    def __init__(self, train_data_dir, param_fname, train_size=None):
        """Read the training arrays and the JSON hyperparameter file.

        :param train_data_dir: path prefix for 'features.npy' / 'targets.npy'.
        :param param_fname: JSON file with 'n_components' and
            'mean_square_error'->'hyperparameter' = [gamma, noise].
        :param train_size: optional cap on the number of training rows used.
        """
        features = np.load(train_data_dir + 'features.npy')
        targets = np.load(train_data_dir + 'targets.npy')
        if train_size is not None:
            features, targets = features[:train_size], targets[:train_size]
        with open(param_fname, 'r') as f:
            params = json.load(f)
        self.train_data = {'features': features, 'targets': targets}
        self.n_cmps = params['n_components']
        self.gamma, self.noise = params['mean_square_error']['hyperparameter']

    def load(self):
        """Fit the PCA transform and GP regressor, returning the composed pipeline."""
        forward = Forward_PCA_transform(self.n_cmps)
        transformed = forward.fit_transform(self.train_data)
        regressor = SeqModel(self.gamma, self.noise, ((1e-4, 5.0), (1e-5, 0.1)),
                            ScalarGP, MultitaskGP, 'eval')
        regressor.fit(transformed['features'], transformed['targets'])
        backward = Backward_PCA_transform(forward)
        return Composer(forward, regressor, backward)
class Composer(object):
    """Chain forward transform -> regressor -> backward transform into one callable."""

    def __init__(self, forward_transformer, model, backward_transformer):
        """Store the three pipeline stages used by __call__."""
        self.forward_transformer = forward_transformer
        self.model = model
        self.backward_transformer = backward_transformer

    def __call__(self, feature):
        """Predict Ek (presumably kinetic energy — per the names) and its derivative.

        :param feature: 1D sample or 2D batch of samples.
        :return: (Ek, Ek_derivative) — per-sample energies and the
            back-transformed derivative rows.
        """
        # Promote a single sample to a batch of one.
        batch = feature if feature.ndim >= 2 else feature[np.newaxis, :]
        transformed = self.forward_transformer.transform_x(batch)
        predictions = self.model.predict(transformed)
        # Column 0 is the energy; remaining columns are its (transformed-space) derivative.
        energies = predictions[:, 0]
        derivatives = self.backward_transformer(predictions[:, 1:])
        return energies, derivatives
| [
"numpy.load",
"ofdft_ml.statslib.Backward_PCA_transform",
"json.load",
"ofdft_ml.statslib.Forward_PCA_transform",
"ofdft_ml.statslib.SeqModel"
] | [((266, 306), 'numpy.load', 'np.load', (["(train_data_dir + 'features.npy')"], {}), "(train_data_dir + 'features.npy')\n", (273, 306), True, 'import numpy as np\n'), ((331, 370), 'numpy.load', 'np.load', (["(train_data_dir + 'targets.npy')"], {}), "(train_data_dir + 'targets.npy')\n", (338, 370), True, 'import numpy as np\n'), ((944, 978), 'ofdft_ml.statslib.Forward_PCA_transform', 'Forward_PCA_transform', (['self.n_cmps'], {}), '(self.n_cmps)\n', (965, 978), False, 'from ofdft_ml.statslib import Forward_PCA_transform, Backward_PCA_transform, ScalarGP, MultitaskGP, SeqModel\n'), ((1181, 1279), 'ofdft_ml.statslib.SeqModel', 'SeqModel', (['self.gamma', 'self.noise', '((0.0001, 5.0), (1e-05, 0.1))', 'ScalarGP', 'MultitaskGP', '"""eval"""'], {}), "(self.gamma, self.noise, ((0.0001, 5.0), (1e-05, 0.1)), ScalarGP,\n MultitaskGP, 'eval')\n", (1189, 1279), False, 'from ofdft_ml.statslib import Forward_PCA_transform, Backward_PCA_transform, ScalarGP, MultitaskGP, SeqModel\n'), ((1364, 1407), 'ofdft_ml.statslib.Backward_PCA_transform', 'Backward_PCA_transform', (['forward_transformer'], {}), '(forward_transformer)\n', (1386, 1407), False, 'from ofdft_ml.statslib import Forward_PCA_transform, Backward_PCA_transform, ScalarGP, MultitaskGP, SeqModel\n'), ((586, 598), 'json.load', 'json.load', (['f'], {}), '(f)\n', (595, 598), False, 'import json\n')] |
#!/usr/bin/env python3
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import itertools
import random
class PCFGVMap(nn.Module):
    """PCFG chart parser: inside algorithm (log partition) and Viterbi (best parse).

    States are split into `nt_states` non-terminals (indices [0, NT)) and
    `t_states` pre-terminals (indices [NT, NT+T)); pre-terminals only label
    single-token spans.
    """
    def __init__(self, nt_states, t_states):
        super(PCFGVMap, self).__init__()
        self.nt_states = nt_states  # non-terminal states (NT)
        self.t_states = t_states  # terminal states (T)
        self.states = nt_states + t_states  # total_states(NT+T) = terminal states (T) + non-terminal states (NT)
        self.huge = 1e9  # large constant used as -inf surrogate in log space

    def logadd(self, x, y):
        # Numerically stable log(exp(x) + exp(y)).
        d = torch.max(x,y)
        return torch.log(torch.exp(x-d) + torch.exp(y-d)) + d

    def logsumexp(self, x, dim=1):
        # Numerically stable log-sum-exp along `dim` (max-shift trick).
        d = torch.max(x, dim)[0]
        if x.dim() == 1:
            # 1D input: d is a scalar, plain broadcasting suffices.
            return torch.log(torch.exp(x - d).sum(dim)) + d
        else:
            return torch.log(torch.exp(x - d.unsqueeze(dim).expand_as(x)).sum(dim)) + d

    def _inside(self, unary_scores, rule_scores, root_scores):
        """Inside algorithm: return log Z, the log partition over all parses.

        unary scores : n x T        (log-potentials of pre-terminal -> token)
        rule scores  : NT x (NT+T) x (NT+T)  (log-potentials of binary rules)
        root         : NT           (log-potentials of root non-terminals)
        """
        #inside step
        #unary scores : n x T
        #rule scores : NT x (NT+T) x (NT+T) ()
        #root : NT
        # sequence length (n)
        n = unary_scores.size(0)
        # alpha[i, j, s] = log inside score of state s spanning tokens i..j.
        alpha = unary_scores.new_zeros(n, n, self.states).fill_(-self.huge)
        # Base case: length-1 spans get their pre-terminal unary scores.
        for k in range(n):
            for state in range(self.t_states):
                alpha[k, k, self.nt_states + state] = unary_scores[k, state]
        # Recurse over span lengths l, combining left child [i, k] with right child [k+1, j].
        for i in range(n):
            for l in np.arange(1, n+1):
                j = i + l
                if j > n-1:
                    break
                tmp_u = []
                for k in np.arange(i, j):
                    if i == k:
                        # If the span is of length 1, then only pre-terminals parents exist.
                        l_start = self.nt_states
                        l_end = self.states
                    else:
                        l_start = 0
                        l_end = self.nt_states
                    if k+1 == j:
                        # If the span is of length 1, then only pre-terminals parents exist.
                        r_start = self.nt_states
                        r_end = self.states
                    else:
                        r_start = 0
                        r_end = self.nt_states
                    # P(A..., B...->C...)
                    tmp_rule_scores = rule_scores[:, l_start:l_end, r_start:r_end] # NT x NT+T X NT+T
                    # log(\alpha(i, k, B...))
                    alpha_left = alpha[i, k, l_start:l_end] # (NT + T)
                    alpha_left = alpha_left.unsqueeze(1).unsqueeze(0) # 1 x (NT+T) x 1
                    # log(\alpha(k+1, j, C...))
                    alpha_right = alpha[k+1, j, r_start:r_end] # NT
                    alpha_right = alpha_right.unsqueeze(0).unsqueeze(1) # 1 x 1 x (NT+T)
                    # log(\alpha(i, k, j, A... -> B..., C...)) = \
                    #   log(\alpha(i, k, B...)) + log(\alpha(k+1, j, C...)) + log(p(A... -> B..., C...))
                    tmp_scores = alpha_left + alpha_right + tmp_rule_scores # NT x NT+T x NT+T
                    tmp_scores = tmp_scores.view(self.nt_states, -1)
                    tmp_u.append(self.logsumexp(tmp_scores, 1).unsqueeze(1))
                tmp_u = torch.cat(tmp_u, 1)
                tmp_u = self.logsumexp(tmp_u, 1)
                # log(\alpha(i, j, A...))
                alpha[i, j, :self.nt_states] = tmp_u[:self.nt_states]
        # log(\alpha(0, n-1, A...))
        log_Z = alpha[0, n-1, :self.nt_states] + root_scores
        # \alpha(0, n-1): marginalize over root non-terminals.
        log_Z = self.logsumexp(log_Z, 0)
        return log_Z

    def _viterbi(self, unary_scores, rule_scores, root_scores):
        """Viterbi parse: return (best score, span indicator matrix, spans, token tags).

        unary scores : n x T
        rule scores  : NT x (NT+T) x (NT+T)
        """
        #unary scores :n x T
        #rule scores :NT x (NT+T) x (NT+T)
        # sequence length (n)
        n = unary_scores.size(0)
        # scores[i, j, s] = best log score of state s over span i..j; bp / left_bp /
        # right_bp store the split point and the chosen child states for backtracking.
        scores = unary_scores.new_zeros(n, n, self.states).fill_(-self.huge)
        bp = unary_scores.new_zeros(n, n, self.states).fill_(-1)
        left_bp = unary_scores.new_zeros(n, n, self.states).fill_(-1)
        right_bp = unary_scores.new_zeros(n, n, self.states).fill_(-1)
        argmax = unary_scores.new_zeros(n, n).fill_(-1)
        argmax_tags = unary_scores.new_zeros(n).fill_(-1)
        spans = []
        # Base case: length-1 spans take their pre-terminal unary scores.
        for k in range(n):
            for state in range(self.t_states):
                scores[k, k, self.nt_states + state] = unary_scores[k, state]
        for i in range(n):
            for l in np.arange(1, n+1):
                j = i + l
                if j > n-1:
                    break
                tmp_max_score = []
                tmp_left_child = []
                tmp_right_child = []
                for k in np.arange(i, j):
                    if i == k:
                        l_start = self.nt_states
                        l_end = self.states
                    else:
                        l_start = 0
                        l_end = self.nt_states
                    if k+1 == j:
                        r_start = self.nt_states
                        r_end = self.states
                    else:
                        r_start = 0
                        r_end = self.nt_states
                    tmp_rule_scores = rule_scores[:, l_start:l_end, r_start:r_end] # NT x NT+T X NT+T
                    beta_left = scores[i, k, l_start:l_end] # NT
                    beta_left = beta_left.unsqueeze(1).unsqueeze(0) # 1 x (NT+T) x 1
                    beta_right = scores[k+1, j, r_start:r_end] # NT
                    beta_right = beta_right.unsqueeze(0).unsqueeze(1) # 1 x 1 x (NT+T)
                    tmp_scores = beta_left + beta_right + tmp_rule_scores # NT x NT+T x NT+T
                    tmp_scores_flat = tmp_scores.view(self.nt_states, -1)
                    max_score, max_idx = torch.max(tmp_scores_flat, -1) # NT
                    tmp_max_score.append(max_score.unsqueeze(1)) # NT x 1
                    # Using rstates as it can be 60 (NT) or 90 (T+NT).
                    r_states = tmp_scores.size(2)
                    # Decode the flat argmax into (left child, right child).
                    # NOTE(review): float division + floor for integer division may lose
                    # precision for very large indices — confirm ranges before relying on it.
                    left_child = (max_idx.float() / r_states).floor().long()
                    tmp_left_child.append(left_child.unsqueeze(1) + l_start) # NT x 1
                    right_child = torch.remainder(max_idx, r_states)
                    tmp_right_child.append(right_child.unsqueeze(1) + r_start) # NT x 1
                tmp_max_score = torch.cat(tmp_max_score, 1) # NT x l
                tmp_left_child = torch.cat(tmp_left_child, 1) # NT x l
                tmp_right_child = torch.cat(tmp_right_child, 1) # NT x l
                # Best split point k for each parent state, plus its child states.
                max_score, max_idx = torch.max(tmp_max_score, 1) # NT, NT
                max_left_child = torch.gather(tmp_left_child, 1, max_idx.unsqueeze(1)).squeeze(1) # (1,)
                max_right_child = torch.gather(tmp_right_child, 1, max_idx.unsqueeze(1)).squeeze(1) # (1,)
                scores[i, j, :self.nt_states] = max_score[:self.nt_states]
                bp[i, j, :self.nt_states] = max_idx[:self.nt_states] + i
                left_bp[i, j, :self.nt_states] = max_left_child[:self.nt_states]
                right_bp[i, j, :self.nt_states] = max_right_child[:self.nt_states]
        # Pick the best root non-terminal for the full span.
        max_score = scores[0, n-1, :self.nt_states] + root_scores
        max_score, max_idx = torch.max(max_score, -1)
        def _backtrack(i, j, state):
            # Recursively recover the best tree from the backpointer tables.
            assert(i <= j)
            left_state = int(left_bp[i][j][state])
            right_state = int(right_bp[i][j][state])
            argmax[i][j] = 1
            if i == j:
                # Length-1 span: record the pre-terminal tag (offset back into [0, T)).
                argmax_tags[i] = state - self.nt_states
                return None
            else:
                k = int(bp[i][j][state])
                spans.insert(0, (i,j, state))
                _backtrack(i, k, left_state)
                _backtrack(k+1, j, right_state)
                return None
        _backtrack(0, n-1, max_idx)
        return scores[0, n-1, 0], argmax, spans, argmax_tags
| [
"torch.remainder",
"torch.cat",
"torch.exp",
"torch.max",
"numpy.arange"
] | [((525, 540), 'torch.max', 'torch.max', (['x', 'y'], {}), '(x, y)\n', (534, 540), False, 'import torch\n'), ((6298, 6322), 'torch.max', 'torch.max', (['max_score', '(-1)'], {}), '(max_score, -1)\n', (6307, 6322), False, 'import torch\n'), ((646, 663), 'torch.max', 'torch.max', (['x', 'dim'], {}), '(x, dim)\n', (655, 663), False, 'import torch\n'), ((1305, 1324), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1314, 1324), True, 'import numpy as np\n'), ((3890, 3909), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (3899, 3909), True, 'import numpy as np\n'), ((1415, 1430), 'numpy.arange', 'np.arange', (['i', 'j'], {}), '(i, j)\n', (1424, 1430), True, 'import numpy as np\n'), ((2805, 2824), 'torch.cat', 'torch.cat', (['tmp_u', '(1)'], {}), '(tmp_u, 1)\n', (2814, 2824), False, 'import torch\n'), ((4066, 4081), 'numpy.arange', 'np.arange', (['i', 'j'], {}), '(i, j)\n', (4075, 4081), True, 'import numpy as np\n'), ((5490, 5517), 'torch.cat', 'torch.cat', (['tmp_max_score', '(1)'], {}), '(tmp_max_score, 1)\n', (5499, 5517), False, 'import torch\n'), ((5561, 5589), 'torch.cat', 'torch.cat', (['tmp_left_child', '(1)'], {}), '(tmp_left_child, 1)\n', (5570, 5589), False, 'import torch\n'), ((5625, 5654), 'torch.cat', 'torch.cat', (['tmp_right_child', '(1)'], {}), '(tmp_right_child, 1)\n', (5634, 5654), False, 'import torch\n'), ((5694, 5721), 'torch.max', 'torch.max', (['tmp_max_score', '(1)'], {}), '(tmp_max_score, 1)\n', (5703, 5721), False, 'import torch\n'), ((563, 579), 'torch.exp', 'torch.exp', (['(x - d)'], {}), '(x - d)\n', (572, 579), False, 'import torch\n'), ((580, 596), 'torch.exp', 'torch.exp', (['(y - d)'], {}), '(y - d)\n', (589, 596), False, 'import torch\n'), ((4961, 4991), 'torch.max', 'torch.max', (['tmp_scores_flat', '(-1)'], {}), '(tmp_scores_flat, -1)\n', (4970, 4991), False, 'import torch\n'), ((5352, 5386), 'torch.remainder', 'torch.remainder', (['max_idx', 'r_states'], {}), '(max_idx, r_states)\n', 
(5367, 5386), False, 'import torch\n'), ((715, 731), 'torch.exp', 'torch.exp', (['(x - d)'], {}), '(x - d)\n', (724, 731), False, 'import torch\n')] |
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
import argparse
def percent_error(exp_value, theo_value):
    """Return the relative (signed) error of *exp_value* w.r.t. *theo_value*.

    Fix: the original computed the error but fell off the end of the
    function, so it always returned None.
    """
    error = (exp_value - theo_value) / theo_value
    # theo_value * error + theo_value = exp_value
    return error
def margin_line(error, exp_val, theo_val):
    """Return the value offset from *theo_val* by the relative margin *error*.

    Note: *exp_val* is ignored; the parameter is kept only so existing
    positional callers keep working.
    """
    offset = theo_val * error
    return offset + theo_val
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-o", dest="output_fname")
parser.add_argument("-i", dest="input_fname")
args = parser.parse_args()

# Load the latency CSV.  Column 1 holds the summed latency, column 2 the
# direct measurement; both are converted from seconds to milliseconds.
latencies = genfromtxt(args.input_fname, delimiter=',', skip_header=True)
exp_val = latencies[:, 2] * 1e3   # measured
theo_val = latencies[:, 1] * 1e3  # summed

fig, ax = plt.subplots(1, 1)
ax.scatter(theo_val, exp_val, s=5, color='orange')
ax.set_xlabel('Summed Latency (ms)')
ax.set_ylabel('Direct Measurement (ms)')

# Axis limits padded slightly beyond the data range.
xmin = min(exp_val.min(), theo_val.min()) - 0.05
xmax = max(exp_val.max(), theo_val.max()) + 0.05
# NOTE(review): xlin is fixed to 0-100 while the limits below are data
# driven; lines outside [xmin, xmax] are simply clipped — consider
# np.linspace(xmin, xmax, 100) if the data can exceed 100 ms.
xlin = np.linspace(0, 100, 100)

# Identity line.  Fix: the original passed 0.5 positionally, which matplotlib
# interprets as the start of a new data series rather than a line width.
ax.plot(xlin, xlin, linewidth=0.5)
# +/-10% and +/-20% margin lines around the identity.
ax.plot(xlin, margin_line(0.1, exp_val, xlin), linestyle='--', color='gray')
ax.plot(xlin, margin_line(-0.1, exp_val, xlin), linestyle='--', color='gray')
ax.plot(xlin, margin_line(0.2, exp_val, xlin), linestyle='--', color='gray')
ax.plot(xlin, margin_line(-0.2, exp_val, xlin), linestyle='--', color='gray')

ax.set_xlim(xmin, xmax)
ax.set_ylim(xmin, xmax)
ax.set_aspect('equal')
plt.savefig(args.output_fname)
"argparse.ArgumentParser",
"numpy.genfromtxt",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((354, 379), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (377, 379), False, 'import argparse\n'), ((511, 572), 'numpy.genfromtxt', 'genfromtxt', (['args.input_fname'], {'delimiter': '""","""', 'skip_header': '(True)'}), "(args.input_fname, delimiter=',', skip_header=True)\n", (521, 572), False, 'from numpy import genfromtxt\n'), ((662, 680), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (674, 680), True, 'import matplotlib.pyplot as plt\n'), ((908, 932), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(100)'], {}), '(0, 100, 100)\n', (919, 932), True, 'import numpy as np\n'), ((1314, 1344), 'matplotlib.pyplot.savefig', 'plt.savefig', (['args.output_fname'], {}), '(args.output_fname)\n', (1325, 1344), True, 'import matplotlib.pyplot as plt\n')] |
#%%
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import pathlib
import skimage
from PIL import Image
import imgaug
from imgaug import augmenters as iaa
from skimage.filters import threshold_otsu
# Root directory of the project; the mrcnn package is imported relative to it.
ROOT_DIR = os.path.abspath("../")
sys.path.append(ROOT_DIR) # To find local version of the library
# NOTE: these imports must stay below the sys.path.append above, otherwise
# the local mrcnn package cannot be resolved.
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
# Directory to store logs and trained model checkpoints.
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Path to the pre-trained COCO weights file.
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
#%%
# minimum input size = 128
class ShapesConfig(Config):
    """Training configuration for the two-class skin dataset.

    Derives from the base Mask R-CNN ``Config``; only the values below are
    overridden.
    """
    # Give the configuration a recognizable name
    NAME = "skin"
    GPU_COUNT = 1
    IMAGES_PER_GPU = 16
    NUM_CLASSES = 1 + 2 # background + 2 types
    # 128x128 inputs (see the note above: minimum input size is 128).
    IMAGE_MIN_DIM = 128
    IMAGE_MAX_DIM = 128
    RPN_ANCHOR_SCALES = (16,32,64,128,256) # anchor side in pixels
    TRAIN_ROIS_PER_IMAGE = 8
    # presumably 626 samples in the data set, so one epoch covers them all
    # — TODO confirm against the actual data directory.
    STEPS_PER_EPOCH = 626 // IMAGES_PER_GPU
    VALIDATION_STEPS = 626 // IMAGES_PER_GPU
    LEARNING_RATE = 0.001
    USE_MINI_MASK = False
    # gpu_options = True
config = ShapesConfig()
def get_ax(rows=1, cols=1, size=8):
    """Return a rows x cols grid of Matplotlib axes, `size` inches per cell.

    The figure itself is discarded; only the axes object is returned.
    """
    figure_size = (size * cols, size * rows)
    axes = plt.subplots(rows, cols, figsize=figure_size)[1]
    return axes
class ShapesDataset(utils.Dataset):
    """Dataset of skin tiles: 'fibroblast' tiles plus 'false positive' tiles.

    Images live under ``<subdir>/image/*.png``; each mask lives at the same
    relative path under the sibling ``mask`` directory.
    """

    def list_images(self, data_dir):
        """Register every image found under *data_dir*.

        Fix: ids now come from a single running counter shared by both glob
        passes.  The original restarted ``enumerate()`` for the second pass
        and offset it by the last index of the first pass, which assigned one
        duplicate image_id (and raised NameError when the first pattern
        matched nothing).
        """
        # define classes
        self.add_class("skin", 1, "fibroblast")
        self.add_class("skin", 2, "falsePositive")
        next_id = 0
        for pattern in ('*tile*/image/*.png', '*false_positive*/image/*.png'):
            train_images = list(data_dir.glob(pattern))
            print('# image in this dataset : ', len(train_images))
            for train_image in train_images:
                # Mask path mirrors the image path ('image' dir -> 'mask' dir).
                # NOTE: replace() rewrites every occurrence of 'image' in the
                # path, not just the directory component (as before).
                label = str(train_image).replace("image", "mask")
                self.add_image("skin", image_id=next_id, path=train_image, labelpath=label,
                               height=config.IMAGE_SHAPE[0], width=config.IMAGE_SHAPE[1])
                next_id += 1

    def load_image(self, image_id):
        """Load the specified image and return a [H,W,3] Numpy array.
        """
        # Load image
        image = skimage.io.imread(self.image_info[image_id]['path'])
        # If grayscale. Convert to RGB for consistency.
        if image.ndim != 3:
            print('grayscale to rgb')
            image = skimage.color.gray2rgb(image)
        # If has an alpha channel, remove it for consistency
        if image.shape[-1] == 4:
            print('rgba to rgb')
            image = image[..., :3]
        # image = cv2.resize(image,dsize=(256,256))
        return image.astype(np.uint8)

    def load_mask(self, image_id):
        """Return ``(mask, class_ids)`` for the image.

        The mask PNG is loaded as a single [H,W,1] instance channel; the
        class id is 2 for false-positive tiles and 1 otherwise.
        """
        label = self.image_info[image_id]['labelpath']
        mask = Image.open(label)
        mask = np.array(mask).astype('int')
        mask = mask[:, :, np.newaxis]
        if 'false_positive' in label:
            class_ids = np.array([2])
        else:
            class_ids = np.array([1])
        return mask, class_ids

    def image_reference(self, image_id):
        """Return the shapes data of the image."""
        info = self.image_info[image_id]
        # NOTE(review): add_image() above never stores a 'truth' key, so this
        # branch raises KeyError for 'skin' images — confirm the intended
        # metadata before relying on this method.
        if info["source"] == "skin":
            return info["truth"]
        else:
            super(self.__class__).image_reference(self, image_id)
#%%
class InferenceConfig(ShapesConfig):
    """Inference-time overrides: process one image at a time on one GPU."""
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    IMAGE_MAX_DIM = 128
inference_config = InferenceConfig()
#%%
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(ROOT_DIR, ".h5 file name here")
model_path = model.find_last()
# Load trained weights
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
#%%
from PIL import Image
from skimage import io
## put folder path here to apply your model to classify
src = r'\\10.162.80.6\Kyu_Sync\Aging\data\svs\20x\segmentation\Wirtz.Denis_OTS-19_5021-003_false_positive_4\image'
##
# Classified label images are written into a 'classified' subfolder of src.
dst = os.path.join(src,'classified')
if not os.path.exists(dst): os.mkdir(dst)
images = [os.path.join(src,_) for _ in os.listdir(src) if _.endswith('png')]
# Collected class ids over all images (printed at the end for inspection).
idd = []
for original_image in images:
    original_image2 = skimage.io.imread(original_image)
    results = model.detect([original_image2], verbose=1)
    r = results[0]
    masks = r['masks']
    # Reorder from (H, W, N) to (N, H, W) so the masks can be iterated.
    # NOTE(review): assumes r['masks'] is a boolean (H, W, N) array as
    # returned by Mask R-CNN — confirm.
    masks = np.moveaxis(masks,2,0)
    if len(masks)<1:
        continue
    # Flatten all instance masks into one label image (later masks overwrite
    # earlier ones where they overlap).
    maskzero=np.zeros(masks[0].shape)
    # NOTE(review): 'id' shadows the builtin of the same name.
    for mask,id in zip(masks,r['class_ids']):
        idd.append(id)
        maskzero[mask]=id
    im = Image.fromarray(maskzero)
    im.save(os.path.join(dst, os.path.basename(original_image).replace('png','tif')))
print(idd)
| [
"sys.path.append",
"os.mkdir",
"os.path.abspath",
"numpy.moveaxis",
"skimage.color.gray2rgb",
"os.path.basename",
"os.path.exists",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"mrcnn.model.MaskRCNN",
"os.path.join",
"os.listdir",
... | [((311, 333), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (326, 333), False, 'import os\n'), ((334, 359), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (349, 359), False, 'import sys\n'), ((555, 585), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""logs"""'], {}), "(ROOT_DIR, 'logs')\n", (567, 585), False, 'import os\n'), ((604, 647), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""mask_rcnn_coco.h5"""'], {}), "(ROOT_DIR, 'mask_rcnn_coco.h5')\n", (616, 647), False, 'import os\n'), ((3838, 3924), 'mrcnn.model.MaskRCNN', 'modellib.MaskRCNN', ([], {'mode': '"""inference"""', 'config': 'inference_config', 'model_dir': 'MODEL_DIR'}), "(mode='inference', config=inference_config, model_dir=\n MODEL_DIR)\n", (3855, 3924), True, 'import mrcnn.model as modellib\n'), ((4495, 4526), 'os.path.join', 'os.path.join', (['src', '"""classified"""'], {}), "(src, 'classified')\n", (4507, 4526), False, 'import os\n'), ((1249, 1309), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'figsize': '(size * cols, size * rows)'}), '(rows, cols, figsize=(size * cols, size * rows))\n', (1261, 1309), True, 'import matplotlib.pyplot as plt\n'), ((4533, 4552), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (4547, 4552), False, 'import os\n'), ((4554, 4567), 'os.mkdir', 'os.mkdir', (['dst'], {}), '(dst)\n', (4562, 4567), False, 'import os\n'), ((4579, 4599), 'os.path.join', 'os.path.join', (['src', '_'], {}), '(src, _)\n', (4591, 4599), False, 'import os\n'), ((4707, 4740), 'skimage.io.imread', 'skimage.io.imread', (['original_image'], {}), '(original_image)\n', (4724, 4740), False, 'import skimage\n'), ((4852, 4876), 'numpy.moveaxis', 'np.moveaxis', (['masks', '(2)', '(0)'], {}), '(masks, 2, 0)\n', (4863, 4876), True, 'import numpy as np\n'), ((4926, 4950), 'numpy.zeros', 'np.zeros', (['masks[0].shape'], {}), '(masks[0].shape)\n', (4934, 4950), True, 'import numpy as np\n'), ((5055, 5080), 
'PIL.Image.fromarray', 'Image.fromarray', (['maskzero'], {}), '(maskzero)\n', (5070, 5080), False, 'from PIL import Image\n'), ((2514, 2566), 'skimage.io.imread', 'skimage.io.imread', (["self.image_info[image_id]['path']"], {}), "(self.image_info[image_id]['path'])\n", (2531, 2566), False, 'import skimage\n'), ((3098, 3115), 'PIL.Image.open', 'Image.open', (['label'], {}), '(label)\n', (3108, 3115), False, 'from PIL import Image\n'), ((4608, 4623), 'os.listdir', 'os.listdir', (['src'], {}), '(src)\n', (4618, 4623), False, 'import os\n'), ((2709, 2738), 'skimage.color.gray2rgb', 'skimage.color.gray2rgb', (['image'], {}), '(image)\n', (2731, 2738), False, 'import skimage\n'), ((3258, 3271), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (3266, 3271), True, 'import numpy as np\n'), ((3310, 3323), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (3318, 3323), True, 'import numpy as np\n'), ((3131, 3145), 'numpy.array', 'np.array', (['mask'], {}), '(mask)\n', (3139, 3145), True, 'import numpy as np\n'), ((5111, 5143), 'os.path.basename', 'os.path.basename', (['original_image'], {}), '(original_image)\n', (5127, 5143), False, 'import os\n')] |
import datetime
import itertools
import math
import os
import random
import glob
import json
from typing import *
import scipy
import imageio
import scipy.misc
import scipy.spatial
import scipy.ndimage
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import umap
from torch.utils.data import Dataset, DataLoader, Sampler
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
import skimage.transform
import matplotlib.pyplot as plt
import elpips
from PythonExtras.distance_matrix import render_distance_matrix, DistanceMatrixConfig
import models
from dcgan import Discriminator, Generator
class CatDataset(Dataset):
    """Dataset of cat photos read from a directory of ``cat*.jpg`` files."""

    def __init__(self, imageSubdirPath: str, transform: Callable):
        self.rootPath = imageSubdirPath
        pattern = os.path.join(self.rootPath, 'cat*.jpg')
        self.pathList = glob.glob(pattern)
        self.transform = transform

    def __getitem__(self, index):
        """Return ``(image, file name)``; the name is useful for export."""
        imagePath = self.pathList[index]
        image = Image.open(imagePath)
        if self.transform is not None:
            image = self.transform(image)
        return image, os.path.basename(imagePath)

    def __len__(self):
        return len(self.pathList)
class AuthorDataset(Dataset):
    """Dataset of author embedding vectors loaded from a JSON file.

    The file must contain two parallel arrays under the keys ``'names'``
    and ``'vectors'``.

    Args:
        jsonPath: Path to the JSON file.
        normalize: If True (default), scale every vector to unit L2 norm.
            Fix: the original accepted this flag but always normalized
            regardless of its value; it is now honored (the default keeps
            the previous behavior).
    """

    def __init__(self, jsonPath: str, normalize=True):
        self.jsonPath = jsonPath
        with open(self.jsonPath, 'r') as file:
            self.data = json.load(file)

        self.names = self.data['names']
        self.vectors = np.asarray(self.data['vectors'])

        if normalize:
            # Scale each row to unit L2 norm.
            self.vectors = self.vectors / np.linalg.norm(self.vectors, axis=-1, keepdims=True)

    def __getitem__(self, index):
        """Return ``(vector, name)`` for the given index."""
        return self.vectors[index], self.names[index]

    def __len__(self):
        return len(self.names)
class InfiniteSampler(Sampler):
    """Yield dataset indices forever, one random permutation after another.

    Based on https://discuss.pytorch.org/t/implementing-an-infinite-loop-dataset-dataloader-combo/35567/3
    """

    def __init__(self, data_source: Sized):
        super().__init__(data_source)
        self.dataset_size = len(data_source)

    def __iter__(self):
        generator = torch.Generator()
        # Every pass yields each index exactly once, in a fresh random order;
        # the outer loop never terminates.
        while True:
            for index in torch.randperm(self.dataset_size, generator=generator):
                yield index
def plot_image_scatter(ax, data, images, downscaleRatio: Optional[int] = None):
    """Scatter-plot 2D points and draw the corresponding image at each point.

    If *downscaleRatio* is given, every image is shrunk by that factor
    (bilinear, per spatial axis) before being placed on the axes.
    """
    from matplotlib.offsetbox import AnnotationBbox, OffsetImage

    if downscaleRatio:
        zoomFactor = 1 / downscaleRatio
        images = [
            scipy.ndimage.interpolation.zoom(image, [zoomFactor, zoomFactor, 1], order=1)
            for image in images
        ]

    ax.scatter(data[:, 0], data[:, 1])
    for point, image in zip(data, images):
        x, y = tuple(point)
        ax.add_artist(AnnotationBbox(OffsetImage(image), (x, y), frameon=False))
def l2_sqr_dist_matrix(x: torch.Tensor) -> torch.Tensor:
    """Return the pairwise squared Euclidean distances between the rows of x.

    Uses the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 * <a, b>.
    """
    gram = torch.mm(x, x.T)
    sqNorms = gram.diagonal()
    return sqNorms[:, None] + sqNorms[None, :] - 2 * gram
def main():
    """Optimize GAN latent codes so that perceptual (E-LPIPS) distances
    between the generated images reproduce precomputed squared L2 distances
    between author embedding vectors.

    Side effects: requires CUDA, reads checkpoints/data from hard-coded
    paths, and writes figures, distance matrices, survey sheets, TensorBoard
    logs and model checkpoints into a timestamped 'runs/...' directory.
    """
    # Problem sizes and optimization constants.
    dataSize = 32
    batchSize = 8
    elpipsBatchSize = 1
    # imageSize = 32
    imageSize = 64
    nz = 100
    # discCheckpointPath = r'E:\projects\visus\PyTorch-GAN\implementations\dcgan\checkpoints\2020_07_10_15_53_34\disc_step4800.pth'
    discCheckpointPath = r'E:\projects\visus\pytorch-examples\dcgan\out\netD_epoch_24.pth'
    genCheckpointPath = r'E:\projects\visus\pytorch-examples\dcgan\out\netG_epoch_24.pth'
    gpu = torch.device('cuda')
    # For now we normalize the vectors to have norm 1, but don't make sure
    # that the data has certain mean/std.
    pointDataset = AuthorDataset(
        jsonPath=r'E:\out\scripts\metaphor-vis\authors-all.json'
    )
    # Take top N points.
    points = np.asarray([pointDataset[i][0] for i in range(dataSize)])
    distPointsCpu = l2_sqr_dist_matrix(torch.tensor(points)).numpy()
    # Optimized variables: one latent code per data point, plus a global
    # affine map (scale, bias) from perceptual distance to target distance.
    latents = torch.tensor(np.random.normal(0.0, 1.0, (dataSize, nz)),
                           requires_grad=True, dtype=torch.float32, device=gpu)
    scale = torch.tensor(2.7, requires_grad=True, dtype=torch.float32, device=gpu) # todo Re-check!
    bias = torch.tensor(0.0, requires_grad=True, dtype=torch.float32, device=gpu) # todo Re-check!
    lpips = models.PerceptualLoss(model='net-lin', net='vgg', use_gpu=True).to(gpu)
    # lossModel = lpips
    config = elpips.Config()
    config.batch_size = elpipsBatchSize # Ensemble size for ELPIPS.
    config.set_scale_levels_by_image_size(imageSize, imageSize)
    lossModel = elpips.ElpipsMetric(config, lpips).to(gpu)
    discriminator = Discriminator(3, 64, 1)
    if discCheckpointPath:
        discriminator.load_state_dict(torch.load(discCheckpointPath))
    else:
        discriminator.init_params()
    discriminator = discriminator.to(gpu)
    generator = Generator(nz=nz, ngf=64)
    if genCheckpointPath:
        generator.load_state_dict(torch.load(genCheckpointPath))
    else:
        generator.init_params()
    generator = generator.to(gpu)
    # optimizerImages = torch.optim.Adam([images, scale], lr=1e-2, betas=(0.9, 0.999))
    optimizerScale = torch.optim.Adam([scale, bias], lr=0.001)
    # optimizerGen = torch.optim.Adam(generator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    # optimizerDisc = torch.optim.Adam(discriminator.parameters(), lr=2e-4, betas=(0.9, 0.999))
    # optimizerDisc = torch.optim.Adam(discriminator.parameters(), lr=0.0002, betas=(0.5, 0.999))
    optimizerLatents = torch.optim.Adam([latents], lr=5e-3, betas=(0.9, 0.999))
    fig, axes = plt.subplots(nrows=2, ncols=batchSize // 2)
    fig2 = plt.figure()
    ax2 = fig2.add_subplot(1, 1, 1)
    # One output directory per run, named by timestamp.
    outPath = os.path.join('runs', datetime.datetime.today().strftime('%Y_%m_%d_%H_%M_%S'))
    os.makedirs(outPath)
    summaryWriter = SummaryWriter(outPath)
    for batchIndex in range(10000):
        # Sample a random mini-batch of point indices (with replacement).
        # noinspection PyTypeChecker
        randomIndices = np.random.randint(0, dataSize, batchSize).tolist() # type: List[int]
        # # randomIndices = list(range(dataSize)) # type: List[int]
        distTarget = torch.tensor(distPointsCpu[randomIndices, :][:, randomIndices], dtype=torch.float32, device=gpu)
        latentsBatch = latents[randomIndices]
        imageBatchFake = generator(latentsBatch[:, :, None, None].float())
        # todo It's possible to compute this more efficiently, but would require re-implementing lpips.
        # For now, compute the full BSxBS matrix row-by-row to avoid memory issues.
        lossDistTotal = torch.tensor(0.0, device=gpu)
        distanceRows = []
        for iRow in range(batchSize):
            distPredFlat = lossModel(imageBatchFake[iRow].repeat(repeats=(batchSize, 1, 1, 1)).contiguous(),
                                     imageBatchFake, normalize=True)
            distPred = distPredFlat.reshape((1, batchSize))
            distanceRows.append(distPred)
            lossDist = torch.sum((distTarget[iRow] - (distPred * scale + bias)) ** 2) # MSE
            lossDistTotal += lossDist
        lossDistTotal /= batchSize * batchSize # Compute the mean.
        distPredFull = torch.cat(distanceRows, dim=0)
        # print('{} - {} || {} - {}'.format(
        #     torch.min(distPred).item(),
        #     torch.max(distPred).item(),
        #     torch.min(distTarget).item(),
        #     torch.max(distTarget).item()
        # ))
        # discPred = discriminator(imageBatchFake)
        # lossRealness = bceLoss(discPred, torch.ones(imageBatchFake.shape[0], device=gpu))
        # lossGen = lossDist + 1.0 * lossRealness
        lossLatents = lossDistTotal
        # optimizerGen.zero_grad()
        # optimizerScale.zero_grad()
        # lossGen.backward()
        # optimizerGen.step()
        # optimizerScale.step()
        # Only the latent codes are optimized here; scale/bias stay fixed.
        optimizerLatents.zero_grad()
        # optimizerScale.zero_grad()
        lossLatents.backward()
        optimizerLatents.step()
        # optimizerScale.step()
        # with torch.no_grad():
        #     # todo We're clamping all the images every batch, can we clamp only the ones updated?
        #     # images = torch.clamp(images, 0, 1) # For some reason this was making the training worse.
        #     images.data = torch.clamp(images.data, 0, 1)
        # Periodic logging, visualization and export.
        if batchIndex % 100 == 0:
            msg = 'iter {} loss dist {:.3f} scale: {:.3f} bias: {:.3f}'.format(batchIndex, lossDistTotal.item(), scale.item(), bias.item())
            print(msg)
            summaryWriter.add_scalar('loss-dist', lossDistTotal.item(), global_step=batchIndex)
            def gpu_images_to_numpy(images):
                # Convert NCHW [-1, 1] GPU tensors into HWC [0, 1] numpy arrays.
                imagesNumpy = images.cpu().data.numpy().transpose(0, 2, 3, 1)
                imagesNumpy = (imagesNumpy + 1) / 2
                return imagesNumpy
            # print(discPred.tolist())
            imageBatchFakeCpu = gpu_images_to_numpy(imageBatchFake)
            # imageBatchRealCpu = gpu_images_to_numpy(imageBatchReal)
            for iCol, ax in enumerate(axes.flatten()[:batchSize]):
                ax.imshow(imageBatchFakeCpu[iCol])
            fig.suptitle(msg)
            with torch.no_grad():
                images = gpu_images_to_numpy(generator(latents[..., None, None]))
                # 2D projection of the author vectors for the image scatter.
                authorVectorsProj = umap.UMAP(n_neighbors=min(5, dataSize), random_state=1337).fit_transform(points)
                plot_image_scatter(ax2, authorVectorsProj, images, downscaleRatio=2)
            fig.savefig(os.path.join(outPath, f'batch_{batchIndex}.png'))
            fig2.savefig(os.path.join(outPath, f'scatter_{batchIndex}.png'))
            plt.close(fig)
            plt.close(fig2)
            with torch.no_grad():
                imagesGpu = generator(latents[..., None, None])
                imageNumber = imagesGpu.shape[0]
                # Compute LPIPS distances, batch to avoid memory issues.
                bs = min(imageNumber, 8)
                assert imageNumber % bs == 0
                distPredEval = np.zeros((imagesGpu.shape[0], imagesGpu.shape[0]))
                for iCol in range(imageNumber // bs):
                    startA, endA = iCol * bs, (iCol + 1) * bs
                    imagesA = imagesGpu[startA:endA]
                    for j in range(imageNumber // bs):
                        startB, endB = j * bs, (j + 1) * bs
                        imagesB = imagesGpu[startB:endB]
                        distBatchEval = lossModel(imagesA.repeat(repeats=(bs, 1, 1, 1)).contiguous(),
                                                  imagesB.repeat_interleave(repeats=bs, dim=0).contiguous(),
                                                  normalize=True).cpu().numpy()
                        distPredEval[startA:endA, startB:endB] = distBatchEval.reshape((bs, bs))
                distPredEval = (distPredEval * scale.item() + bias.item())
                # Move to the CPU and append an alpha channel for rendering.
                images = gpu_images_to_numpy(imagesGpu)
                images = [np.concatenate([im, np.ones(im.shape[:-1] + (1,))], axis=-1) for im in images]
                distPoints = distPointsCpu
                assert np.abs(distPoints - distPoints.T).max() < 1e-5
                distPoints = np.minimum(distPoints, distPoints.T) # Remove rounding errors, guarantee symmetry.
                config = DistanceMatrixConfig()
                config.dataRange = (0., 4.)
                _, pointIndicesSorted = render_distance_matrix(
                    os.path.join(outPath, f'dist_point_{batchIndex}.png'),
                    distPoints,
                    images,
                    config=config
                )
                # print(np.abs(distPredFlat - distPredFlat.T).max())
                # assert np.abs(distPredFlat - distPredFlat.T).max() < 1e-5
                # todo The symmetry doesn't hold for E-LPIPS, since it's stochastic.
                distPredEval = np.minimum(distPredEval, distPredEval.T) # Remove rounding errors, guarantee symmetry.
                config = DistanceMatrixConfig()
                config.dataRange = (0., 4.)
                render_distance_matrix(
                    os.path.join(outPath, f'dist_images_{batchIndex}.png'),
                    distPredEval,
                    images,
                    config=config
                )
                config = DistanceMatrixConfig()
                config.dataRange = (0., 4.)
                render_distance_matrix(
                    os.path.join(outPath, f'dist_images_aligned_{batchIndex}.png'),
                    distPredEval,
                    images,
                    predefinedOrder=pointIndicesSorted,
                    config=config
                )
            # Side-by-side comparison: target vs. predicted batch distances.
            fig, axes = plt.subplots(ncols=2)
            axes[0].matshow(distTarget.cpu().numpy(), vmin=0, vmax=4)
            axes[1].matshow(distPredFull.cpu().numpy() * scale.item(), vmin=0, vmax=4)
            fig.savefig(os.path.join(outPath, f'batch_dist_{batchIndex}.png'))
            plt.close(fig)
            # Export a triplet-comparison survey sheet and its answer key.
            surveySize = 30
            fig, axes = plt.subplots(nrows=3, ncols=surveySize, figsize=(surveySize, 3))
            assert len(images) == dataSize
            allIndices = list(range(dataSize))
            with open(os.path.join(outPath, f'survey_{batchIndex}.txt'), 'w') as file:
                for iCol in range(surveySize):
                    randomIndices = random.sample(allIndices, k=3)
                    leftToMid = distPointsCpu[randomIndices[0], randomIndices[1]]
                    rightToMid = distPointsCpu[randomIndices[2], randomIndices[1]]
                    correctAnswer = 'left' if leftToMid < rightToMid else 'right'
                    file.write("{}\t{}\t{}\t{}\t{}\n".format(iCol, correctAnswer, leftToMid, rightToMid,
                                                             str(tuple(randomIndices))))
                    for iRow in (0, 1, 2):
                        axes[iRow][iCol].imshow(images[randomIndices[iRow]])
            fig.savefig(os.path.join(outPath, f'survey_{batchIndex}.png'))
            plt.close(fig)
            # NOTE(review): both saves use the 'gen_*' name, so the generator
            # checkpoint is overwritten by the discriminator's — confirm.
            torch.save(generator.state_dict(), os.path.join(outPath, 'gen_{}.pth'.format(batchIndex)))
            torch.save(discriminator.state_dict(), os.path.join(outPath, 'gen_{}.pth'.format(batchIndex)))
    summaryWriter.close()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"numpy.abs",
"random.sample",
"torch.mm",
"torch.cat",
"numpy.ones",
"matplotlib.pyplot.figure",
"models.PerceptualLoss",
"numpy.linalg.norm",
"numpy.random.randint",
"numpy.random.normal",
"torch.device",
"dcgan.Generator",
"torch.no_grad",
"os.path.join",
"matplotlib.offsetbox.OffsetIm... | [((3183, 3199), 'torch.mm', 'torch.mm', (['x', 'x.T'], {}), '(x, x.T)\n', (3191, 3199), False, 'import torch\n'), ((3741, 3761), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3753, 3761), False, 'import torch\n'), ((4316, 4386), 'torch.tensor', 'torch.tensor', (['(2.7)'], {'requires_grad': '(True)', 'dtype': 'torch.float32', 'device': 'gpu'}), '(2.7, requires_grad=True, dtype=torch.float32, device=gpu)\n', (4328, 4386), False, 'import torch\n'), ((4416, 4486), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'requires_grad': '(True)', 'dtype': 'torch.float32', 'device': 'gpu'}), '(0.0, requires_grad=True, dtype=torch.float32, device=gpu)\n', (4428, 4486), False, 'import torch\n'), ((4627, 4642), 'elpips.Config', 'elpips.Config', ([], {}), '()\n', (4640, 4642), False, 'import elpips\n'), ((4856, 4879), 'dcgan.Discriminator', 'Discriminator', (['(3)', '(64)', '(1)'], {}), '(3, 64, 1)\n', (4869, 4879), False, 'from dcgan import Discriminator, Generator\n'), ((5082, 5106), 'dcgan.Generator', 'Generator', ([], {'nz': 'nz', 'ngf': '(64)'}), '(nz=nz, ngf=64)\n', (5091, 5106), False, 'from dcgan import Discriminator, Generator\n'), ((5383, 5424), 'torch.optim.Adam', 'torch.optim.Adam', (['[scale, bias]'], {'lr': '(0.001)'}), '([scale, bias], lr=0.001)\n', (5399, 5424), False, 'import torch\n'), ((5735, 5792), 'torch.optim.Adam', 'torch.optim.Adam', (['[latents]'], {'lr': '(0.005)', 'betas': '(0.9, 0.999)'}), '([latents], lr=0.005, betas=(0.9, 0.999))\n', (5751, 5792), False, 'import torch\n'), ((5809, 5852), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(2)', 'ncols': '(batchSize // 2)'}), '(nrows=2, ncols=batchSize // 2)\n', (5821, 5852), True, 'import matplotlib.pyplot as plt\n'), ((5865, 5877), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5875, 5877), True, 'import matplotlib.pyplot as plt\n'), ((6011, 6031), 'os.makedirs', 'os.makedirs', (['outPath'], {}), '(outPath)\n', (6022, 
6031), False, 'import os\n'), ((6053, 6075), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['outPath'], {}), '(outPath)\n', (6066, 6075), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((1037, 1053), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1047, 1053), False, 'from PIL import Image\n'), ((1566, 1598), 'numpy.asarray', 'np.asarray', (["self.data['vectors']"], {}), "(self.data['vectors'])\n", (1576, 1598), True, 'import numpy as np\n'), ((2485, 2502), 'torch.Generator', 'torch.Generator', ([], {}), '()\n', (2500, 2502), False, 'import torch\n'), ((4179, 4221), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)', '(dataSize, nz)'], {}), '(0.0, 1.0, (dataSize, nz))\n', (4195, 4221), True, 'import numpy as np\n'), ((6335, 6436), 'torch.tensor', 'torch.tensor', (['distPointsCpu[randomIndices, :][:, randomIndices]'], {'dtype': 'torch.float32', 'device': 'gpu'}), '(distPointsCpu[randomIndices, :][:, randomIndices], dtype=torch\n .float32, device=gpu)\n', (6347, 6436), False, 'import torch\n'), ((6767, 6796), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'device': 'gpu'}), '(0.0, device=gpu)\n', (6779, 6796), False, 'import torch\n'), ((7365, 7395), 'torch.cat', 'torch.cat', (['distanceRows'], {'dim': '(0)'}), '(distanceRows, dim=0)\n', (7374, 7395), False, 'import torch\n'), ((873, 912), 'os.path.join', 'os.path.join', (['self.rootPath', '"""cat*.jpg"""'], {}), "(self.rootPath, 'cat*.jpg')\n", (885, 912), False, 'import os\n'), ((1159, 1181), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (1175, 1181), False, 'import os\n'), ((1486, 1501), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1495, 1501), False, 'import json\n'), ((1670, 1722), 'numpy.linalg.norm', 'np.linalg.norm', (['self.vectors'], {'axis': '(-1)', 'keepdims': '(True)'}), '(self.vectors, axis=-1, keepdims=True)\n', (1684, 1722), True, 'import numpy as np\n'), ((2820, 2889), 'scipy.ndimage.interpolation.zoom', 
'scipy.ndimage.interpolation.zoom', (['i', '[ratioInv, ratioInv, 1]'], {'order': '(1)'}), '(i, [ratioInv, ratioInv, 1], order=1)\n', (2852, 2889), False, 'import scipy\n'), ((3039, 3061), 'matplotlib.offsetbox.OffsetImage', 'OffsetImage', (['images[i]'], {}), '(images[i])\n', (3050, 3061), False, 'from matplotlib.offsetbox import AnnotationBbox, OffsetImage\n'), ((4518, 4581), 'models.PerceptualLoss', 'models.PerceptualLoss', ([], {'model': '"""net-lin"""', 'net': '"""vgg"""', 'use_gpu': '(True)'}), "(model='net-lin', net='vgg', use_gpu=True)\n", (4539, 4581), False, 'import models\n'), ((4792, 4826), 'elpips.ElpipsMetric', 'elpips.ElpipsMetric', (['config', 'lpips'], {}), '(config, lpips)\n', (4811, 4826), False, 'import elpips\n'), ((4945, 4975), 'torch.load', 'torch.load', (['discCheckpointPath'], {}), '(discCheckpointPath)\n', (4955, 4975), False, 'import torch\n'), ((5167, 5196), 'torch.load', 'torch.load', (['genCheckpointPath'], {}), '(genCheckpointPath)\n', (5177, 5196), False, 'import torch\n'), ((7164, 7226), 'torch.sum', 'torch.sum', (['((distTarget[iRow] - (distPred * scale + bias)) ** 2)'], {}), '((distTarget[iRow] - (distPred * scale + bias)) ** 2)\n', (7173, 7226), False, 'import torch\n'), ((9806, 9820), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9815, 9820), True, 'import matplotlib.pyplot as plt\n'), ((9833, 9848), 'matplotlib.pyplot.close', 'plt.close', (['fig2'], {}), '(fig2)\n', (9842, 9848), True, 'import matplotlib.pyplot as plt\n'), ((2546, 2592), 'torch.randperm', 'torch.randperm', (['self.dataset_size'], {'generator': 'g'}), '(self.dataset_size, generator=g)\n', (2560, 2592), False, 'import torch\n'), ((4121, 4141), 'torch.tensor', 'torch.tensor', (['points'], {}), '(points)\n', (4133, 4141), False, 'import torch\n'), ((5950, 5975), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (5973, 5975), False, 'import datetime\n'), ((6175, 6216), 'numpy.random.randint', 'np.random.randint', (['(0)', 
'dataSize', 'batchSize'], {}), '(0, dataSize, batchSize)\n', (6192, 6216), True, 'import numpy as np\n'), ((9340, 9355), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9353, 9355), False, 'import torch\n'), ((9667, 9715), 'os.path.join', 'os.path.join', (['outPath', 'f"""batch_{batchIndex}.png"""'], {}), "(outPath, f'batch_{batchIndex}.png')\n", (9679, 9715), False, 'import os\n'), ((9742, 9792), 'os.path.join', 'os.path.join', (['outPath', 'f"""scatter_{batchIndex}.png"""'], {}), "(outPath, f'scatter_{batchIndex}.png')\n", (9754, 9792), False, 'import os\n'), ((9867, 9882), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9880, 9882), False, 'import torch\n'), ((10188, 10238), 'numpy.zeros', 'np.zeros', (['(imagesGpu.shape[0], imagesGpu.shape[0])'], {}), '((imagesGpu.shape[0], imagesGpu.shape[0]))\n', (10196, 10238), True, 'import numpy as np\n'), ((11428, 11464), 'numpy.minimum', 'np.minimum', (['distPoints', 'distPoints.T'], {}), '(distPoints, distPoints.T)\n', (11438, 11464), True, 'import numpy as np\n'), ((11537, 11559), 'PythonExtras.distance_matrix.DistanceMatrixConfig', 'DistanceMatrixConfig', ([], {}), '()\n', (11557, 11559), False, 'from PythonExtras.distance_matrix import render_distance_matrix, DistanceMatrixConfig\n'), ((12117, 12157), 'numpy.minimum', 'np.minimum', (['distPredEval', 'distPredEval.T'], {}), '(distPredEval, distPredEval.T)\n', (12127, 12157), True, 'import numpy as np\n'), ((12230, 12252), 'PythonExtras.distance_matrix.DistanceMatrixConfig', 'DistanceMatrixConfig', ([], {}), '()\n', (12250, 12252), False, 'from PythonExtras.distance_matrix import render_distance_matrix, DistanceMatrixConfig\n'), ((12553, 12575), 'PythonExtras.distance_matrix.DistanceMatrixConfig', 'DistanceMatrixConfig', ([], {}), '()\n', (12573, 12575), False, 'from PythonExtras.distance_matrix import render_distance_matrix, DistanceMatrixConfig\n'), ((12943, 12964), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)'}), '(ncols=2)\n', (12955, 
12964), True, 'import matplotlib.pyplot as plt\n'), ((13229, 13243), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13238, 13243), True, 'import matplotlib.pyplot as plt\n'), ((13305, 13369), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(3)', 'ncols': 'surveySize', 'figsize': '(surveySize, 3)'}), '(nrows=3, ncols=surveySize, figsize=(surveySize, 3))\n', (13317, 13369), True, 'import matplotlib.pyplot as plt\n'), ((14368, 14382), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14377, 14382), True, 'import matplotlib.pyplot as plt\n'), ((11688, 11741), 'os.path.join', 'os.path.join', (['outPath', 'f"""dist_point_{batchIndex}.png"""'], {}), "(outPath, f'dist_point_{batchIndex}.png')\n", (11700, 11741), False, 'import os\n'), ((12357, 12411), 'os.path.join', 'os.path.join', (['outPath', 'f"""dist_images_{batchIndex}.png"""'], {}), "(outPath, f'dist_images_{batchIndex}.png')\n", (12369, 12411), False, 'import os\n'), ((12680, 12742), 'os.path.join', 'os.path.join', (['outPath', 'f"""dist_images_aligned_{batchIndex}.png"""'], {}), "(outPath, f'dist_images_aligned_{batchIndex}.png')\n", (12692, 12742), False, 'import os\n'), ((13158, 13211), 'os.path.join', 'os.path.join', (['outPath', 'f"""batch_dist_{batchIndex}.png"""'], {}), "(outPath, f'batch_dist_{batchIndex}.png')\n", (13170, 13211), False, 'import os\n'), ((14301, 14350), 'os.path.join', 'os.path.join', (['outPath', 'f"""survey_{batchIndex}.png"""'], {}), "(outPath, f'survey_{batchIndex}.png')\n", (14313, 14350), False, 'import os\n'), ((13494, 13543), 'os.path.join', 'os.path.join', (['outPath', 'f"""survey_{batchIndex}.txt"""'], {}), "(outPath, f'survey_{batchIndex}.txt')\n", (13506, 13543), False, 'import os\n'), ((13650, 13680), 'random.sample', 'random.sample', (['allIndices'], {'k': '(3)'}), '(allIndices, k=3)\n', (13663, 13680), False, 'import random\n'), ((11226, 11255), 'numpy.ones', 'np.ones', (['(im.shape[:-1] + (1,))'], {}), '(im.shape[:-1] + 
(1,))\n', (11233, 11255), True, 'import numpy as np\n'), ((11352, 11385), 'numpy.abs', 'np.abs', (['(distPoints - distPoints.T)'], {}), '(distPoints - distPoints.T)\n', (11358, 11385), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
import scipy.sparse as sparse
import osqp
import warnings
def __is_vector__(vec):
if vec.ndim == 1:
return True
else:
if vec.ndim == 2:
if vec.shape[0] == 1 or vec.shape[1] == 0:
return True
else:
return False
return False
def __is_matrix__(mat):
if mat.ndim == 2:
return True
else:
return False
class MPCController:
    """ This class implements a linear constrained MPC controller

    Attributes
    ----------
    Ad : 2D array_like. Size: (nx, nx)
        Discrete-time system matrix Ad.
    Bd : 2D array-like. Size: (nx, nu)
        Discrete-time system matrix Bd.
    Np : int
        Prediction horizon. Default value: 20.
    Nc : int
        Control horizon. It must be lower or equal to Np. If None, it is set equal to Np.
    x0 : 1D array_like. Size: (nx,)
        System state at time instant 0. If None, it is set to np.zeros(nx)
    xref : 1D array-like. Size: (nx,) or (Np, nx)
        System state reference (aka target, set-point). If size is (Np, nx), reference is time-dependent.
    uref : 1D array-like. Size: (nu, )
        System input reference. If None, it is set to np.zeros(nu)
    uminus1 : 1D array_like
        Input value assumed at time instant -1. If None, it is set to uref.
    Qx : 2D array_like
        State weight matrix. If None, it is set to zeros((nx,nx)).
    QxN : 2D array_like
        State weight matrix for the last state. If None, it is set to Qx.
    Qu : 2D array_like
        Input weight matrix. If None, it is set to zeros((nu,nu)).
    QDu : 2D array_like
        Input delta weight matrix. If None, it is set to zeros((nu,nu)).
    xmin : 1D array_like
        State minimum value. If None, it is set to -np.inf*ones(nx).
    xmax : 1D array_like
        State maximum value. If None, it is set to np.inf*ones(nx).
    umin : 1D array_like
        Input minimum value. If None, it is set to -np.inf*ones(nu).
    umax : 1D array_like
        Input maximum value. If None, it is set to np.inf*ones(nu).
    Dumin : 1D array_like
        Input variation minimum value. If None, it is set to -np.inf*ones(nu).
    Dumax : 1D array_like
        Input variation maximum value. If None, it is set to np.inf*ones(nu).
    eps_feas : float
        Scale factor for the matrix Q_eps. Q_eps = eps_feas*eye(nx).
    eps_rel : float
        Relative tolerance of the QP solver. Default value: 1e-3.
    eps_abs : float
        Absolute tolerance of the QP solver. Default value: 1e-3.
    """
    def __init__(self, Ad, Bd, Np=20, Nc=None,
                 x0=None, xref=None, uref=None, uminus1=None,
                 Qx=None, QxN=None, Qu=None, QDu=None,
                 xmin=None, xmax=None, umin=None, umax=None, Dumin=None, Dumax=None,
                 eps_feas=1e6, eps_rel=1e-3, eps_abs=1e-3):
        if __is_matrix__(Ad) and (Ad.shape[0] == Ad.shape[1]):
            self.Ad = Ad
            self.nx = Ad.shape[0] # number of states
        else:
            raise ValueError("Ad should be a square matrix of dimension (nx,nx)!")
        if __is_matrix__(Bd) and Bd.shape[0] == self.nx:
            self.Bd = Bd
            self.nu = Bd.shape[1] # number of inputs
        else:
            raise ValueError("Bd should be a matrix of dimension (nx, nu)!")
        if Np > 1:
            self.Np = Np # assert
        else:
            raise ValueError("Np should be > 1!")
        if Nc is not None:
            if Nc <= Np:
                self.Nc = Nc
            else:
                raise ValueError("Nc should be <= Np!")
        else:
            self.Nc = self.Np
        # x0 handling
        if x0 is not None:
            if __is_vector__(x0) and x0.size == self.nx:
                self.x0 = x0.ravel()
            else:
                raise ValueError("x0 should be an array of dimension (nx,)!")
        else:
            self.x0 = np.zeros(self.nx)
        # reference handing
        if xref is not None:
            if __is_vector__(xref) and xref.size == self.nx:
                self.xref = xref.ravel()
            elif __is_matrix__(xref) and xref.shape[1] == self.nx and xref.shape[0] >= self.Np:
                self.xref = xref
            else:
                raise ValueError("xref should be either a vector of shape (nx,) or a matrix of shape (Np+1, nx)!")
        else:
            self.xref = np.zeros(self.nx)
        if uref is not None:
            if __is_vector__(uref) and uref.size == self.nu:
                self.uref = uref.ravel() # assert...
            else:
                raise ValueError("uref should be a vector of shape (nu,)!")
        else:
            self.uref = np.zeros(self.nu)
        if uminus1 is not None:
            if __is_vector__(uminus1) and uminus1.size == self.nu:
                self.uminus1 = uminus1
            else:
                raise ValueError("uminus1 should be a vector of shape (nu,)!")
        else:
            self.uminus1 = self.uref
        # weights handling
        if Qx is not None:
            if __is_matrix__(Qx) and Qx.shape[0] == self.nx and Qx.shape[1] == self.nx:
                self.Qx = Qx
            else:
                raise ValueError("Qx should be a matrix of shape (nx, nx)!")
        else:
            self.Qx = np.zeros((self.nx, self.nx)) # sparse
        if QxN is not None:
            # Bug fix: the second shape check previously read Qx.shape[1], which
            # validated the wrong matrix and crashed with AttributeError when
            # QxN was given but Qx was left as None.
            if __is_matrix__(QxN) and QxN.shape[0] == self.nx and QxN.shape[1] == self.nx:
                self.QxN = QxN
            else:
                raise ValueError("QxN should be a square matrix of shape (nx, nx)!")
        else:
            self.QxN = self.Qx # sparse
        if Qu is not None:
            if __is_matrix__(Qu) and Qu.shape[0] == self.nu and Qu.shape[1] == self.nu:
                self.Qu = Qu
            else:
                raise ValueError("Qu should be a square matrix of shape (nu, nu)!")
        else:
            self.Qu = np.zeros((self.nu, self.nu))
        if QDu is not None:
            if __is_matrix__(QDu) and QDu.shape[0] == self.nu and QDu.shape[1] == self.nu:
                self.QDu = QDu
            else:
                raise ValueError("QDu should be a square matrix of shape (nu, nu)!")
        else:
            self.QDu = np.zeros((self.nu, self.nu))
        # constraints handling
        if xmin is not None:
            if __is_vector__(xmin) and xmin.size == self.nx:
                self.xmin = xmin.ravel()
            else:
                raise ValueError("xmin should be a vector of shape (nx,)!")
        else:
            self.xmin = -np.ones(self.nx)*np.inf
        if xmax is not None:
            if __is_vector__(xmax) and xmax.size == self.nx:
                self.xmax = xmax
            else:
                raise ValueError("xmax should be a vector of shape (nx,)!")
        else:
            self.xmax = np.ones(self.nx)*np.inf
        if umin is not None:
            if __is_vector__(umin) and umin.size == self.nu:
                self.umin = umin
            else:
                raise ValueError("umin should be a vector of shape (nu,)!")
        else:
            self.umin = -np.ones(self.nu)*np.inf
        if umax is not None:
            if __is_vector__(umax) and umax.size == self.nu:
                self.umax = umax
            else:
                raise ValueError("umax should be a vector of shape (nu,)!")
        else:
            self.umax = np.ones(self.nu)*np.inf
        if Dumin is not None:
            if __is_vector__(Dumin) and Dumin.size == self.nu:
                self.Dumin = Dumin
            else:
                raise ValueError("Dumin should be a vector of shape (nu,)!")
        else:
            self.Dumin = -np.ones(self.nu)*np.inf
        if Dumax is not None:
            if __is_vector__(Dumax) and Dumax.size == self.nu:
                self.Dumax = Dumax
            else:
                raise ValueError("Dumax should be a vector of shape (nu,)!")
        else:
            self.Dumax = np.ones(self.nu)*np.inf
        self.eps_feas = eps_feas
        self.Qeps = eps_feas * sparse.eye(self.nx)
        self.eps_rel = eps_rel
        self.eps_abs = eps_abs
        self.u_failure = self.uref # value provided when the MPC solver fails.
        # Hidden settings (for debug purpose)
        self.raise_error = False # Raise an error when MPC optimization fails
        self.JX_ON = True # Cost function terms in X active
        self.JU_ON = True # Cost function terms in U active
        self.JDU_ON = True # Cost function terms in Delta U active
        self.SOFT_ON = True # Soft constraints active
        self.COMPUTE_J_CNST = False # Compute the constant term of the MPC QP problem
        # QP problem instance
        self.prob = osqp.OSQP()
        # Variables initialized by the setup() method
        self.res = None
        self.P = None
        self.q = None
        self.A = None
        self.l = None
        self.u = None
        self.x0_rh = None
        self.uminus1_rh = None
        self.J_CNST = None # Constant term of the cost function
    def setup(self, solve=True):
        """ Set-up the QP problem.

        Parameters
        ----------
        solve : bool
            If True, also solve the QP problem.
        """
        self.x0_rh = np.copy(self.x0)
        self.uminus1_rh = np.copy(self.uminus1)
        self._compute_QP_matrices_()
        # Bug fix: eps_abs and eps_rel were previously swapped when passed to
        # OSQP (eps_abs received self.eps_rel and vice versa).
        self.prob.setup(self.P, self.q, self.A, self.l, self.u, warm_start=True, verbose=False, eps_abs=self.eps_abs, eps_rel=self.eps_rel)
        if solve:
            self.solve()
    def output(self, return_x_seq=False, return_u_seq=False, return_eps_seq=False, return_status=False, return_obj_val=False):
        """ Return the MPC controller output uMPC, i.e., the first element of the optimal input sequence and assign is to self.uminus1_rh.

        Parameters
        ----------
        return_x_seq : bool
            If True, the method also returns the optimal sequence of states in the info dictionary
        return_u_seq : bool
            If True, the method also returns the optimal sequence of inputs in the info dictionary
        return_eps_seq : bool
            If True, the method also returns the optimal sequence of epsilon in the info dictionary
        return_status : bool
            If True, the method also returns the optimizer status in the info dictionary
        return_obj_val : bool
            If True, the method also returns the objective function value in the info dictionary

        Returns
        -------
        array_like (nu,)
            The first element of the optimal input sequence uMPC to be applied to the system.
        dict
            A dictionary with additional infos. It is returned only if one of the input flags return_* is set to True
        """
        Nc = self.Nc
        Np = self.Np
        nx = self.nx
        nu = self.nu
        # Extract first control input to the plant; fall back to u_failure if
        # the optimizer did not converge.
        if self.res.info.status == 'solved':
            uMPC = self.res.x[(Np+1)*nx:(Np+1)*nx + nu]
        else:
            uMPC = self.u_failure
        # Return additional info?
        info = {}
        if return_x_seq:
            seq_X = self.res.x[0:(Np+1)*nx]
            seq_X = seq_X.reshape(-1,nx)
            info['x_seq'] = seq_X
        if return_u_seq:
            seq_U = self.res.x[(Np+1)*nx:(Np+1)*nx + Nc*nu]
            seq_U = seq_U.reshape(-1,nu)
            info['u_seq'] = seq_U
        if return_eps_seq:
            seq_eps = self.res.x[(Np+1)*nx + Nc*nu : (Np+1)*nx + Nc*nu + (Np+1)*nx ]
            seq_eps = seq_eps.reshape(-1,nx)
            info['eps_seq'] = seq_eps
        if return_status:
            info['status'] = self.res.info.status
        if return_obj_val:
            obj_val = self.res.info.obj_val + self.J_CNST # constant of the objective value
            info['obj_val'] = obj_val
        self.uminus1_rh = uMPC
        if len(info) == 0:
            return uMPC
        else:
            return uMPC, info
    def update(self, x, u=None, xref=None, solve=True):
        """ Update the QP problem.

        Parameters
        ----------
        x : array_like. Size: (nx,)
            The new value of x0.
        u : array_like. Size: (nu,)
            The new value of uminus1. If none, it is set to the previously computed u.
        xref : array_like. Size: (nx,)
            The new value of xref. If none, it is not changed
        solve : bool
            If True, also solve the QP problem.
        """
        self.x0_rh = x # previous x0
        if u is not None:
            self.uminus1_rh = u # otherwise it is just the uMPC updated from the previous step() call
        if xref is not None:
            self.xref = xref # TODO: check that new reference is != old reference, do a minimal update of the QP matrices to improve speed
        self._update_QP_matrices_()
        if solve:
            self.solve()
    def solve(self):
        """ Solve the QP problem. """
        self.res = self.prob.solve()
        # Check solver status
        if self.res.info.status != 'solved':
            warnings.warn('OSQP did not solve the problem!')
            if self.raise_error:
                raise ValueError('OSQP did not solve the problem!')
    def __controller_function__(self, x, u, xref=None):
        """ This function is meant to be used for debug only.
        """
        self.update(x, u, xref=xref, solve=True)
        uMPC = self.output()
        return uMPC
    def _update_QP_matrices_(self):
        """ Update the QP vectors l, u and q in place for the new initial state
        x0_rh, the previous input uminus1_rh and (possibly) a new reference,
        then push the updated vectors to the OSQP instance. """
        x0_rh = self.x0_rh
        uminus1_rh = self.uminus1_rh
        Np = self.Np
        Nc = self.Nc
        nx = self.nx
        nu = self.nu
        Dumin = self.Dumin
        Dumax = self.Dumax
        QDu = self.QDu
        uref = self.uref
        Qeps = self.Qeps
        Qx = self.Qx
        QxN = self.QxN
        Qu = self.Qu
        xref = self.xref
        P_X = self.P_X
        self.l[:nx] = -x0_rh
        self.u[:nx] = -x0_rh
        self.l[(Np+1)*nx + (Np+1)*nx + (Nc)*nu:(Np+1)*nx + (Np+1)*nx + (Nc)*nu + nu] = Dumin + uminus1_rh[0:nu] # update constraint on \Delta u0: Dumin <= u0 - u_{-1}
        self.u[(Np+1)*nx + (Np+1)*nx + (Nc)*nu:(Np+1)*nx + (Np+1)*nx + (Nc)*nu + nu] = Dumax + uminus1_rh[0:nu] # update constraint on \Delta u0: u0 - u_{-1} <= Dumax
        # Update the linear term q. This part could be further optimized in case of constant xref...
        q_X = np.zeros((Np + 1) * nx)  # x_N
        self.J_CNST = 0.0
        if self.JX_ON:
            if xref.ndim == 2 and xref.shape[0] >= Np + 1: # xref is a vector per time-instant! experimental feature
                # Vectorized form of per-step -Qx.dot(xref[k, :]) accumulation.
                q_X += (-xref.reshape(1, -1) @ (P_X)).ravel()
                if self.COMPUTE_J_CNST:
                    self.J_CNST += -1/2 *q_X @ xref.ravel()
            else:
                q_X += np.hstack([np.kron(np.ones(Np), -Qx.dot(xref)),  # x0... x_N-1
                                   -QxN.dot(xref)])                      # x_N
                if self.COMPUTE_J_CNST:
                    self.J_CNST += 1/2*Np*(xref.dot(QxN.dot(xref))) + 1/2*xref.dot(QxN.dot(xref))
        else:
            pass
        q_U = np.zeros(Nc*nu)
        if self.JU_ON:
            self.J_CNST += 1/2* Np * (uref.dot(Qu.dot(uref)))
            if self.Nc == self.Np:
                q_U += np.kron(np.ones(Nc), -Qu.dot(uref))
            else: # Nc < Np. This formula is more general and could handle the case Nc = Np either. TODO: test
                iU = np.ones(Nc)
                iU[Nc-1] = (Np - Nc + 1)
                q_U += np.kron(iU, -Qu.dot(uref))
        # Filling P and q for J_DU
        if self.JDU_ON:
            self.J_CNST += 1/2*uminus1_rh.dot((QDu).dot(uminus1_rh))
            q_U += np.hstack([-QDu.dot(uminus1_rh),       # u0
                               np.zeros((Nc - 1) * nu)])   # u1..uN-1
        else:
            pass
        if self.SOFT_ON:
            q_eps = np.zeros((Np+1)*nx)
            self.q = np.hstack([q_X, q_U, q_eps])
        else:
            self.q = np.hstack([q_X, q_U])
        self.prob.update(l=self.l, u=self.u, q=self.q)
    def _compute_QP_matrices_(self):
        """ Build the full QP matrices P, q, A, l, u from the MPC data.

        The optimization vector is z = (x(0),...,x(Np), u(0),...,u(Nc-1)[, eps]),
        where eps are the soft-constraint slack variables (present if SOFT_ON).
        """
        Np = self.Np
        Nc = self.Nc
        nx = self.nx
        nu = self.nu
        Qx = self.Qx
        QxN = self.QxN
        Qu = self.Qu
        QDu = self.QDu
        xref = self.xref
        uref = self.uref
        uminus1 = self.uminus1
        Ad = self.Ad
        Bd = self.Bd
        x0 = self.x0
        xmin = self.xmin
        xmax = self.xmax
        umin = self.umin
        umax = self.umax
        Dumin = self.Dumin
        Dumax = self.Dumax
        Qeps = self.Qeps
        # Cast MPC problem to a QP: x = (x(0),x(1),...,x(N),u(0),...,u(N-1))
        # - quadratic objective
        P_X = sparse.csc_matrix(((Np+1)*nx, (Np+1)*nx))
        q_X = np.zeros((Np+1)*nx)  # x_N
        self.J_CNST = 0.0
        if self.JX_ON:
            P_X += sparse.block_diag([sparse.kron(sparse.eye(Np), Qx),  # x0...x_N-1
                                       QxN])                           # xN
            if xref.ndim == 2 and xref.shape[0] >= Np + 1: # xref is a vector per time-instant! experimental feature
                q_X += (-xref.reshape(1, -1) @ (P_X)).ravel()
                if self.COMPUTE_J_CNST:
                    self.J_CNST += -1/2 * q_X @ xref.ravel()
            else:
                q_X += np.hstack([np.kron(np.ones(Np), -Qx.dot(xref)),  # x0... x_N-1
                                   -QxN.dot(xref)])                      # x_N
                if self.COMPUTE_J_CNST:
                    self.J_CNST += 1/2*Np*(xref.dot(QxN.dot(xref))) + 1/2*xref.dot(QxN.dot(xref))
        else:
            pass
        # Filling P and q for J_U
        P_U = sparse.csc_matrix((Nc*nu, Nc*nu))
        q_U = np.zeros(Nc*nu)
        if self.JU_ON:
            self.J_CNST += 1/2*Np*(uref.dot(Qu.dot(uref)))
            if self.Nc == self.Np:
                P_U += sparse.kron(sparse.eye(Nc), Qu)
                q_U += np.kron(np.ones(Nc), -Qu.dot(uref))
            else: # Nc < Np. This formula is more general and could handle the case Nc = Np either. TODO: test
                iU = np.ones(Nc)
                iU[Nc-1] = (Np - Nc + 1)
                P_U += sparse.kron(sparse.diags(iU), Qu)
                q_U += np.kron(iU, -Qu.dot(uref))
        # Filling P and q for J_DU
        if self.JDU_ON:
            self.J_CNST += 1/2*uminus1.dot((QDu).dot(uminus1))
            iDu = 2 * np.eye(Nc) - np.eye(Nc, k=1) - np.eye(Nc, k=-1)
            iDu[Nc - 1, Nc - 1] = 1
            P_U += sparse.kron(iDu, QDu)
            q_U += np.hstack([-QDu.dot(uminus1),          # u0
                               np.zeros((Nc - 1) * nu)])   # u1..uN-1
        else:
            pass
        if self.SOFT_ON:
            P_eps = sparse.kron(np.eye((Np+1)), Qeps)
            q_eps = np.zeros((Np+1)*nx)
        # Linear constraints
        # - linear dynamics x_k+1 = Ax_k + Bu_k
        Ax = sparse.kron(sparse.eye(Np + 1), -sparse.eye(nx)) + sparse.kron(sparse.eye(Np + 1, k=-1), Ad)
        iBu = sparse.vstack([sparse.csc_matrix((1, Nc)),
                             sparse.eye(Nc)])
        if self.Nc < self.Np: # expand A matrix if Nc < Nu (see notes)
            iBu = sparse.vstack([iBu,
                                 sparse.hstack([sparse.csc_matrix((Np - Nc, Nc - 1)), np.ones((Np - Nc, 1))])
                                 ])
        Bu = sparse.kron(iBu, Bd)
        n_eps = (Np + 1) * nx
        Aeq_dyn = sparse.hstack([Ax, Bu])
        if self.SOFT_ON:
            Aeq_dyn = sparse.hstack([Aeq_dyn, sparse.csc_matrix((Aeq_dyn.shape[0], n_eps))]) # For soft constraints slack variables
        leq_dyn = np.hstack([-x0, np.zeros(Np * nx)])
        ueq_dyn = leq_dyn # for equality constraints -> upper bound = lower bound!
        # - bounds on x
        Aineq_x = sparse.hstack([sparse.eye((Np + 1) * nx), sparse.csc_matrix(((Np+1)*nx, Nc*nu))])
        if self.SOFT_ON:
            Aineq_x = sparse.hstack([Aineq_x, sparse.eye(n_eps)]) # For soft constraints slack variables
        lineq_x = np.kron(np.ones(Np + 1), xmin) # lower bound of inequalities
        uineq_x = np.kron(np.ones(Np + 1), xmax) # upper bound of inequalities
        # - bounds on u
        Aineq_u = sparse.hstack([sparse.csc_matrix((Nc*nu, (Np+1)*nx)), sparse.eye(Nc * nu)])
        if self.SOFT_ON:
            Aineq_u = sparse.hstack([Aineq_u, sparse.csc_matrix((Aineq_u.shape[0], n_eps))]) # For soft constraints slack variables
        lineq_u = np.kron(np.ones(Nc), umin) # lower bound of inequalities
        uineq_u = np.kron(np.ones(Nc), umax) # upper bound of inequalities
        # - bounds on \Delta u
        Aineq_du = sparse.vstack([sparse.hstack([np.zeros((nu, (Np + 1) * nx)), sparse.eye(nu), np.zeros((nu, (Nc - 1) * nu))]),  # for u0 - u-1
                                   sparse.hstack([np.zeros((Nc * nu, (Np+1) * nx)), -sparse.eye(Nc * nu) + sparse.eye(Nc * nu, k=1)])  # for uk - uk-1, k=1...Np
                                   ]
                                  )
        if self.SOFT_ON:
            Aineq_du = sparse.hstack([Aineq_du, sparse.csc_matrix((Aineq_du.shape[0], n_eps))])
        uineq_du = np.kron(np.ones(Nc+1), Dumax) #np.ones((Nc+1) * nu)*Dumax
        uineq_du[0:nu] += self.uminus1[0:nu]
        lineq_du = np.kron(np.ones(Nc+1), Dumin) #np.ones((Nc+1) * nu)*Dumin
        lineq_du[0:nu] += self.uminus1[0:nu] # works for nonscalar u?
        # - OSQP constraints (explicit positivity of the slack variables is not necessary)
        A = sparse.vstack([Aeq_dyn, Aineq_x, Aineq_u, Aineq_du]).tocsc()
        l = np.hstack([leq_dyn, lineq_x, lineq_u, lineq_du])
        u = np.hstack([ueq_dyn, uineq_x, uineq_u, uineq_du])
        # assign all
        if self.SOFT_ON:
            self.P = sparse.block_diag([P_X, P_U, P_eps], format='csc')
            self.q = np.hstack([q_X, q_U, q_eps])
        else:
            self.P = sparse.block_diag([P_X, P_U], format='csc')
            self.q = np.hstack([q_X, q_U])
        self.A = A
        self.l = l
        self.u = u
        self.P_X = P_X
if __name__ == '__main__':
    # Demo: closed-loop MPC regulation of a point mass (double integrator
    # with viscous friction) toward a position reference, with plots.
    import time
    import matplotlib.pyplot as plt
    # Constants #
    Ts = 0.2 # sampling time (s)
    M = 2 # mass (Kg)
    b = 0.3 # friction coefficient (N*s/m)
    # Continuous-time system matrices
    Ac = np.array([
        [0.0, 1.0],
        [0, -b/M]]
    )
    Bc = np.array([
        [0.0],
        [1/M]
    ])
    [nx, nu] = Bc.shape # number of states and number or inputs
    # Forward euler discretization
    Ad = np.eye(nx) + Ac*Ts
    Bd = Bc*Ts
    # Reference input and states
    pref = 7.0
    vref = 0.0
    xref = np.array([pref, vref]) # reference state
    uref = np.array([0.0])    # reference input
    uminus1 = np.array([0.0])  # input at time step negative one - used to penalize the first delta u at time instant 0. Could be the same as uref.
    # Constraints
    xmin = np.array([-10, -10.0])
    xmax = np.array([7.0, 10.0])
    umin = np.array([-1.2])
    umax = np.array([1.2])
    Dumin = np.array([-2e-1])
    Dumax = np.array([2e-1])
    # Objective function
    Qx = sparse.diags([0.5, 0.1]) # Quadratic cost for states x0, x1, ..., x_N-1
    QxN = sparse.diags([0.5, 0.1]) # Quadratic cost for xN
    Qu = 2.0 * sparse.eye(1) # Quadratic cost for u0, u1, ...., u_N-1
    QDu = 10.0 * sparse.eye(1) # Quadratic cost for Du0, Du1, ...., Du_N-1
    # Initial state
    x0 = np.array([0.1, 0.2]) # initial state
    # Prediction horizon
    Np = 25
    Nc = 10
    # Time-dependent reference: the same set-point repeated over the horizon.
    Xref = np.kron(np.ones((Np + 1,1)), xref)
    K = MPCController(Ad, Bd, Np=Np, Nc=Nc, x0=x0, xref=Xref, uminus1=uminus1,
                      Qx=Qx, QxN=QxN, Qu=Qu, QDu=QDu,
                      xmin=xmin, xmax=xmax, umin=umin, umax=umax, Dumin=Dumin, Dumax=Dumax)
    K.setup()
    # Simulate in closed loop
    [nx, nu] = Bd.shape # number of states and number or inputs
    len_sim = 40 # simulation length (s)
    nsim = int(len_sim/Ts) # simulation length(timesteps)
    xsim = np.zeros((nsim, nx))
    usim = np.zeros((nsim, nu))
    tsim = np.arange(0, nsim)*Ts
    time_start = time.time()
    xstep = x0
    # Receding-horizon loop: get the MPC input, advance the plant one step,
    # feed the new state back to the controller and re-solve.
    for i in range(nsim):
        uMPC, info = K.output(return_u_seq=True, return_x_seq=True, return_eps_seq=True, return_status=True)
        xstep = Ad.dot(xstep) + Bd.dot(uMPC) # system step
        K.update(xstep, xref=Xref) # update with measurement
        K.solve()
        xsim[i, :] = xstep
        usim[i, :] = uMPC
    time_sim = time.time() - time_start
    # Plot position, velocity and input against their references.
    fig, axes = plt.subplots(3,1, figsize=(10,10))
    axes[0].plot(tsim, xsim[:,0], "k", label='p')
    axes[0].plot(tsim, xref[0]*np.ones(np.shape(tsim)), "r--", label="pref")
    axes[0].set_title("Position (m)")
    axes[1].plot(tsim, xsim[:,1], label="v")
    axes[1].plot(tsim, xref[1]*np.ones(np.shape(tsim)), "r--", label="vref")
    axes[1].set_title("Velocity (m/s)")
    axes[2].plot(tsim, usim[:,0], label="u")
    axes[2].plot(tsim, uref*np.ones(np.shape(tsim)), "r--", label="uref")
    axes[2].set_title("Force (N)")
    for ax in axes:
        ax.grid(True)
        ax.legend()
| [
"scipy.sparse.kron",
"numpy.ones",
"numpy.shape",
"numpy.arange",
"scipy.sparse.eye",
"numpy.copy",
"osqp.OSQP",
"matplotlib.pyplot.subplots",
"scipy.sparse.diags",
"numpy.hstack",
"scipy.sparse.block_diag",
"scipy.sparse.vstack",
"numpy.zeros",
"time.time",
"scipy.sparse.csc_matrix",
... | [((23648, 23683), 'numpy.array', 'np.array', (['[[0.0, 1.0], [0, -b / M]]'], {}), '([[0.0, 1.0], [0, -b / M]])\n', (23656, 23683), True, 'import numpy as np\n'), ((23713, 23739), 'numpy.array', 'np.array', (['[[0.0], [1 / M]]'], {}), '([[0.0], [1 / M]])\n', (23721, 23739), True, 'import numpy as np\n'), ((23979, 24001), 'numpy.array', 'np.array', (['[pref, vref]'], {}), '([pref, vref])\n', (23987, 24001), True, 'import numpy as np\n'), ((24031, 24046), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (24039, 24046), True, 'import numpy as np\n'), ((24082, 24097), 'numpy.array', 'np.array', (['[0.0]'], {}), '([0.0])\n', (24090, 24097), True, 'import numpy as np\n'), ((24249, 24271), 'numpy.array', 'np.array', (['[-10, -10.0]'], {}), '([-10, -10.0])\n', (24257, 24271), True, 'import numpy as np\n'), ((24283, 24304), 'numpy.array', 'np.array', (['[7.0, 10.0]'], {}), '([7.0, 10.0])\n', (24291, 24304), True, 'import numpy as np\n'), ((24319, 24335), 'numpy.array', 'np.array', (['[-1.2]'], {}), '([-1.2])\n', (24327, 24335), True, 'import numpy as np\n'), ((24347, 24362), 'numpy.array', 'np.array', (['[1.2]'], {}), '([1.2])\n', (24355, 24362), True, 'import numpy as np\n'), ((24376, 24392), 'numpy.array', 'np.array', (['[-0.2]'], {}), '([-0.2])\n', (24384, 24392), True, 'import numpy as np\n'), ((24406, 24421), 'numpy.array', 'np.array', (['[0.2]'], {}), '([0.2])\n', (24414, 24421), True, 'import numpy as np\n'), ((24458, 24482), 'scipy.sparse.diags', 'sparse.diags', (['[0.5, 0.1]'], {}), '([0.5, 0.1])\n', (24470, 24482), True, 'import scipy.sparse as sparse\n'), ((24542, 24566), 'scipy.sparse.diags', 'sparse.diags', (['[0.5, 0.1]'], {}), '([0.5, 0.1])\n', (24554, 24566), True, 'import scipy.sparse as sparse\n'), ((24780, 24800), 'numpy.array', 'np.array', (['[0.1, 0.2]'], {}), '([0.1, 0.2])\n', (24788, 24800), True, 'import numpy as np\n'), ((25358, 25378), 'numpy.zeros', 'np.zeros', (['(nsim, nx)'], {}), '((nsim, nx))\n', (25366, 25378), True, 'import 
numpy as np\n'), ((25390, 25410), 'numpy.zeros', 'np.zeros', (['(nsim, nu)'], {}), '((nsim, nu))\n', (25398, 25410), True, 'import numpy as np\n'), ((25462, 25473), 'time.time', 'time.time', ([], {}), '()\n', (25471, 25473), False, 'import time\n'), ((25874, 25910), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(10, 10)'}), '(3, 1, figsize=(10, 10))\n', (25886, 25910), True, 'import matplotlib.pyplot as plt\n'), ((8844, 8855), 'osqp.OSQP', 'osqp.OSQP', ([], {}), '()\n', (8853, 8855), False, 'import osqp\n'), ((9380, 9396), 'numpy.copy', 'np.copy', (['self.x0'], {}), '(self.x0)\n', (9387, 9396), True, 'import numpy as np\n'), ((9423, 9444), 'numpy.copy', 'np.copy', (['self.uminus1'], {}), '(self.uminus1)\n', (9430, 9444), True, 'import numpy as np\n'), ((14584, 14607), 'numpy.zeros', 'np.zeros', (['((Np + 1) * nx)'], {}), '((Np + 1) * nx)\n', (14592, 14607), True, 'import numpy as np\n'), ((15582, 15599), 'numpy.zeros', 'np.zeros', (['(Nc * nu)'], {}), '(Nc * nu)\n', (15590, 15599), True, 'import numpy as np\n'), ((17198, 17247), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['((Np + 1) * nx, (Np + 1) * nx)'], {}), '(((Np + 1) * nx, (Np + 1) * nx))\n', (17215, 17247), True, 'import scipy.sparse as sparse\n'), ((17254, 17277), 'numpy.zeros', 'np.zeros', (['((Np + 1) * nx)'], {}), '((Np + 1) * nx)\n', (17262, 17277), True, 'import numpy as np\n'), ((18383, 18420), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Nc * nu, Nc * nu)'], {}), '((Nc * nu, Nc * nu))\n', (18400, 18420), True, 'import scipy.sparse as sparse\n'), ((18431, 18448), 'numpy.zeros', 'np.zeros', (['(Nc * nu)'], {}), '(Nc * nu)\n', (18439, 18448), True, 'import numpy as np\n'), ((20083, 20103), 'scipy.sparse.kron', 'sparse.kron', (['iBu', 'Bd'], {}), '(iBu, Bd)\n', (20094, 20103), True, 'import scipy.sparse as sparse\n'), ((20153, 20176), 'scipy.sparse.hstack', 'sparse.hstack', (['[Ax, Bu]'], {}), '([Ax, Bu])\n', (20166, 20176), True, 'import scipy.sparse as 
sparse\n'), ((22735, 22783), 'numpy.hstack', 'np.hstack', (['[leq_dyn, lineq_x, lineq_u, lineq_du]'], {}), '([leq_dyn, lineq_x, lineq_u, lineq_du])\n', (22744, 22783), True, 'import numpy as np\n'), ((22796, 22844), 'numpy.hstack', 'np.hstack', (['[ueq_dyn, uineq_x, uineq_u, uineq_du]'], {}), '([ueq_dyn, uineq_x, uineq_u, uineq_du])\n', (22805, 22844), True, 'import numpy as np\n'), ((23870, 23880), 'numpy.eye', 'np.eye', (['nx'], {}), '(nx)\n', (23876, 23880), True, 'import numpy as np\n'), ((24607, 24620), 'scipy.sparse.eye', 'sparse.eye', (['(1)'], {}), '(1)\n', (24617, 24620), True, 'import scipy.sparse as sparse\n'), ((24686, 24699), 'scipy.sparse.eye', 'sparse.eye', (['(1)'], {}), '(1)\n', (24696, 24699), True, 'import scipy.sparse as sparse\n'), ((24887, 24907), 'numpy.ones', 'np.ones', (['(Np + 1, 1)'], {}), '((Np + 1, 1))\n', (24894, 24907), True, 'import numpy as np\n'), ((25422, 25440), 'numpy.arange', 'np.arange', (['(0)', 'nsim'], {}), '(0, nsim)\n', (25431, 25440), True, 'import numpy as np\n'), ((25832, 25843), 'time.time', 'time.time', ([], {}), '()\n', (25841, 25843), False, 'import time\n'), ((4013, 4030), 'numpy.zeros', 'np.zeros', (['self.nx'], {}), '(self.nx)\n', (4021, 4030), True, 'import numpy as np\n'), ((4491, 4508), 'numpy.zeros', 'np.zeros', (['self.nx'], {}), '(self.nx)\n', (4499, 4508), True, 'import numpy as np\n'), ((4786, 4803), 'numpy.zeros', 'np.zeros', (['self.nu'], {}), '(self.nu)\n', (4794, 4803), True, 'import numpy as np\n'), ((5394, 5422), 'numpy.zeros', 'np.zeros', (['(self.nx, self.nx)'], {}), '((self.nx, self.nx))\n', (5402, 5422), True, 'import numpy as np\n'), ((6022, 6050), 'numpy.zeros', 'np.zeros', (['(self.nu, self.nu)'], {}), '((self.nu, self.nu))\n', (6030, 6050), True, 'import numpy as np\n'), ((6342, 6370), 'numpy.zeros', 'np.zeros', (['(self.nu, self.nu)'], {}), '((self.nu, self.nu))\n', (6350, 6370), True, 'import numpy as np\n'), ((8172, 8191), 'scipy.sparse.eye', 'sparse.eye', (['self.nx'], {}), 
'(self.nx)\n', (8182, 8191), True, 'import scipy.sparse as sparse\n'), ((13266, 13314), 'warnings.warn', 'warnings.warn', (['"""OSQP did not solve the problem!"""'], {}), "('OSQP did not solve the problem!')\n", (13279, 13314), False, 'import warnings\n'), ((16357, 16380), 'numpy.zeros', 'np.zeros', (['((Np + 1) * nx)'], {}), '((Np + 1) * nx)\n', (16365, 16380), True, 'import numpy as np\n'), ((16398, 16426), 'numpy.hstack', 'np.hstack', (['[q_X, q_U, q_eps]'], {}), '([q_X, q_U, q_eps])\n', (16407, 16426), True, 'import numpy as np\n'), ((16462, 16483), 'numpy.hstack', 'np.hstack', (['[q_X, q_U]'], {}), '([q_X, q_U])\n', (16471, 16483), True, 'import numpy as np\n'), ((19219, 19240), 'scipy.sparse.kron', 'sparse.kron', (['iDu', 'QDu'], {}), '(iDu, QDu)\n', (19230, 19240), True, 'import scipy.sparse as sparse\n'), ((19508, 19531), 'numpy.zeros', 'np.zeros', (['((Np + 1) * nx)'], {}), '((Np + 1) * nx)\n', (19516, 19531), True, 'import numpy as np\n'), ((20754, 20769), 'numpy.ones', 'np.ones', (['(Np + 1)'], {}), '(Np + 1)\n', (20761, 20769), True, 'import numpy as np\n'), ((20833, 20848), 'numpy.ones', 'np.ones', (['(Np + 1)'], {}), '(Np + 1)\n', (20840, 20848), True, 'import numpy as np\n'), ((21164, 21175), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (21171, 21175), True, 'import numpy as np\n'), ((21243, 21254), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (21250, 21254), True, 'import numpy as np\n'), ((21854, 21869), 'numpy.ones', 'np.ones', (['(Nc + 1)'], {}), '(Nc + 1)\n', (21861, 21869), True, 'import numpy as np\n'), ((21977, 21992), 'numpy.ones', 'np.ones', (['(Nc + 1)'], {}), '(Nc + 1)\n', (21984, 21992), True, 'import numpy as np\n'), ((22913, 22963), 'scipy.sparse.block_diag', 'sparse.block_diag', (['[P_X, P_U, P_eps]'], {'format': '"""csc"""'}), "([P_X, P_U, P_eps], format='csc')\n", (22930, 22963), True, 'import scipy.sparse as sparse\n'), ((22985, 23013), 'numpy.hstack', 'np.hstack', (['[q_X, q_U, q_eps]'], {}), '([q_X, q_U, q_eps])\n', 
(22994, 23013), True, 'import numpy as np\n'), ((23049, 23092), 'scipy.sparse.block_diag', 'sparse.block_diag', (['[P_X, P_U]'], {'format': '"""csc"""'}), "([P_X, P_U], format='csc')\n", (23066, 23092), True, 'import scipy.sparse as sparse\n'), ((23114, 23135), 'numpy.hstack', 'np.hstack', (['[q_X, q_U]'], {}), '([q_X, q_U])\n', (23123, 23135), True, 'import numpy as np\n'), ((6947, 6963), 'numpy.ones', 'np.ones', (['self.nx'], {}), '(self.nx)\n', (6954, 6963), True, 'import numpy as np\n'), ((7508, 7524), 'numpy.ones', 'np.ones', (['self.nu'], {}), '(self.nu)\n', (7515, 7524), True, 'import numpy as np\n'), ((8083, 8099), 'numpy.ones', 'np.ones', (['self.nu'], {}), '(self.nu)\n', (8090, 8099), True, 'import numpy as np\n'), ((15910, 15921), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (15917, 15921), True, 'import numpy as np\n'), ((18811, 18822), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (18818, 18822), True, 'import numpy as np\n'), ((19147, 19163), 'numpy.eye', 'np.eye', (['Nc'], {'k': '(-1)'}), '(Nc, k=-1)\n', (19153, 19163), True, 'import numpy as np\n'), ((19466, 19480), 'numpy.eye', 'np.eye', (['(Np + 1)'], {}), '(Np + 1)\n', (19472, 19480), True, 'import numpy as np\n'), ((19632, 19650), 'scipy.sparse.eye', 'sparse.eye', (['(Np + 1)'], {}), '(Np + 1)\n', (19642, 19650), True, 'import scipy.sparse as sparse\n'), ((19683, 19707), 'scipy.sparse.eye', 'sparse.eye', (['(Np + 1)'], {'k': '(-1)'}), '(Np + 1, k=-1)\n', (19693, 19707), True, 'import scipy.sparse as sparse\n'), ((19742, 19768), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(1, Nc)'], {}), '((1, Nc))\n', (19759, 19768), True, 'import scipy.sparse as sparse\n'), ((19799, 19813), 'scipy.sparse.eye', 'sparse.eye', (['Nc'], {}), '(Nc)\n', (19809, 19813), True, 'import scipy.sparse as sparse\n'), ((20369, 20386), 'numpy.zeros', 'np.zeros', (['(Np * nx)'], {}), '(Np * nx)\n', (20377, 20386), True, 'import numpy as np\n'), ((20531, 20556), 'scipy.sparse.eye', 'sparse.eye', (['((Np + 1) * 
nx)'], {}), '((Np + 1) * nx)\n', (20541, 20556), True, 'import scipy.sparse as sparse\n'), ((20558, 20601), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['((Np + 1) * nx, Nc * nu)'], {}), '(((Np + 1) * nx, Nc * nu))\n', (20575, 20601), True, 'import scipy.sparse as sparse\n'), ((20920, 20963), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Nc * nu, (Np + 1) * nx)'], {}), '((Nc * nu, (Np + 1) * nx))\n', (20937, 20963), True, 'import scipy.sparse as sparse\n'), ((20959, 20978), 'scipy.sparse.eye', 'sparse.eye', (['(Nc * nu)'], {}), '(Nc * nu)\n', (20969, 20978), True, 'import scipy.sparse as sparse\n'), ((22662, 22714), 'scipy.sparse.vstack', 'sparse.vstack', (['[Aeq_dyn, Aineq_x, Aineq_u, Aineq_du]'], {}), '([Aeq_dyn, Aineq_x, Aineq_u, Aineq_du])\n', (22675, 22714), True, 'import scipy.sparse as sparse\n'), ((25998, 26012), 'numpy.shape', 'np.shape', (['tsim'], {}), '(tsim)\n', (26006, 26012), True, 'import numpy as np\n'), ((26159, 26173), 'numpy.shape', 'np.shape', (['tsim'], {}), '(tsim)\n', (26167, 26173), True, 'import numpy as np\n'), ((26319, 26333), 'numpy.shape', 'np.shape', (['tsim'], {}), '(tsim)\n', (26327, 26333), True, 'import numpy as np\n'), ((6667, 6683), 'numpy.ones', 'np.ones', (['self.nx'], {}), '(self.nx)\n', (6674, 6683), True, 'import numpy as np\n'), ((7228, 7244), 'numpy.ones', 'np.ones', (['self.nu'], {}), '(self.nu)\n', (7235, 7244), True, 'import numpy as np\n'), ((7796, 7812), 'numpy.ones', 'np.ones', (['self.nu'], {}), '(self.nu)\n', (7803, 7812), True, 'import numpy as np\n'), ((15749, 15760), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (15756, 15760), True, 'import numpy as np\n'), ((16239, 16262), 'numpy.zeros', 'np.zeros', (['((Nc - 1) * nu)'], {}), '((Nc - 1) * nu)\n', (16247, 16262), True, 'import numpy as np\n'), ((18600, 18614), 'scipy.sparse.eye', 'sparse.eye', (['Nc'], {}), '(Nc)\n', (18610, 18614), True, 'import scipy.sparse as sparse\n'), ((18651, 18662), 'numpy.ones', 'np.ones', (['Nc'], {}), '(Nc)\n', (18658, 
18662), True, 'import numpy as np\n'), ((18899, 18915), 'scipy.sparse.diags', 'sparse.diags', (['iU'], {}), '(iU)\n', (18911, 18915), True, 'import scipy.sparse as sparse\n'), ((19129, 19144), 'numpy.eye', 'np.eye', (['Nc'], {'k': '(1)'}), '(Nc, k=1)\n', (19135, 19144), True, 'import numpy as np\n'), ((19336, 19359), 'numpy.zeros', 'np.zeros', (['((Nc - 1) * nu)'], {}), '((Nc - 1) * nu)\n', (19344, 19359), True, 'import numpy as np\n'), ((19653, 19667), 'scipy.sparse.eye', 'sparse.eye', (['nx'], {}), '(nx)\n', (19663, 19667), True, 'import scipy.sparse as sparse\n'), ((20248, 20292), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Aeq_dyn.shape[0], n_eps)'], {}), '((Aeq_dyn.shape[0], n_eps))\n', (20265, 20292), True, 'import scipy.sparse as sparse\n'), ((20669, 20686), 'scipy.sparse.eye', 'sparse.eye', (['n_eps'], {}), '(n_eps)\n', (20679, 20686), True, 'import scipy.sparse as sparse\n'), ((21052, 21096), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Aineq_u.shape[0], n_eps)'], {}), '((Aineq_u.shape[0], n_eps))\n', (21069, 21096), True, 'import scipy.sparse as sparse\n'), ((21778, 21823), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Aineq_du.shape[0], n_eps)'], {}), '((Aineq_du.shape[0], n_eps))\n', (21795, 21823), True, 'import scipy.sparse as sparse\n'), ((17380, 17394), 'scipy.sparse.eye', 'sparse.eye', (['Np'], {}), '(Np)\n', (17390, 17394), True, 'import scipy.sparse as sparse\n'), ((19116, 19126), 'numpy.eye', 'np.eye', (['Nc'], {}), '(Nc)\n', (19122, 19126), True, 'import numpy as np\n'), ((21378, 21407), 'numpy.zeros', 'np.zeros', (['(nu, (Np + 1) * nx)'], {}), '((nu, (Np + 1) * nx))\n', (21386, 21407), True, 'import numpy as np\n'), ((21409, 21423), 'scipy.sparse.eye', 'sparse.eye', (['nu'], {}), '(nu)\n', (21419, 21423), True, 'import scipy.sparse as sparse\n'), ((21425, 21454), 'numpy.zeros', 'np.zeros', (['(nu, (Nc - 1) * nu)'], {}), '((nu, (Nc - 1) * nu))\n', (21433, 21454), True, 'import numpy as np\n'), ((21523, 21557), 
'numpy.zeros', 'np.zeros', (['(Nc * nu, (Np + 1) * nx)'], {}), '((Nc * nu, (Np + 1) * nx))\n', (21531, 21557), True, 'import numpy as np\n'), ((15267, 15278), 'numpy.ones', 'np.ones', (['Np'], {}), '(Np)\n', (15274, 15278), True, 'import numpy as np\n'), ((18034, 18045), 'numpy.ones', 'np.ones', (['Np'], {}), '(Np)\n', (18041, 18045), True, 'import numpy as np\n'), ((19973, 20009), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(Np - Nc, Nc - 1)'], {}), '((Np - Nc, Nc - 1))\n', (19990, 20009), True, 'import scipy.sparse as sparse\n'), ((20011, 20032), 'numpy.ones', 'np.ones', (['(Np - Nc, 1)'], {}), '((Np - Nc, 1))\n', (20018, 20032), True, 'import numpy as np\n'), ((21580, 21604), 'scipy.sparse.eye', 'sparse.eye', (['(Nc * nu)'], {'k': '(1)'}), '(Nc * nu, k=1)\n', (21590, 21604), True, 'import scipy.sparse as sparse\n'), ((21558, 21577), 'scipy.sparse.eye', 'sparse.eye', (['(Nc * nu)'], {}), '(Nc * nu)\n', (21568, 21577), True, 'import scipy.sparse as sparse\n')] |
import numpy as np
from tensorflow_serving_client import TensorflowServingClient
from keras_model_specs import ModelSpec
# Local port on which each model's TensorFlow Serving instance listens.
# Keys must match the ModelSpec names passed to query_model().
MODEL_SERVING_PORTS = {
    'mobilenet_v1': 9001,
    'inception_v3': 9002,
    'xception': 9003
}
def query_model(model_spec_name):
    """Send the fixture cat image to the locally served model and return its prediction."""
    spec = ModelSpec.get(model_spec_name)
    serving_client = TensorflowServingClient('localhost', MODEL_SERVING_PORTS[model_spec_name])
    fixture_image = spec.load_image('tests/fixtures/files/cat.jpg')
    return serving_client.make_prediction(fixture_image, 'image')
def assert_predictions(response, expected_top_5, imagenet_dictionary):
    """Assert that *response* holds exactly 1000 class probabilities whose
    top-5 (label, score) pairs match *expected_top_5*."""
    assert 'class_probabilities' in response
    probabilities = response['class_probabilities']
    assert len(probabilities) == 1
    assert len(probabilities[0]) == 1000
    # Pair each probability with its ImageNet label and keep the five largest.
    labelled = [(label, float(score)) for label, score in zip(imagenet_dictionary, probabilities[0])]
    top_5 = sorted(labelled, key=lambda pair: pair[1], reverse=True)[:5]
    print(top_5)
    assert [label for label, _ in top_5] == [label for label, _ in expected_top_5]
    np.testing.assert_array_almost_equal(
        np.array([score for _, score in top_5]),
        np.array([score for _, score in expected_top_5]),
    )
def test_mobilenet_v1(imagenet_dictionary):
    """The served mobilenet_v1 model should reproduce these known top-5 scores."""
    expected_top_5 = [
        ('tiger cat', 0.334694504737854),
        ('Egyptian cat', 0.2851393222808838),
        ('tabby, tabby cat', 0.15471667051315308),
        ('kit fox, Vulpes macrotis', 0.03160465136170387),
        ('lynx, catamount', 0.030886519700288773),
    ]
    response = query_model('mobilenet_v1')
    assert_predictions(response, expected_top_5, imagenet_dictionary)
def test_inception_v3(imagenet_dictionary):
    """The served inception_v3 model should reproduce these known top-5 scores."""
    expected_top_5 = [
        ('tiger cat', 0.4716886878013611),
        ('Egyptian cat', 0.127954363822937),
        ('Pembroke, Pembroke Welsh corgi', 0.07338221371173859),
        ('tabby, tabby cat', 0.052391838282346725),
        ('Cardigan, Cardigan Welsh corgi', 0.008323794230818748),
    ]
    response = query_model('inception_v3')
    assert_predictions(response, expected_top_5, imagenet_dictionary)
def test_xception(imagenet_dictionary):
    """The served xception model should reproduce these known top-5 scores."""
    expected_top_5 = [
        ('red fox, Vulpes vulpes', 0.10058529675006866),
        ('weasel', 0.09152575582265854),
        ('Pembroke, Pembroke Welsh corgi', 0.07581676542758942),
        ('tiger cat', 0.0746716633439064),
        ('kit fox, Vulpes macrotis', 0.06751589477062225),
    ]
    response = query_model('xception')
    assert_predictions(response, expected_top_5, imagenet_dictionary)
| [
"tensorflow_serving_client.TensorflowServingClient",
"keras_model_specs.ModelSpec.get",
"numpy.array"
] | [((276, 306), 'keras_model_specs.ModelSpec.get', 'ModelSpec.get', (['model_spec_name'], {}), '(model_spec_name)\n', (289, 306), False, 'from keras_model_specs import ModelSpec\n'), ((320, 394), 'tensorflow_serving_client.TensorflowServingClient', 'TensorflowServingClient', (['"""localhost"""', 'MODEL_SERVING_PORTS[model_spec_name]'], {}), "('localhost', MODEL_SERVING_PORTS[model_spec_name])\n", (343, 394), False, 'from tensorflow_serving_client import TensorflowServingClient\n'), ((1329, 1345), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1337, 1345), True, 'import numpy as np\n'), ((1347, 1372), 'numpy.array', 'np.array', (['expected_scores'], {}), '(expected_scores)\n', (1355, 1372), True, 'import numpy as np\n')] |
"""
各ユーザの合計の生産性と品質
"""
from __future__ import annotations
import copy
import logging
from enum import Enum
from pathlib import Path
from typing import Any, Collection, Optional
import bokeh
import bokeh.layouts
import bokeh.palettes
import numpy
import pandas
from annofabapi.models import TaskPhase
from bokeh.plotting import ColumnDataSource, figure
from annofabcli.common.utils import print_csv, read_multiheader_csv
from annofabcli.statistics.scatter import (
create_hover_tool,
get_color_from_palette,
plot_bubble,
plot_scatter,
write_bokeh_graph,
)
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class WorktimeType(Enum):
    """
    Kind of worktime used for productivity metrics.
    """
    ACTUAL = "actual"  # actual (reported) worktime
    MONITORED = "monitored"  # worktime measured by AnnoFab
class UserPerformance:
"""
各ユーザの合計の生産性と品質
"""
PLOT_WIDTH = 1200
PLOT_HEIGHT = 800
    def __init__(self, df: pandas.DataFrame):
        """
        Args:
            df: Per-user performance data; expected to have two-level
                (metric, phase) MultiIndex columns — see ``get_phase_list``.
        """
        self.df = df
        # Task phases actually present in the data, in TaskPhase order.
        self.phase_list = self.get_phase_list(df.columns)
    @staticmethod
    def _add_ratio_column_for_productivity_per_user(df: pandas.DataFrame, phase_list: list[str]):
        """Add, in place, the derived ratio/productivity/quality columns.

        Args:
            df: Per-user data with (metric, phase) MultiIndex columns; mutated in place.
            phase_list: Phases to compute per-phase columns for.
        """
        for phase in phase_list:
            # Share of monitored (AnnoFab-measured) worktime spent in this phase.
            df[("monitored_worktime_ratio", phase)] = (
                df[("monitored_worktime_hour", phase)] / df[("monitored_worktime_hour", "sum")]
            )
            # Estimate per-phase actual worktime by splitting the total actual
            # worktime according to the monitored-worktime ratio.
            df[("prediction_actual_worktime_hour", phase)] = (
                df[("actual_worktime_hour", "sum")] * df[("monitored_worktime_ratio", phase)]
            )
            # Productivity: worktime per input-data item and per annotation.
            df[("monitored_worktime/input_data_count", phase)] = (
                df[("monitored_worktime_hour", phase)] / df[("input_data_count", phase)]
            )
            df[("actual_worktime/input_data_count", phase)] = (
                df[("prediction_actual_worktime_hour", phase)] / df[("input_data_count", phase)]
            )
            df[("monitored_worktime/annotation_count", phase)] = (
                df[("monitored_worktime_hour", phase)] / df[("annotation_count", phase)]
            )
            df[("actual_worktime/annotation_count", phase)] = (
                df[("prediction_actual_worktime_hour", phase)] / df[("annotation_count", phase)]
            )
        # Quality metrics exist only for the annotation phase.
        phase = TaskPhase.ANNOTATION.value
        df[("pointed_out_inspection_comment_count/annotation_count", phase)] = (
            df[("pointed_out_inspection_comment_count", phase)] / df[("annotation_count", phase)]
        )
        df[("pointed_out_inspection_comment_count/input_data_count", phase)] = (
            df[("pointed_out_inspection_comment_count", phase)] / df[("input_data_count", phase)]
        )
        df[("rejected_count/task_count", phase)] = df[("rejected_count", phase)] / df[("task_count", phase)]
@staticmethod
def get_phase_list(columns: list[tuple[str, str]]) -> list[str]:
# multiindexの2段目を取得する
tmp_set = {c[1] for c in columns}
phase_list = []
for phase in TaskPhase:
if phase.value in tmp_set:
phase_list.append(phase.value)
return phase_list
    @classmethod
    def from_csv(cls, csv_file: Path) -> UserPerformance:
        """Build an instance from a multi-header CSV previously written by :meth:`to_csv`."""
        df = read_multiheader_csv(str(csv_file))
        return cls(df)
def actual_worktime_exists(self) -> bool:
"""実績作業時間が入力されているか否か"""
return self.df[("actual_worktime_hour", "sum")].sum() > 0
    @classmethod
    def from_df(
        cls, df_task_history: pandas.DataFrame, df_labor: pandas.DataFrame, df_worktime_ratio: pandas.DataFrame
    ) -> UserPerformance:
        """
        Compute per-user productivity from AnnoWork actual worktime.

        Args:
            df_task_history: task-history records
            df_labor: actual worktime records
            df_worktime_ratio: task counts apportioned by worktime
        Returns:
            A new UserPerformance instance.
        """
        def get_phase_list(columns) -> list[str]:
            # Local variant: *columns* is a flat list of phase names here,
            # unlike the class-level get_phase_list which reads tuples.
            phase_list = []
            for phase in TaskPhase:
                if phase.value in columns:
                    phase_list.append(phase.value)
            return phase_list
        # Total monitored worktime per user and phase.
        df_agg_task_history = df_task_history.pivot_table(
            values="worktime_hour", columns="phase", index="user_id", aggfunc=numpy.sum
        ).fillna(0)
        if len(df_labor) > 0:
            df_agg_labor = df_labor.pivot_table(values="actual_worktime_hour", index="user_id", aggfunc=numpy.sum)
            # Most recent date on which each user actually worked.
            df_tmp = df_labor[df_labor["actual_worktime_hour"] > 0].pivot_table(
                values="date", index="user_id", aggfunc=numpy.max
            )
            if len(df_tmp) > 0:
                df_agg_labor["last_working_date"] = df_tmp
            else:
                df_agg_labor["last_working_date"] = numpy.nan
            df = df_agg_task_history.join(df_agg_labor)
        else:
            # No labor records: fall back to zero actual worktime.
            df = df_agg_task_history
            df["actual_worktime_hour"] = 0
            df["last_working_date"] = None
        phase_list = get_phase_list(list(df.columns))
        df = df[["actual_worktime_hour", "last_working_date"] + phase_list].copy()
        # Promote the flat columns to the (metric, phase) MultiIndex layout.
        df.columns = pandas.MultiIndex.from_tuples(
            [("actual_worktime_hour", "sum"), ("last_working_date", "")]
            + [("monitored_worktime_hour", phase) for phase in phase_list]
        )
        df[("monitored_worktime_hour", "sum")] = df[[("monitored_worktime_hour", phase) for phase in phase_list]].sum(
            axis=1
        )
        # Production volume and quality counters per user and phase.
        df_agg_production = df_worktime_ratio.pivot_table(
            values=[
                "worktime_ratio_by_task",
                "input_data_count",
                "annotation_count",
                "pointed_out_inspection_comment_count",
                "rejected_count",
            ],
            columns="phase",
            index="user_id",
            aggfunc=numpy.sum,
        ).fillna(0)
        df_agg_production.rename(columns={"worktime_ratio_by_task": "task_count"}, inplace=True)
        df = df.join(df_agg_production)
        # Add the derived ratio columns.
        cls._add_ratio_column_for_productivity_per_user(df, phase_list=phase_list)
        # Drop columns that are meaningful only for the annotation phase.
        tmp_phase_list = copy.deepcopy(phase_list)
        tmp_phase_list.remove(TaskPhase.ANNOTATION.value)
        dropped_column = [("pointed_out_inspection_comment_count", phase) for phase in tmp_phase_list] + [
            ("rejected_count", phase) for phase in tmp_phase_list
        ]
        df = df.drop(dropped_column, axis=1)
        # Attach user metadata (first occurrence per user in the task history).
        df_user = df_task_history.groupby("user_id").first()[["username", "biography"]]
        df_user.columns = pandas.MultiIndex.from_tuples([("username", ""), ("biography", "")])
        df = df.join(df_user)
        df[("user_id", "")] = df.index
        return cls(df)
    @classmethod
    def merge(cls, obj1: UserPerformance, obj2: UserPerformance) -> UserPerformance:
        """Merge two per-user performance tables user-by-user.

        Numeric columns are summed; username/biography are taken from *obj1*'s
        row; last_working_date is the later of the two. Ratio columns are
        recomputed afterwards from the summed values.
        """
        def max_last_working_date(date1, date2):
            # Treat NaN as "no date" so max() compares strings only.
            if not isinstance(date1, str) and numpy.isnan(date1):
                date1 = ""
            if not isinstance(date2, str) and numpy.isnan(date2):
                date2 = ""
            max_date = max(date1, date2)
            if max_date == "":
                return numpy.nan
            else:
                return max_date
        def merge_row(row1: pandas.Series, row2: pandas.Series) -> pandas.Series:
            # Sum every numeric metric; non-numeric columns are handled below.
            string_column_list = ["username", "biography", "last_working_date"]
            sum_row = row1.drop(labels=string_column_list, level=0).fillna(0) + row2.drop(
                labels=string_column_list, level=0
            ).fillna(0)
            sum_row.loc["username", ""] = row1.loc["username", ""]
            sum_row.loc["biography", ""] = row1.loc["biography", ""]
            sum_row.loc["last_working_date", ""] = max_last_working_date(
                row1.loc["last_working_date", ""], row2.loc["last_working_date", ""]
            )
            return sum_row
        df1 = obj1.df
        df2 = obj2.df
        user_id_set = set(df1["user_id"]) | set(df2["user_id"])
        sum_df = df1.set_index("user_id").copy()
        added_df = df2.set_index("user_id")
        for user_id in user_id_set:
            if user_id not in added_df.index:
                # User only in df1: the row in sum_df is already correct.
                continue
            if user_id in sum_df.index:
                sum_df.loc[user_id] = merge_row(sum_df.loc[user_id], added_df.loc[user_id])
            else:
                sum_df.loc[user_id] = added_df.loc[user_id]
        # Recompute the derived ratio columns from the merged totals.
        phase_list = cls.get_phase_list(list(sum_df["monitored_worktime_hour"].columns))
        cls._add_ratio_column_for_productivity_per_user(sum_df, phase_list=phase_list)
        sum_df.reset_index(inplace=True)
        sum_df.sort_values(["user_id"], inplace=True)
        return cls(sum_df)
def _validate_df_for_output(self, output_file: Path) -> bool:
if len(self.df) == 0:
logger.warning(f"データが0件のため、{output_file} は出力しません。")
return False
return True
@staticmethod
def get_productivity_columns(phase_list: list[str]) -> list[tuple[str, str]]:
monitored_worktime_columns = (
[("monitored_worktime_hour", phase) for phase in phase_list]
+ [("monitored_worktime_hour", "sum")]
+ [("monitored_worktime_ratio", phase) for phase in phase_list]
)
production_columns = (
[("task_count", phase) for phase in phase_list]
+ [("input_data_count", phase) for phase in phase_list]
+ [("annotation_count", phase) for phase in phase_list]
)
actual_worktime_columns = [("actual_worktime_hour", "sum")] + [
("prediction_actual_worktime_hour", phase) for phase in phase_list
]
productivity_columns = (
[("monitored_worktime/input_data_count", phase) for phase in phase_list]
+ [("actual_worktime/input_data_count", phase) for phase in phase_list]
+ [("monitored_worktime/annotation_count", phase) for phase in phase_list]
+ [("actual_worktime/annotation_count", phase) for phase in phase_list]
)
inspection_comment_columns = [
("pointed_out_inspection_comment_count", TaskPhase.ANNOTATION.value),
("pointed_out_inspection_comment_count/input_data_count", TaskPhase.ANNOTATION.value),
("pointed_out_inspection_comment_count/annotation_count", TaskPhase.ANNOTATION.value),
]
rejected_count_columns = [
("rejected_count", TaskPhase.ANNOTATION.value),
("rejected_count/task_count", TaskPhase.ANNOTATION.value),
]
prior_columns = (
monitored_worktime_columns
+ production_columns
+ actual_worktime_columns
+ productivity_columns
+ inspection_comment_columns
+ rejected_count_columns
)
return prior_columns
def to_csv(self, output_file: Path) -> None:
if not self._validate_df_for_output(output_file):
return
value_columns = self.get_productivity_columns(self.phase_list)
user_columns = [("user_id", ""), ("username", ""), ("biography", ""), ("last_working_date", "")]
columns = user_columns + value_columns
print_csv(self.df[columns], str(output_file))
@staticmethod
def _plot_average_line(fig: bokeh.plotting.Figure, value: Optional[float], dimension: str):
if value is None:
return
span_average_line = bokeh.models.Span(
location=value,
dimension=dimension,
line_color="red",
line_width=0.5,
)
fig.add_layout(span_average_line)
@staticmethod
def _plot_quartile_line(fig: bokeh.plotting.Figure, quartile: Optional[tuple[float, float, float]], dimension: str):
if quartile is None:
return
for value in quartile:
span_average_line = bokeh.models.Span(
location=value,
dimension=dimension,
line_color="blue",
line_width=0.5,
)
fig.add_layout(span_average_line)
@staticmethod
def _get_average_value(df: pandas.DataFrame, numerator_column: Any, denominator_column: Any) -> Optional[float]:
numerator = df[numerator_column].sum()
denominator = df[denominator_column].sum()
if denominator > 0:
return numerator / denominator
else:
return None
@staticmethod
def _get_quartile_value(df: pandas.DataFrame, column: Any) -> Optional[tuple[float, float, float]]:
tmp = df[column].describe()
if tmp["count"] > 3:
return (tmp["25%"], tmp["50%"], tmp["75%"])
else:
return None
    @staticmethod
    def _create_div_element() -> bokeh.models.Div:
        """
        Create the explanatory <div> placed at the top of the HTML page
        (red line = average, blue lines = quartiles).
        """
        # The HTML below is user-facing output and intentionally kept in Japanese.
        return bokeh.models.Div(
            text="""<h4>グラフの見方</h4>
            <span style="color:red;">赤線</span>:平均値<br>
            <span style="color:blue;">青線</span>:四分位数<br>
            """
        )
    @staticmethod
    def _set_legend(fig: bokeh.plotting.Figure) -> None:
        """
        Configure the figure's legend (position, mute-on-click, title).
        """
        fig.legend.location = "top_left"
        fig.legend.click_policy = "mute"
        fig.legend.title = "biography"
        # NOTE(review): attributes are set above even when fig.legend is empty —
        # presumably bokeh tolerates that; the layout move applies only when a
        # legend actually exists.
        if len(fig.legend) > 0:
            legend = fig.legend[0]
            fig.add_layout(legend, "left")
    def get_summary(self) -> pandas.Series:
        """
        Return a pandas.Series with overall (all-user) productivity and quality.
        """
        columns_for_sum = [
            "monitored_worktime_hour",
            "task_count",
            "input_data_count",
            "annotation_count",
            "actual_worktime_hour",
            "prediction_actual_worktime_hour",
            "pointed_out_inspection_comment_count",
            "rejected_count",
        ]
        sum_series = self.df[columns_for_sum].sum()
        # Recompute ratio metrics on the summed Series (the helper is written
        # against a DataFrame but the same item-assignment works on a Series).
        self._add_ratio_column_for_productivity_per_user(sum_series, phase_list=self.phase_list)
        # Count how many users actually worked in each phase.
        for phase in self.phase_list:
            sum_series[("working_user_count", phase)] = (self.df[("task_count", phase)] > 0).sum()
        return sum_series
    def _plot_productivity(self, output_file: Path, worktime_type: WorktimeType):
        """Plot, per member, worktime-per-annotation against total worktime (one figure per phase)."""
        if not self._validate_df_for_output(output_file):
            return
        # Scatter plots cannot be rendered when numpy.inf is present, so replace it.
        df = self.df.replace(numpy.inf, numpy.nan)
        def create_figure(title: str) -> bokeh.plotting.Figure:
            return figure(
                plot_width=self.PLOT_WIDTH,
                plot_height=self.PLOT_HEIGHT,
                title=title,
                x_axis_label="累計作業時間[hour]",
                y_axis_label="アノテーションあたり作業時間[hour/annotation]",
            )
        # Pick the column-name prefix and display name for the worktime kind.
        # NOTE(review): worktime_name / worktime_key_for_phase are unbound if a
        # third WorktimeType member is ever added — confirm the enum stays two-valued.
        if worktime_type == WorktimeType.ACTUAL:
            worktime_name = "実績時間"
            worktime_key_for_phase = "prediction_actual"
        elif worktime_type == WorktimeType.MONITORED:
            worktime_name = "計測時間"
            worktime_key_for_phase = WorktimeType.MONITORED.value
        logger.debug(f"{output_file} を出力します。")
        DICT_PHASE_NAME = {
            TaskPhase.ANNOTATION.value: "教師付",
            TaskPhase.INSPECTION.value: "検査",
            TaskPhase.ACCEPTANCE.value: "受入",
        }
        figure_list = [
            create_figure(f"{DICT_PHASE_NAME[phase]}のアノテーションあたり作業時間と累計作業時間の関係({worktime_name})")
            for phase in self.phase_list
        ]
        df["biography"] = df["biography"].fillna("")
        # One scatter series per biography group, per phase figure.
        for biography_index, biography in enumerate(sorted(set(df["biography"]))):
            x_column = f"{worktime_key_for_phase}_worktime_hour"
            y_column = f"{worktime_type.value}_worktime/annotation_count"
            for fig, phase in zip(figure_list, self.phase_list):
                filtered_df = df[
                    (df["biography"] == biography) & df[(x_column, phase)].notna() & df[(y_column, phase)].notna()
                ]
                if len(filtered_df) == 0:
                    continue
                source = ColumnDataSource(data=filtered_df)
                plot_scatter(
                    fig=fig,
                    source=source,
                    x_column_name=f"{x_column}_{phase}",
                    y_column_name=f"{y_column}_{phase}",
                    text_column_name="username_",
                    legend_label=biography,
                    color=get_color_from_palette(biography_index),
                )
        # Overlay average and quartile reference lines.
        for fig, phase in zip(figure_list, self.phase_list):
            average_value = self._get_average_value(
                df,
                numerator_column=(f"{worktime_key_for_phase}_worktime_hour", phase),
                denominator_column=("annotation_count", phase),
            )
            self._plot_average_line(fig, average_value, dimension="width")
            quartile = self._get_quartile_value(df, (f"{worktime_type.value}_worktime/annotation_count", phase))
            self._plot_quartile_line(fig, quartile, dimension="width")
        # Hover tooltips and legends.
        for fig, phase in zip(figure_list, self.phase_list):
            tooltip_item = [
                "user_id_",
                "username_",
                "biography_",
                f"{worktime_key_for_phase}_worktime_hour_{phase}",
                f"task_count_{phase}",
                f"input_data_count_{phase}",
                f"annotation_count_{phase}",
                f"{worktime_type.value}_worktime/input_data_count_{phase}",
                f"{worktime_type.value}_worktime/annotation_count_{phase}",
            ]
            if worktime_type == WorktimeType.ACTUAL:
                tooltip_item.append("last_working_date_")
            hover_tool = create_hover_tool(tooltip_item)
            fig.add_tools(hover_tool)
            self._set_legend(fig)
        div_element = self._create_div_element()
        write_bokeh_graph(bokeh.layouts.column([div_element] + figure_list), output_file)
    def plot_productivity_from_monitored_worktime(self, output_file: Path):
        """
        Plot, per member, AnnoFab-monitored worktime and the productivity derived from it.
        """
        self._plot_productivity(output_file, worktime_type=WorktimeType.MONITORED)
    def plot_productivity_from_actual_worktime(self, output_file: Path):
        """
        Plot, per member, actual worktime and the productivity derived from it.
        """
        self._plot_productivity(output_file, worktime_type=WorktimeType.ACTUAL)
    def plot_quality(self, output_file: Path):
        """
        Plot per-member quality as scatter charts: rejections per task vs task
        count, and inspection comments per annotation vs annotation count.
        """
        if not self._validate_df_for_output(output_file):
            return
        # Scatter plots cannot be rendered when numpy.inf is present, so replace it.
        df = self.df.replace(numpy.inf, numpy.nan)
        def create_figure(title: str, x_axis_label: str, y_axis_label: str) -> bokeh.plotting.Figure:
            return figure(
                plot_width=self.PLOT_WIDTH,
                plot_height=self.PLOT_HEIGHT,
                title=title,
                x_axis_label=x_axis_label,
                y_axis_label=y_axis_label,
            )
        logger.debug(f"{output_file} を出力します。")
        figure_list = [
            create_figure(title=f"タスクあたり差し戻し回数とタスク数の関係", x_axis_label="タスク数", y_axis_label="タスクあたり差し戻し回数"),
            create_figure(
                title=f"アノテーションあたり検査コメント数とアノテーション数の関係", x_axis_label="アノテーション数", y_axis_label="アノテーションあたり検査コメント数"
            ),
        ]
        # (x, y) column pairs, one pair per figure above.
        column_pair_list = [
            ("task_count", "rejected_count/task_count"),
            ("annotation_count", "pointed_out_inspection_comment_count/annotation_count"),
        ]
        phase = "annotation"
        df["biography"] = df["biography"].fillna("")
        # One scatter series per biography group.
        for biography_index, biography in enumerate(sorted(set(df["biography"]))):
            for column_pair, fig in zip(column_pair_list, figure_list):
                x_column = column_pair[0]
                y_column = column_pair[1]
                filtered_df = df[
                    (df["biography"] == biography) & df[(x_column, phase)].notna() & df[(y_column, phase)].notna()
                ]
                if len(filtered_df) == 0:
                    continue
                source = ColumnDataSource(data=filtered_df)
                plot_scatter(
                    fig=fig,
                    source=source,
                    x_column_name=f"{x_column}_{phase}",
                    y_column_name=f"{y_column}_{phase}",
                    text_column_name="username_",
                    legend_label=biography,
                    color=get_color_from_palette(biography_index),
                )
        # Horizontal average lines (sum-of-numerator / sum-of-denominator).
        for column_pair, fig in zip(
            [("rejected_count", "task_count"), ("pointed_out_inspection_comment_count", "annotation_count")],
            figure_list,
        ):
            average_value = self._get_average_value(
                df, numerator_column=(column_pair[0], phase), denominator_column=(column_pair[1], phase)
            )
            self._plot_average_line(fig, average_value, dimension="width")
        # Horizontal quartile lines.
        for column, fig in zip(
            ["rejected_count/task_count", "pointed_out_inspection_comment_count/annotation_count"],
            figure_list,
        ):
            quartile = self._get_quartile_value(df, (column, phase))
            self._plot_quartile_line(fig, quartile, dimension="width")
        # Hover tooltips and legends.
        for fig in figure_list:
            tooltip_item = [
                "user_id_",
                "username_",
                "biography_",
                "last_working_date_",
                f"monitored_worktime_hour_{phase}",
                f"task_count_{phase}",
                f"input_data_count_{phase}",
                f"annotation_count_{phase}",
                f"rejected_count_{phase}",
                f"pointed_out_inspection_comment_count_{phase}",
                f"rejected_count/task_count_{phase}",
                f"pointed_out_inspection_comment_count/annotation_count_{phase}",
            ]
            hover_tool = create_hover_tool(tooltip_item)
            fig.add_tools(hover_tool)
            self._set_legend(fig)
        div_element = self._create_div_element()
        write_bokeh_graph(bokeh.layouts.column([div_element] + figure_list), output_file)
    def plot_quality_and_productivity_from_actual_worktime(
        self,
        output_file: Path,
    ):
        """
        Plot, per member, quality against productivity derived from actual worktime.
        """
        self._plot_quality_and_productivity(output_file, worktime_type=WorktimeType.ACTUAL)
    def plot_quality_and_productivity_from_monitored_worktime(
        self,
        output_file: Path,
    ):
        """
        Plot, per member, quality against productivity derived from monitored worktime.
        """
        self._plot_quality_and_productivity(output_file, worktime_type=WorktimeType.MONITORED)
    def _plot_quality_and_productivity(self, output_file: Path, worktime_type: WorktimeType):
        """
        Plot, per member, quality against productivity derived from the given
        worktime kind (bubble size = worktime).
        """
        if not self._validate_df_for_output(output_file):
            return
        # Column-name prefix for the chosen worktime kind.
        # NOTE(review): worktime_key_for_phase is unbound if a third
        # WorktimeType member is ever added — confirm the enum stays two-valued.
        if worktime_type == WorktimeType.ACTUAL:
            worktime_key_for_phase = "prediction_actual"
        elif worktime_type == WorktimeType.MONITORED:
            worktime_key_for_phase = WorktimeType.MONITORED.value
        # Scatter plots cannot be rendered when numpy.inf is present, so replace it.
        df = self.df.replace(numpy.inf, numpy.nan)
        def create_figure(title: str, x_axis_label: str, y_axis_label: str) -> bokeh.plotting.Figure:
            # NOTE(review): dimensions are hard-coded here instead of using
            # PLOT_WIDTH/PLOT_HEIGHT as the other plot methods do.
            return figure(
                plot_width=1200,
                plot_height=800,
                title=title,
                x_axis_label=x_axis_label,
                y_axis_label=y_axis_label,
            )
        logger.debug(f"{output_file} を出力します。")
        figure_list = [
            create_figure(
                title=f"アノテーションあたり作業時間とタスクあたり差し戻し回数の関係", x_axis_label="アノテーションあたり作業時間", y_axis_label="タスクあたり差し戻し回数"
            ),
            create_figure(
                title=f"アノテーションあたり作業時間とアノテーションあたり検査コメント数の関係",
                x_axis_label="アノテーションあたり作業時間",
                y_axis_label="アノテーションあたり検査コメント数",
            ),
        ]
        # (x, y) column pairs, one pair per figure above.
        column_pair_list = [
            (f"{worktime_type.value}_worktime/annotation_count", "rejected_count/task_count"),
            (
                f"{worktime_type.value}_worktime/annotation_count",
                "pointed_out_inspection_comment_count/annotation_count",
            ),
        ]
        phase = TaskPhase.ANNOTATION.value
        df["biography"] = df["biography"].fillna("")
        # One bubble series per biography group; bubble size encodes worktime.
        for biography_index, biography in enumerate(sorted(set(df["biography"]))):
            for fig, column_pair in zip(figure_list, column_pair_list):
                x_column, y_column = column_pair
                filtered_df = df[
                    (df["biography"] == biography) & df[(x_column, phase)].notna() & df[(y_column, phase)].notna()
                ]
                if len(filtered_df) == 0:
                    continue
                source = ColumnDataSource(data=filtered_df)
                plot_bubble(
                    fig=fig,
                    source=source,
                    x_column_name=f"{x_column}_{phase}",
                    y_column_name=f"{y_column}_{phase}",
                    text_column_name="username_",
                    size_column_name=f"{worktime_key_for_phase}_worktime_hour_{phase}",
                    legend_label=biography,
                    color=get_color_from_palette(biography_index),
                )
        # Vertical average line (worktime per annotation) plus per-figure
        # horizontal average lines (quality metric).
        x_average_value = self._get_average_value(
            df,
            numerator_column=(f"{worktime_key_for_phase}_worktime_hour", phase),
            denominator_column=("annotation_count", phase),
        )
        for column_pair, fig in zip(
            [("rejected_count", "task_count"), ("pointed_out_inspection_comment_count", "annotation_count")],
            figure_list,
        ):
            self._plot_average_line(fig, x_average_value, dimension="height")
            y_average_value = self._get_average_value(
                df,
                numerator_column=(column_pair[0], phase),
                denominator_column=(column_pair[1], phase),
            )
            self._plot_average_line(fig, y_average_value, dimension="width")
        # Quartile lines: vertical for worktime, horizontal for the quality metric.
        x_quartile = self._get_quartile_value(df, (f"{worktime_type.value}_worktime/annotation_count", phase))
        for column, fig in zip(
            ["rejected_count/task_count", "pointed_out_inspection_comment_count/annotation_count"],
            figure_list,
        ):
            self._plot_quartile_line(fig, x_quartile, dimension="height")
            quartile = self._get_quartile_value(df, (column, phase))
            self._plot_quartile_line(fig, quartile, dimension="width")
        # Hover tooltips and legends.
        for fig in figure_list:
            tooltip_item = [
                "user_id_",
                "username_",
                "biography_",
                f"{worktime_key_for_phase}_worktime_hour_{phase}",
                f"task_count_{phase}",
                f"input_data_count_{phase}",
                f"annotation_count_{phase}",
                f"{worktime_type.value}_worktime/input_data_count_{phase}",
                f"{worktime_type.value}_worktime/annotation_count_{phase}",
                f"rejected_count_{phase}",
                f"pointed_out_inspection_comment_count_{phase}",
                f"rejected_count/task_count_{phase}",
                f"pointed_out_inspection_comment_count/annotation_count_{phase}",
            ]
            if worktime_type == WorktimeType.ACTUAL:
                tooltip_item.append("last_working_date_")
            hover_tool = create_hover_tool(tooltip_item)
            fig.add_tools(hover_tool)
            self._set_legend(fig)
        div_element = self._create_div_element()
        div_element.text += """円の大きさ:作業時間<br>"""
        write_bokeh_graph(bokeh.layouts.column([div_element] + figure_list), output_file)
class WholePerformance:
    """
    Overall (all-user) productivity and quality figures.
    """
    def __init__(self, series: pandas.Series):
        self.series = series
    def _validate_df_for_output(self, output_file: Path) -> bool:
        """Return True when there is data to write; otherwise log a warning and return False."""
        if len(self.series) > 0:
            return True
        logger.warning(f"データが0件のため、{output_file} は出力しません。")
        return False
    @classmethod
    def from_csv(cls, csv_file: Path) -> WholePerformance:
        """Load the summary previously written by :meth:`to_csv`."""
        table = pandas.read_csv(str(csv_file), header=None, index_col=[0, 1])
        # The first two CSV columns form the index; the third (label 2) holds the values.
        return cls(table[2])
    def to_csv(self, output_file: Path) -> None:
        """
        Write the overall productivity/quality summary as CSV.
        """
        if not self._validate_df_for_output(output_file):
            return
        # Reorder entries into the canonical column order.
        phase_list = UserPerformance.get_phase_list(self.series.index)
        ordered_index = UserPerformance.get_productivity_columns(phase_list) + [
            ("working_user_count", phase) for phase in phase_list
        ]
        ordered_series = self.series[ordered_index]
        output_file.parent.mkdir(exist_ok=True, parents=True)
        logger.debug(f"{str(output_file)} を出力します。")
        ordered_series.to_csv(str(output_file), sep=",", encoding="utf_8_sig", header=False)
class ProjectPerformance:
    """
    Per-project productivity and quality.
    """
    def __init__(self, df: pandas.DataFrame):
        self.df = df
    def _validate_df_for_output(self, output_file: Path) -> bool:
        """Return True when there is data to write; otherwise log a warning and return False."""
        if len(self.df) > 0:
            return True
        logger.warning(f"データが0件のため、{output_file} は出力しません。")
        return False
    @classmethod
    def from_whole_performance_objs(
        cls, objs: Collection[WholePerformance], project_titles: Collection[str]
    ) -> ProjectPerformance:
        """Stack one WholePerformance summary per project into a single DataFrame."""
        rows = []
        for whole_performance, title in zip(objs, project_titles):
            row = whole_performance.series
            row[("project_title", "")] = title
            rows.append(row)
        return cls(pandas.DataFrame(rows))
    def to_csv(self, output_file: Path) -> None:
        """
        Write the per-project productivity/quality summary as CSV.
        """
        if not self._validate_df_for_output(output_file):
            return
        phase_list = UserPerformance.get_phase_list(self.df.columns)
        # Project title first, then metrics in canonical order.
        columns = (
            [("project_title", "")]
            + UserPerformance.get_productivity_columns(phase_list)
            + [("working_user_count", phase) for phase in phase_list]
        )
        print_csv(self.df[columns], output=str(output_file))
| [
"pandas.DataFrame",
"copy.deepcopy",
"bokeh.plotting.figure",
"pandas.MultiIndex.from_tuples",
"bokeh.models.Div",
"annofabcli.statistics.scatter.create_hover_tool",
"numpy.isnan",
"annofabcli.statistics.scatter.get_color_from_palette",
"bokeh.plotting.ColumnDataSource",
"bokeh.models.Span",
"bo... | [((588, 615), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (605, 615), False, 'import logging\n'), ((5043, 5206), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (["([('actual_worktime_hour', 'sum'), ('last_working_date', '')] + [(\n 'monitored_worktime_hour', phase) for phase in phase_list])"], {}), "([('actual_worktime_hour', 'sum'), (\n 'last_working_date', '')] + [('monitored_worktime_hour', phase) for\n phase in phase_list])\n", (5072, 5206), False, 'import pandas\n'), ((6084, 6109), 'copy.deepcopy', 'copy.deepcopy', (['phase_list'], {}), '(phase_list)\n', (6097, 6109), False, 'import copy\n'), ((6531, 6599), 'pandas.MultiIndex.from_tuples', 'pandas.MultiIndex.from_tuples', (["[('username', ''), ('biography', '')]"], {}), "([('username', ''), ('biography', '')])\n", (6560, 6599), False, 'import pandas\n'), ((11402, 11494), 'bokeh.models.Span', 'bokeh.models.Span', ([], {'location': 'value', 'dimension': 'dimension', 'line_color': '"""red"""', 'line_width': '(0.5)'}), "(location=value, dimension=dimension, line_color='red',\n line_width=0.5)\n", (11419, 11494), False, 'import bokeh\n'), ((12828, 13007), 'bokeh.models.Div', 'bokeh.models.Div', ([], {'text': '"""<h4>グラフの見方</h4>\n <span style="color:red;">赤線</span>:平均値<br>\n <span style="color:blue;">青線</span>:四分位数<br>\n """'}), '(text=\n """<h4>グラフの見方</h4>\n <span style="color:red;">赤線</span>:平均値<br>\n <span style="color:blue;">青線</span>:四分位数<br>\n """\n )\n', (12844, 13007), False, 'import bokeh\n'), ((30019, 30048), 'pandas.DataFrame', 'pandas.DataFrame', (['series_list'], {}), '(series_list)\n', (30035, 30048), False, 'import pandas\n'), ((11844, 11937), 'bokeh.models.Span', 'bokeh.models.Span', ([], {'location': 'value', 'dimension': 'dimension', 'line_color': '"""blue"""', 'line_width': '(0.5)'}), "(location=value, dimension=dimension, line_color='blue',\n line_width=0.5)\n", (11861, 11937), False, 'import bokeh\n'), ((14536, 14700), 
'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'self.PLOT_WIDTH', 'plot_height': 'self.PLOT_HEIGHT', 'title': 'title', 'x_axis_label': '"""累計作業時間[hour]"""', 'y_axis_label': '"""アノテーションあたり作業時間[hour/annotation]"""'}), "(plot_width=self.PLOT_WIDTH, plot_height=self.PLOT_HEIGHT, title=\n title, x_axis_label='累計作業時間[hour]', y_axis_label=\n 'アノテーションあたり作業時間[hour/annotation]')\n", (14542, 14700), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((17742, 17773), 'annofabcli.statistics.scatter.create_hover_tool', 'create_hover_tool', (['tooltip_item'], {}), '(tooltip_item)\n', (17759, 17773), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n'), ((17922, 17971), 'bokeh.layouts.column', 'bokeh.layouts.column', (['([div_element] + figure_list)'], {}), '([div_element] + figure_list)\n', (17942, 17971), False, 'import bokeh\n'), ((18895, 19031), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'self.PLOT_WIDTH', 'plot_height': 'self.PLOT_HEIGHT', 'title': 'title', 'x_axis_label': 'x_axis_label', 'y_axis_label': 'y_axis_label'}), '(plot_width=self.PLOT_WIDTH, plot_height=self.PLOT_HEIGHT, title=\n title, x_axis_label=x_axis_label, y_axis_label=y_axis_label)\n', (18901, 19031), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((22057, 22088), 'annofabcli.statistics.scatter.create_hover_tool', 'create_hover_tool', (['tooltip_item'], {}), '(tooltip_item)\n', (22074, 22088), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n'), ((22237, 22286), 'bokeh.layouts.column', 'bokeh.layouts.column', (['([div_element] + figure_list)'], {}), '([div_element] + figure_list)\n', (22257, 22286), False, 'import bokeh\n'), ((23533, 23645), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': '(1200)', 'plot_height': '(800)', 'title': 'title', 'x_axis_label': 'x_axis_label', 
'y_axis_label': 'y_axis_label'}), '(plot_width=1200, plot_height=800, title=title, x_axis_label=\n x_axis_label, y_axis_label=y_axis_label)\n', (23539, 23645), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((27704, 27735), 'annofabcli.statistics.scatter.create_hover_tool', 'create_hover_tool', (['tooltip_item'], {}), '(tooltip_item)\n', (27721, 27735), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n'), ((27933, 27982), 'bokeh.layouts.column', 'bokeh.layouts.column', (['([div_element] + figure_list)'], {}), '([div_element] + figure_list)\n', (27953, 27982), False, 'import bokeh\n'), ((6891, 6909), 'numpy.isnan', 'numpy.isnan', (['date1'], {}), '(date1)\n', (6902, 6909), False, 'import numpy\n'), ((6984, 7002), 'numpy.isnan', 'numpy.isnan', (['date2'], {}), '(date2)\n', (6995, 7002), False, 'import numpy\n'), ((16086, 16120), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'filtered_df'}), '(data=filtered_df)\n', (16102, 16120), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((20243, 20277), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'filtered_df'}), '(data=filtered_df)\n', (20259, 20277), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((25045, 25079), 'bokeh.plotting.ColumnDataSource', 'ColumnDataSource', ([], {'data': 'filtered_df'}), '(data=filtered_df)\n', (25061, 25079), False, 'from bokeh.plotting import ColumnDataSource, figure\n'), ((16449, 16488), 'annofabcli.statistics.scatter.get_color_from_palette', 'get_color_from_palette', (['biography_index'], {}), '(biography_index)\n', (16471, 16488), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n'), ((20606, 20645), 'annofabcli.statistics.scatter.get_color_from_palette', 'get_color_from_palette', (['biography_index'], {}), 
'(biography_index)\n', (20628, 20645), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n'), ((25495, 25534), 'annofabcli.statistics.scatter.get_color_from_palette', 'get_color_from_palette', (['biography_index'], {}), '(biography_index)\n', (25517, 25534), False, 'from annofabcli.statistics.scatter import create_hover_tool, get_color_from_palette, plot_bubble, plot_scatter, write_bokeh_graph\n')] |
# -*- coding: utf-8 -*-
"""pix2pix_data.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1prb4RcGjgsweVTZF5G1URcqMxKSMX4Sk
"""
#!pip3 install scipy==1.1.0
#!pip install texttable
#from google.colab import drive
#drive.mount('/gdrive')
# %cd /gdrive
import configparser
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from numpy import genfromtxt
from six.moves import cPickle
import os
import matplotlib.colors as mcolors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import statistics as stats
import glob
from texttable import Texttable
#import matplotlib
#matplotlib.use('Agg')
import copy
from sklearn import metrics
import seaborn
import tensorflow as tf
import keras
import keras.backend.tensorflow_backend as K
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Dropout
from keras.layers import Dense, Activation
from keras.layers import Flatten
from keras.engine.topology import Input
from keras.optimizers import Adam
from keras import regularizers
from keras.models import load_model
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from vis.visualization import visualize_activation, visualize_saliency, visualize_cam
from vis.utils import utils
from keras import activations
from IPython.display import HTML, display
import tqdm
import json
from sklearn.metrics import roc_curve, auc, confusion_matrix
import datetime
from astropy.time import Time
from tensorflow.keras.models import model_from_json, load_model
from tensorflow.keras.utils import normalize as tf_norm
import io
import gzip
from astropy.io import fits
#from bson.json_util import loads, dumps
import matplotlib.pyplot as plt
# plt.style.use(['dark_background'])
from pandas.plotting import register_matplotlib_converters, scatter_matrix
# Register pandas datetime converters with matplotlib (silences the
# deprecation warning when plotting pandas date columns).
register_matplotlib_converters()
#%matplotlib inline
# Evaluation artifacts saved by an earlier training run:
x=np.load("X_test.npy")  # held-out dmdt images; presumably (N, 22, 24, 1) to match the model input below -- TODO confirm
y=np.load("y_test.npy")  # ground-truth class labels for X_test
preds=np.load("preds.npy")  # model's predicted class labels
prob=np.load("preds_proba.npy")  # per-sample prediction probabilities; thresholded at 0.8 further down
# ---------------------------------------------------------------------------
# CNN classifier over single-channel 22x24 dmdt images:
#   conv2d1(16 filters, 5x5) -> maxpool2d1(2x2) -> conv2d2(64, 5x5)
#   -> flatten -> preds (7 units, one per variable-star class).
# Layer names matter: utils.find_layer_idx() looks them up by name below.
# ---------------------------------------------------------------------------
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation="relu",
#data_format = "channels_first",
kernel_regularizer=regularizers.l2(0.01),
input_shape=(22,24,1),
padding='valid',
name="conv2d1"))
model.add(MaxPooling2D(pool_size=(2, 2), padding='valid',
#data_format = "channels_first",
name="maxpool2d1",
strides=(2,2)))
#model.add(Dropout(rate=0.1))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation="relu",
kernel_regularizer=regularizers.l2(0.01),
padding='valid',
#data_format = "channels_first",
name="conv2d2"))
#model.add(Conv2D(filters=256, kernel_size=(5, 5), activation="relu", kernel_regularizer=regularizers.l2(0.01)))
model.add(Flatten(
#data_format = "channels_first",
name="flatten"))
#model.add(Dense(units=512, kernel_regularizer=regularizers.l2(0.01)))
#model.add(Dropout(rate=0.5))
#model.add(Dense(units=512, kernel_regularizer=regularizers.l2(0.01)))
# NOTE(review): the output activation is 'linear', not softmax -- keras-vis
# activation maximization works on pre-softmax scores, which looks like the
# intent here; confirm the weights in model.h5 were trained accordingly.
model.add(Dense(units=7, activation="linear", name="preds"))
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
#model.load_weights('/gdrive/My Drive/workflow/periodic/code/experiments/cnn/model.h5')
# Restore trained weights; only inference/visualization happens below.
model.load_weights('model.h5')
model.summary()
##def load_model_helper(path, model_base_name):
## """
## Build keras model using json-file with architecture and hdf5-file with weights
## """
## with open(os.path.join(path, f'{model_base_name}.architecture.json'), 'r') as json_file:
## loaded_model_json = json_file.read()
## m = model_from_json(loaded_model_json)
## m.load_weights(os.path.join(path, f'{model_base_name}.weights.h5'))
##
## return m
##
##model = load_model_helper(path='G:\\Caltech SURF 2018\\Desktop_2019\\braai\\', model_base_name='d6_m7')
##print(model.summary())
##def vgg6(input_shape=(63, 63, 3), n_classes: int = 1):
## """
## VGG6
## :param input_shape:
## :param n_classes:
## :return:
## """
##
## model = keras.models.Sequential(name='VGG6')
## # input: 63x63 images with 3 channel -> (63, 63, 3) tensors.
## # this applies 16 convolution filters of size 3x3 each.
## model.add(keras.layers.Conv2D(16, (3, 3), activation='relu', input_shape=input_shape, name='conv1'))
## model.add(keras.layers.Conv2D(16, (3, 3), activation='relu', name='conv2'))
## model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
## model.add(keras.layers.Dropout(0.25))
##
## model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', name='conv3'))
## model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', name='conv4'))
## model.add(keras.layers.MaxPooling2D(pool_size=(4, 4)))
## model.add(keras.layers.Dropout(0.25))
##
## model.add(keras.layers.Flatten())
##
## model.add(keras.layers.Dense(256, activation='relu', name='fc_1'))
## model.add(keras.layers.Dropout(0.5))
## # output layer
## activation = 'sigmoid' if n_classes == 1 else 'softmax'
## model.add(keras.layers.Dense(n_classes, activation=activation, name='fc_out'))
##
## return model
##
##loss = 'sparse_categorical_crossentropy'
##optimizer = 'adam'
##
##image_shape = x.shape[1:]
##
##binary_classification = True if loss == 'sparse_categorical_crossentropy' else False
##n_classes = 2 if binary_classification else 1
##
##model = vgg6(input_shape=image_shape, n_classes=n_classes)
### Swap softmax with linear
###layer_idx = utils.find_layer_idx(model, 'fc_out')
###model.layers[layer_idx].activation = activations.linear
###model = utils.apply_modifications(model)
##
### set up optimizer:
##if optimizer == 'adam':
## optimzr = keras.optimizers.Adam(lr=3e-4, beta_1=0.9, beta_2=0.999,
## epsilon=None, decay=0.0, amsgrad=False)
##elif optimizer == 'sgd':
## optimzr = keras.optimizers.SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=True)
##else:
## print('Could not recognize optimizer, using Adam')
## optimzr = keras.optimizers.Adam(lr=3e-4, beta_1=0.9, beta_2=0.999,
## epsilon=None, decay=0.0, amsgrad=False)
##
##model.compile(optimizer=optimzr, loss=loss, metrics=['accuracy'])
##model.load_weights('model.h5')
##print(model.summary())
# x=np.load("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/X_test.npy")
# y=np.load("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/y_test.npy")
# preds=np.load("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/preds.npy")
# prob=np.load("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/preds_proba.npy")
# Variable-star class ids used in the dataset, and their mapping onto the
# model's 7 output units (filter indices 0..6).
classes=[1,2,4,5,6,8,13]
#classes=[1]
filterid={1:0,2:1,4:2,5:3,6:4,8:5,13:6}
# Human-readable class names for figure file names.
# NOTE(review): this rebinding shadows the `import pandas as pd` alias for
# the rest of the script; rename if pandas is ever needed below this point.
pd={1:'EW',2:'EA',4:'RRab',5:'RRc',6:'RRd',8:'RSCVn',13:'LPV'}
# Bin edges of the dmdt representation, used as axis tick labels:
# dm in magnitudes (y axis), dt in days (x axis).
dmints = [-8,-5,-3,-2.5,-2,-1.5,-1,-0.5,-0.3,-0.2,-0.1,0,0.1,0.2,0.3,0.5,1,1.5,2,2.5,3,5,8]
dtints = [0,1.0/145,2.0/145,3.0/145,4.0/145,1.0/25,2.0/25,3.0/25,1.5,2.5,3.5,4.5,5.5,7,10,20,30,60,90,120,240,600,960,2000,4000]
# Tick positions for the 24x22 image; y is reversed so dm increases upward.
xloc=np.arange(25)
yloc=np.arange(23)
yloc=yloc[::-1]
# Round the dt edges so the tick labels stay readable.
for i in range(len(dtints)):
    dtints[i]=round(dtints[i],3)
# Shift ticks by half a cell so labels sit on bin boundaries, not centers.
yloc=yloc-0.5
xloc=xloc-0.5
# Indices (sample, class) of confident predictions: probability >= 0.8.
thres=np.where(prob>=0.8)
ind_thres=thres[0]
class_thres=thres[1]
#plt.rcParams['figure.figsize'] = (18, 6)
# Resolve layer indices by name for keras-vis (output layer and the
# penultimate conv layer used by grad-CAM).
layer_idx = utils.find_layer_idx(model, 'preds')
penultimate_layer_idx = utils.find_layer_idx(model, 'conv2d2')
# os.mkdir("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/keras-vis/grad_CAM/dmdt/")
# os.mkdir("/gdrive/My Drive/workflow/periodic/code/experiments/cnn/keras-vis/grad_CAM/gradcam/")
###os.mkdir("keras-vis/")
###os.mkdir("keras-vis/grad_CAM/")
###os.mkdir("keras-vis/grad_CAM/triplet/")
###os.mkdir("keras-vis/grad_CAM/gradcam/")
##os.mkdir("activation_maximization/")
##os.mkdir("activation_maximization/fc_out")
#from matplotlib import pyplot as plt
#%matplotlib inline
# Global matplotlib style for all saved figures: large canvas, bold labels.
plt.rcParams['figure.figsize'] = (10, 8)
plt.rcParams["font.weight"] = "bold"
plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams.update({'font.size': 16})
#layer_idx = utils.find_layer_idx(model, 'fc_out')
# Swap softmax with linear
#model.layers[layer_idx].activation = activations.linear
#model = utils.apply_modifications(model)
# This is the output node we want to maximize.
# Create the output directory tree for the activation-maximization figures.
# os.makedirs(..., exist_ok=True) builds the whole path in one call and,
# unlike the previous os.mkdir pair, does not crash with FileExistsError
# when the script is re-run.
os.makedirs("activation_maximization/preds/", exist_ok=True)
# For each class: synthesize the input image that maximally activates that
# class's output unit (activation maximization), then save a heatmap of the
# synthetic dmdt and a histogram of its pixel values.
for i in range(len(classes)):
    filter_idx = filterid[classes[i]]
    # Constrain the optimized image to the data's value range [0, 1].
    input_range= (0.,1.)
    img_act = visualize_activation(model,
                                 layer_idx,
                                 filter_indices=filter_idx,
                                 input_range=input_range,
                                 #verbose=True,
                                 tv_weight=0.2,
                                 lp_norm_weight=0.0
                                 )
    # Heatmap of the synthesized dmdt image with bin-edge tick labels.
    fig, ax=plt.subplots(1,1)
    #print(img_act.shape)
    im=ax.imshow(img_act[:,:,0])
    fig.colorbar(im)
    ax.set_xticks(xloc)
    ax.set_xticklabels(dtints,rotation=90)
    ax.set_yticks(yloc)
    ax.set_yticklabels(dmints)
    ax.set(xlabel="dt(days)",ylabel="dm(mag)")
    #plt.title("preds layer filter for "+pd[classes[i]])
    plt.tight_layout()
    plt.savefig("activation_maximization/preds/"+pd[classes[i]]+"_filter.png")
    plt.close()
    # Histogram of the synthesized image's pixel values.
    fig, ax=plt.subplots(1,1)
    im1=ax.hist(img_act[:,:,0].flatten(), color="indigo")
    # Label the x axis with the histogram's own bin edges (im1[1]).
    ax.set_xticks(np.round(im1[1],2))
    plt.tight_layout()
    plt.savefig("activation_maximization/preds/"+pd[classes[i]]+"_filter_histogram.png")
    plt.close()
##for i in range(len(classes)):
## # This is the output node we want to maximize.
## filter_idx = filterid[classes[i]]
## ###os.mkdir("keras-vis/grad_CAM/triplet/"+pd[classes[i]]+"/")
## ###os.mkdir("keras-vis/grad_CAM/gradcam/"+pd[classes[i]]+"/")
## ###os.mkdir("keras-vis/saliency/"+pd[classes[i]]+"/")
## #input_range= (0,255)
## #image=np.random.random_sample((22,24,1))
## x_ind=np.where(class_thres==filter_idx)[0]
## ###init=np.where(ind_thres[x_ind]==8882)[0][0]
## init=0
## for ii in range(init,x_ind.shape[0]):
## #image=x[ind_thres[x_ind[ii]]].transpose((1,2,0))
## image=x[ind_thres[x_ind[ii]]]
## ###img_cam = visualize_cam(model, layer_idx, filter_indices=filter_idx, seed_input=image,
## ### penultimate_layer_idx=penultimate_layer_idx)
## img_sal = visualize_saliency(model, layer_idx, filter_indices=filter_idx, seed_input=image)
## # penultimate_layer_idx=penultimate_layer_idx
## ###plt.figure()
## #fig, ax=plt.subplots(1,2)
## #fig, ax=plt.subplots(1,1)
## #im1=ax.imshow(image[:,:,0])
## ###plt.imshow(image[:,:,0])
## ###fig = plt.figure(figsize=(8, 2), dpi=100)
## ###ax = fig.add_subplot(131)
## ###ax.axis('off')
## ###ax.imshow(image[:, :, 0], origin='upper', cmap=plt.cm.bone)
## ###ax2 = fig.add_subplot(132)
## ###ax2.axis('off')
## ###ax2.imshow(image[:, :, 1], origin='upper', cmap=plt.cm.bone)
## ###ax3 = fig.add_subplot(133)
## ###ax3.axis('off')
## ###ax3.imshow(image[:, :, 2], origin='upper', cmap=plt.cm.bone)
## #plt.imshow()
## ###plt.axis('off')
#### plt.xticks(xloc,dtints,rotation=90)
#### plt.yticks(yloc,dmints)
## #plt.set_xticks(xloc)
## #ax.set_xticks(xloc)
## #plt.set_xticklabels(dtints,rotation=90)
## #plt.set_yticks(yloc)
## #plt.set_yticklabels(dmints)
## #ax.set(xlabel="dt(days)",ylabel="dm(mag)")
## ###plt.savefig("keras-vis/grad_CAM/triplet/"+pd[classes[i]]+"/"+str(ind_thres[x_ind[ii]])+".png", bbox_inches='tight', transparent="True", pad_inches=0)
## ###plt.close()
## #plt.clf()
##
## plt.figure()
## #fig, ax=plt.subplots(1,2)
## #fig, ax=plt.subplots(1,1)
## ###plt.imshow(img_cam)
## plt.imshow(img_sal)
## plt.axis('off')
#### plt.xticks(xloc,dtints,rotation=90)
#### plt.yticks(yloc,dmints)
## #plt.set_xticks(xloc)
## #plt.set_xticklabels(dtints,rotation=90)
## #plt.set_yticks(yloc)
## #plt.set_yticklabels(dmints)
## #ax.set(xlabel="dt(days)",ylabel="dm(mag)")
## ###plt.savefig("keras-vis/grad_CAM/gradcam/"+pd[classes[i]]+"/"+str(ind_thres[x_ind[ii]])+".png", bbox_inches='tight', transparent="True", pad_inches=0)
## ###plt.savefig("keras-vis/saliency/"+pd[classes[i]]+"/"+str(ind_thres[x_ind[ii]])+".png", bbox_inches='tight', transparent="True", pad_inches=0)
## plt.savefig("keras-vis/real/saliency/"+str(ind_thres[x_ind[ii]])+".png", bbox_inches='tight', transparent="True", pad_inches=0)
## plt.close()
## #plt.clf()
##
## #im2=ax[1].imshow(img_cam)
## #divider1 = make_axes_locatable(ax[0])
## #cax1 = divider1.append_axes("right", size="5%", pad=0.1)
## #divider2 = make_axes_locatable(ax[1])
## #cax2 = divider2.append_axes("right", size="5%", pad=0.1)
## #ax[0].set_xticks(xloc)
## #ax[0].set_xticklabels(dtints,rotation=90)
## #ax[1].set_xticks(xloc)
## #ax[1].set_xticklabels(dtints,rotation=90)
## #ax[0].set_yticks(yloc)
## #ax[0].set_yticklabels(dmints)
## #ax[1].set_yticks(yloc)
## #ax[1].set_yticklabels(dmints)
## #ax[0].set(xlabel="dt(days)",ylabel="dm(mag)")
## #ax[1].set(xlabel="dt(days)",ylabel="dm(mag)")
## #fig.colorbar(im1, cax=cax1)
## #fig.colorbar(im2, cax=cax2)
## #plt.tight_layout()
## #plt.suptitle("Class: "+pd[classes[i]]+", grad_CAM,\n"+"X_id: "+str(ind_thres[x_ind[ii]])+"\nPred_Prob: "+str(round(prob[ind_thres[x_ind[ii]],class_thres[x_ind[ii]]],4)))
## #plt.savefig("keras-vis/grad_CAM/"+pd[classes[i]]+"/"+str(ind_thres[x_ind[ii]])+".png")
## #plt.close()
##
| [
"os.mkdir",
"numpy.load",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"keras.regularizers.l2",
"matplotlib.pyplot.close",
"numpy.round",
"keras.layers.Flatten",
"pandas.plotting.register_matplotlib_converters",
"numpy.where",
"numpy.arange",
"matplotlib.pyplot.rcParams.updat... | [((1990, 2022), 'pandas.plotting.register_matplotlib_converters', 'register_matplotlib_converters', ([], {}), '()\n', (2020, 2022), False, 'from pandas.plotting import register_matplotlib_converters, scatter_matrix\n'), ((2046, 2067), 'numpy.load', 'np.load', (['"""X_test.npy"""'], {}), "('X_test.npy')\n", (2053, 2067), True, 'import numpy as np\n'), ((2070, 2091), 'numpy.load', 'np.load', (['"""y_test.npy"""'], {}), "('y_test.npy')\n", (2077, 2091), True, 'import numpy as np\n'), ((2098, 2118), 'numpy.load', 'np.load', (['"""preds.npy"""'], {}), "('preds.npy')\n", (2105, 2118), True, 'import numpy as np\n'), ((2124, 2150), 'numpy.load', 'np.load', (['"""preds_proba.npy"""'], {}), "('preds_proba.npy')\n", (2131, 2150), True, 'import numpy as np\n'), ((2160, 2172), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2170, 2172), False, 'from keras.models import Sequential\n'), ((7291, 7304), 'numpy.arange', 'np.arange', (['(25)'], {}), '(25)\n', (7300, 7304), True, 'import numpy as np\n'), ((7310, 7323), 'numpy.arange', 'np.arange', (['(23)'], {}), '(23)\n', (7319, 7323), True, 'import numpy as np\n'), ((7437, 7458), 'numpy.where', 'np.where', (['(prob >= 0.8)'], {}), '(prob >= 0.8)\n', (7445, 7458), True, 'import numpy as np\n'), ((7552, 7588), 'vis.utils.utils.find_layer_idx', 'utils.find_layer_idx', (['model', '"""preds"""'], {}), "(model, 'preds')\n", (7572, 7588), False, 'from vis.utils import utils\n'), ((7613, 7651), 'vis.utils.utils.find_layer_idx', 'utils.find_layer_idx', (['model', '"""conv2d2"""'], {}), "(model, 'conv2d2')\n", (7633, 7651), False, 'from vis.utils import utils\n'), ((8257, 8295), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 16}"], {}), "({'font.size': 16})\n", (8276, 8295), True, 'import matplotlib.pyplot as plt\n'), ((8523, 8559), 'os.mkdir', 'os.mkdir', (['"""activation_maximization/"""'], {}), "('activation_maximization/')\n", (8531, 8559), False, 'import 
os\n'), ((8560, 8602), 'os.mkdir', 'os.mkdir', (['"""activation_maximization/preds/"""'], {}), "('activation_maximization/preds/')\n", (8568, 8602), False, 'import os\n'), ((2471, 2558), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)', 'padding': '"""valid"""', 'name': '"""maxpool2d1"""', 'strides': '(2, 2)'}), "(pool_size=(2, 2), padding='valid', name='maxpool2d1', strides=\n (2, 2))\n", (2483, 2558), False, 'from keras.layers import MaxPooling2D\n'), ((3039, 3062), 'keras.layers.Flatten', 'Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (3046, 3062), False, 'from keras.layers import Flatten\n'), ((3289, 3338), 'keras.layers.Dense', 'Dense', ([], {'units': '(7)', 'activation': '"""linear"""', 'name': '"""preds"""'}), "(units=7, activation='linear', name='preds')\n", (3294, 3338), False, 'from keras.layers import Dense, Activation\n'), ((8711, 8840), 'vis.visualization.visualize_activation', 'visualize_activation', (['model', 'layer_idx'], {'filter_indices': 'filter_idx', 'input_range': 'input_range', 'tv_weight': '(0.2)', 'lp_norm_weight': '(0.0)'}), '(model, layer_idx, filter_indices=filter_idx,\n input_range=input_range, tv_weight=0.2, lp_norm_weight=0.0)\n', (8731, 8840), False, 'from vis.visualization import visualize_activation, visualize_saliency, visualize_cam\n'), ((9112, 9130), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9124, 9130), True, 'import matplotlib.pyplot as plt\n'), ((9440, 9458), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9456, 9458), True, 'import matplotlib.pyplot as plt\n'), ((9463, 9541), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('activation_maximization/preds/' + pd[classes[i]] + '_filter.png')"], {}), "('activation_maximization/preds/' + pd[classes[i]] + '_filter.png')\n", (9474, 9541), True, 'import matplotlib.pyplot as plt\n'), ((9542, 9553), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9551, 9553), True, 
'import matplotlib.pyplot as plt\n'), ((9567, 9585), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (9579, 9585), True, 'import matplotlib.pyplot as plt\n'), ((9685, 9703), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9701, 9703), True, 'import matplotlib.pyplot as plt\n'), ((9708, 9800), 'matplotlib.pyplot.savefig', 'plt.savefig', (["('activation_maximization/preds/' + pd[classes[i]] + '_filter_histogram.png')"], {}), "('activation_maximization/preds/' + pd[classes[i]] +\n '_filter_histogram.png')\n", (9719, 9800), True, 'import matplotlib.pyplot as plt\n'), ((9797, 9808), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (9806, 9808), True, 'import matplotlib.pyplot as plt\n'), ((9661, 9680), 'numpy.round', 'np.round', (['im1[1]', '(2)'], {}), '(im1[1], 2)\n', (9669, 9680), True, 'import numpy as np\n'), ((2329, 2350), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2344, 2350), False, 'from keras import regularizers\n'), ((2774, 2795), 'keras.regularizers.l2', 'regularizers.l2', (['(0.01)'], {}), '(0.01)\n', (2789, 2795), False, 'from keras import regularizers\n')] |
import tensorflow
import numpy as np
import os
from numpy import genfromtxt
import Utilities
import Model.Constants as Constants
import Model.blockFactory as factory
import Model.Layers.Start.firstBlock as startBlock
import Model.Layers.Inceptions.FirstInception.firstBlock as firstBlockOfFirstInception
import Model.Layers.Inceptions.FirstInception.secondBlock as secondBlockOfFirstInception
import Model.Layers.Inceptions.FirstInception.thirdBlock as thirdBlockOfFirstInception
import Model.Layers.Inceptions.SecondInception.firstBlock as firstBlockOfSecondInception
import Model.Layers.Inceptions.SecondInception.secondBlock as secondBlockOfSecondInception
import Model.Layers.Inceptions.ThirdInception.firstBlock as firstBlockOfThirdInception
import Model.Layers.Inceptions.ThirdInception.secondBlock as secondBlockOfThirdInception
def CreateModel(shape):
    """Assemble the Inception-style embedding network.

    Arguments:
    shape -- input tensor shape handed to factory.initialization

    Returns:
    the finished Keras model with a 128-unit 'dense_layer' head.
    """
    inputs = factory.initialization(shape)
    features = startBlock.constructor(inputs)
    # The inception stages are applied strictly in this order; each module
    # exposes the same inceptionConstructor(tensor) entry point.
    stages = (
        firstBlockOfFirstInception,
        secondBlockOfFirstInception,
        thirdBlockOfFirstInception,
        firstBlockOfSecondInception,
        secondBlockOfSecondInception,
        firstBlockOfThirdInception,
        secondBlockOfThirdInception,
    )
    for stage in stages:
        features = stage.inceptionConstructor(features)
    return factory.finishing(inputs, features, 128, 'dense_layer')
def CalculateTripletLoss(target, outputs, alpha = 0.2):
    """Triplet loss summed over the batch.

    Arguments:
    target -- unused; present so the signature matches Keras's (y_true,
              y_pred) loss convention.
    outputs -- sequence of three encoding tensors, each of shape
               (None, 128): [anchor, positive, negative].
    alpha -- margin enforced between the positive and negative distances.

    Returns:
    scalar tensor: sum over the batch of max(d(a,p) - d(a,n) + alpha, 0).
    """
    anchor = outputs[0]
    positive = outputs[1]
    negative = outputs[2]
    # Squared Euclidean distance anchor<->positive, summed over the
    # embedding dimension (last axis).
    positive_distance = tensorflow.reduce_sum(
        tensorflow.square(tensorflow.subtract(anchor, positive)), axis=-1)
    # Squared Euclidean distance anchor<->negative.
    negative_distance = tensorflow.reduce_sum(
        tensorflow.square(tensorflow.subtract(anchor, negative)), axis=-1)
    # Hinge at zero: only triplets violating the margin contribute.
    per_triplet = tensorflow.maximum(
        positive_distance - negative_distance + alpha, 0.0)
    return tensorflow.reduce_sum(per_triplet)
def verify(image_path, identity, database, model):
    """Check whether the person in the image at `image_path` is `identity`.

    Arguments:
    image_path -- path to the image to verify
    identity -- name of the claimed identity; must be a key of `database`
    database -- dict mapping names to stored 128-d encodings
    model -- the Inception embedding model

    Returns:
    (dist, door_open) -- distance to the stored encoding, and True iff
    that distance is below the 0.7 acceptance threshold.
    """
    # Encode the probe image with the same model that built the database.
    encoding = Utilities.GetImageData(image_path, model)
    # L2 distance between probe and enrolled encoding.
    dist = np.linalg.norm(encoding - database[identity])
    # bool() keeps the return type a plain Python bool (numpy comparison
    # would otherwise yield np.bool_).
    door_open = bool(dist < 0.7)
    if door_open:
        print("It's " + str(identity) + ", welcome home!")
    else:
        print("It's not " + str(identity) + ", please go away")
    return dist, door_open
def loadWeightsFromFile():
    """Read the OpenFace weights exported as CSV into a dict keyed by layer.

    Returns a dict mapping each name in Constants.layerNames to the list of
    arrays Keras's set_weights() expects for that layer type:
      conv layers  -> [kernel, bias]
      batch norm   -> [gamma, beta, moving_mean, moving_variance]
      dense layer  -> [kernel, bias]
    """
    weights_root = './Model/weights'
    # Map "<layer>_<param>" base names to their csv paths, skipping hidden
    # files (e.g. .DS_Store).
    paths = {
        entry.replace('.csv', ''): weights_root + '/' + entry
        for entry in os.listdir(weights_root)
        if not entry.startswith('.')
    }

    def read_csv(path):
        # All exports share the same comma-separated layout.
        return genfromtxt(path, delimiter=',', dtype=None)

    weights_dict = {}
    for name in Constants.layerNames:
        if 'conv' in name:
            kernel = read_csv(paths[name + '_w'])
            kernel = np.reshape(kernel, Constants.conv_shape[name])
            # Torch stores (out, in, h, w); Keras wants (h, w, in, out).
            kernel = np.transpose(kernel, (2, 3, 1, 0))
            weights_dict[name] = [kernel, read_csv(paths[name + '_b'])]
        elif 'bn' in name:
            # gamma, beta, moving mean, moving variance -- in that order.
            weights_dict[name] = [
                read_csv(paths[name + suffix])
                for suffix in ('_w', '_b', '_m', '_v')
            ]
        elif 'dense' in name:
            dense_w = read_csv(weights_root + '/dense_w.csv')
            dense_w = np.reshape(dense_w, (128, 736))
            # Torch stores (out, in); Keras wants (in, out).
            dense_w = np.transpose(dense_w, (1, 0))
            dense_b = read_csv(weights_root + '/dense_b.csv')
            weights_dict[name] = [dense_w, dense_b]
    return weights_dict
def loadWeights(model):
    """Populate `model`'s layers with the CSV-exported OpenFace weights.

    Arguments:
    model -- the Keras model built by CreateModel; its layer names are
             expected to match Constants.layerNames.
    """
    # Load weights from csv files (which was exported from Openface torch model)
    weights_dict = loadWeightsFromFile()
    # Set layer weights of the model
    for name in Constants.layerNames:
        # Hoist the lookup (the original called get_layer twice per name)
        # and use the idiomatic identity test instead of `!= None`.
        # NOTE(review): Keras get_layer() raises ValueError for unknown
        # names rather than returning None -- confirm this guard is needed.
        layer = model.get_layer(name)
        if layer is not None:
            layer.set_weights(weights_dict[name])
| [
"Utilities.GetImageData",
"Model.Layers.Inceptions.ThirdInception.firstBlock.inceptionConstructor",
"tensorflow.subtract",
"Model.Layers.Inceptions.FirstInception.thirdBlock.inceptionConstructor",
"Model.Layers.Inceptions.SecondInception.secondBlock.inceptionConstructor",
"Model.blockFactory.finishing",
... | [((881, 910), 'Model.blockFactory.initialization', 'factory.initialization', (['shape'], {}), '(shape)\n', (903, 910), True, 'import Model.blockFactory as factory\n'), ((924, 961), 'Model.Layers.Start.firstBlock.constructor', 'startBlock.constructor', (['initialTensor'], {}), '(initialTensor)\n', (946, 961), True, 'import Model.Layers.Start.firstBlock as startBlock\n'), ((976, 1031), 'Model.Layers.Inceptions.FirstInception.firstBlock.inceptionConstructor', 'firstBlockOfFirstInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1023, 1031), True, 'import Model.Layers.Inceptions.FirstInception.firstBlock as firstBlockOfFirstInception\n'), ((1045, 1101), 'Model.Layers.Inceptions.FirstInception.secondBlock.inceptionConstructor', 'secondBlockOfFirstInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1093, 1101), True, 'import Model.Layers.Inceptions.FirstInception.secondBlock as secondBlockOfFirstInception\n'), ((1115, 1170), 'Model.Layers.Inceptions.FirstInception.thirdBlock.inceptionConstructor', 'thirdBlockOfFirstInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1162, 1170), True, 'import Model.Layers.Inceptions.FirstInception.thirdBlock as thirdBlockOfFirstInception\n'), ((1185, 1241), 'Model.Layers.Inceptions.SecondInception.firstBlock.inceptionConstructor', 'firstBlockOfSecondInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1233, 1241), True, 'import Model.Layers.Inceptions.SecondInception.firstBlock as firstBlockOfSecondInception\n'), ((1255, 1312), 'Model.Layers.Inceptions.SecondInception.secondBlock.inceptionConstructor', 'secondBlockOfSecondInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1304, 1312), True, 'import Model.Layers.Inceptions.SecondInception.secondBlock as secondBlockOfSecondInception\n'), ((1327, 1382), 'Model.Layers.Inceptions.ThirdInception.firstBlock.inceptionConstructor', 'firstBlockOfThirdInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1374, 
1382), True, 'import Model.Layers.Inceptions.ThirdInception.firstBlock as firstBlockOfThirdInception\n'), ((1396, 1452), 'Model.Layers.Inceptions.ThirdInception.secondBlock.inceptionConstructor', 'secondBlockOfThirdInception.inceptionConstructor', (['tensor'], {}), '(tensor)\n', (1444, 1452), True, 'import Model.Layers.Inceptions.ThirdInception.secondBlock as secondBlockOfThirdInception\n'), ((1466, 1526), 'Model.blockFactory.finishing', 'factory.finishing', (['initialTensor', 'tensor', '(128)', '"""dense_layer"""'], {}), "(initialTensor, tensor, 128, 'dense_layer')\n", (1483, 1526), True, 'import Model.blockFactory as factory\n'), ((3943, 3984), 'Utilities.GetImageData', 'Utilities.GetImageData', (['image_path', 'model'], {}), '(image_path, model)\n', (3965, 3984), False, 'import Utilities\n'), ((4065, 4110), 'numpy.linalg.norm', 'np.linalg.norm', (['(encoding - database[identity])'], {}), '(encoding - database[identity])\n', (4079, 4110), True, 'import numpy as np\n'), ((3043, 3077), 'tensorflow.maximum', 'tensorflow.maximum', (['basicLoss', '(0.0)'], {}), '(basicLoss, 0.0)\n', (3061, 3077), False, 'import tensorflow\n'), ((4602, 4621), 'os.listdir', 'os.listdir', (['dirPath'], {}), '(dirPath)\n', (4612, 4621), False, 'import os\n'), ((2468, 2505), 'tensorflow.subtract', 'tensorflow.subtract', (['anchor', 'positive'], {}), '(anchor, positive)\n', (2487, 2505), False, 'import tensorflow\n'), ((2699, 2736), 'tensorflow.subtract', 'tensorflow.subtract', (['anchor', 'negative'], {}), '(anchor, negative)\n', (2718, 2736), False, 'import tensorflow\n'), ((4850, 4907), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_w']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_w'], delimiter=',', dtype=None)\n", (4860, 4907), False, 'from numpy import genfromtxt\n'), ((4929, 4975), 'numpy.reshape', 'np.reshape', (['conv_w', 'Constants.conv_shape[name]'], {}), '(conv_w, Constants.conv_shape[name])\n', (4939, 4975), True, 'import numpy as np\n'), ((4997, 5031), 
'numpy.transpose', 'np.transpose', (['conv_w', '(2, 3, 1, 0)'], {}), '(conv_w, (2, 3, 1, 0))\n', (5009, 5031), True, 'import numpy as np\n'), ((5053, 5110), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_b']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_b'], delimiter=',', dtype=None)\n", (5063, 5110), False, 'from numpy import genfromtxt\n'), ((5212, 5269), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_w']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_w'], delimiter=',', dtype=None)\n", (5222, 5269), False, 'from numpy import genfromtxt\n'), ((5289, 5346), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_b']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_b'], delimiter=',', dtype=None)\n", (5299, 5346), False, 'from numpy import genfromtxt\n'), ((5366, 5423), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_m']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_m'], delimiter=',', dtype=None)\n", (5376, 5423), False, 'from numpy import genfromtxt\n'), ((5443, 5500), 'numpy.genfromtxt', 'genfromtxt', (["paths[name + '_v']"], {'delimiter': '""","""', 'dtype': 'None'}), "(paths[name + '_v'], delimiter=',', dtype=None)\n", (5453, 5500), False, 'from numpy import genfromtxt\n'), ((5611, 5674), 'numpy.genfromtxt', 'genfromtxt', (["(dirPath + '/dense_w.csv')"], {'delimiter': '""","""', 'dtype': 'None'}), "(dirPath + '/dense_w.csv', delimiter=',', dtype=None)\n", (5621, 5674), False, 'from numpy import genfromtxt\n'), ((5695, 5726), 'numpy.reshape', 'np.reshape', (['dense_w', '(128, 736)'], {}), '(dense_w, (128, 736))\n', (5705, 5726), True, 'import numpy as np\n'), ((5749, 5778), 'numpy.transpose', 'np.transpose', (['dense_w', '(1, 0)'], {}), '(dense_w, (1, 0))\n', (5761, 5778), True, 'import numpy as np\n'), ((5801, 5864), 'numpy.genfromtxt', 'genfromtxt', (["(dirPath + '/dense_b.csv')"], {'delimiter': '""","""', 'dtype': 'None'}), "(dirPath + '/dense_b.csv', delimiter=',', 
dtype=None)\n", (5811, 5864), False, 'from numpy import genfromtxt\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Imports.
import argparse as Ap
import hashlib
import logging as L
import numpy as np
import os, pdb, sys
from pysnips.ml.argparseactions import OptimizerAction
import time
__version__ = "0.0.0"
#
# Message Formatter
#
class MsgFormatter(L.Formatter):
	"""Message Formatter

	Formats messages with time format YYYY-MM-DD HH:MM:SS.mmm TZ
	"""

	def formatTime(self, record, datefmt):
		"""Return the record's creation time as 'YYYY-MM-DD HH:MM:SS.mmm TZ'.

		The `datefmt` argument (part of logging.Formatter's interface) is
		accepted but ignored; the format is fixed.
		"""
		t          = record.created
		# Fractional seconds; int() works on Python 2 and 3 (long() is
		# Python-2-only and raised NameError on Python 3).
		timeFrac   = abs(t-int(t))
		timeStruct = time.localtime(record.created)
		timeString = ""
		# Spell out the date/time codes: "%F %T" are glibc extensions and
		# are not portable (e.g. fail on Windows strftime).
		timeString += time.strftime("%Y-%m-%d %H:%M:%S", timeStruct)
		# "{:.3f}".format(0.123) -> "0.123"; drop the leading "0" so the
		# fraction splices directly after the seconds: ".123 ".
		timeString += "{:.3f} ".format(timeFrac)[1:]
		timeString += time.strftime("%Z", timeStruct)
		return timeString
#############################################################################################################
############################## Subcommands ##################################
#############################################################################################################
# Base class for command-line subcommands: subclasses set `name`, override
# addArgs()/run(), and are registered on an argparse subparsers object via
# addArgParser().  Deliberately no class docstring here: cls.__doc__ is used
# as the sub-parser usage text and would leak to docstring-less subclasses.
class Subcommand(object):
	# Command-line name of the subcommand; overridden by each subclass.
	name = None

	@classmethod
	def addArgParser(cls, subp, *args, **kwargs):
		"""Register this subcommand and return its argparse sub-parser."""
		# The subclass docstring doubles as the usage text.
		parser = subp.add_parser(cls.name, usage=cls.__doc__, *args, **kwargs)
		# Let the subclass declare its own flags, then remember which
		# run() to dispatch to once the arguments are parsed.
		cls.addArgs(parser)
		parser.set_defaults(__subcmdfn__=cls.run)
		return parser

	@classmethod
	def addArgs(cls, argp):
		"""Hook: subclasses add their arguments to `argp` here."""
		pass

	@classmethod
	def run(cls, d):
		"""Hook: subclasses implement the subcommand behaviour here."""
		pass
class Screw(Subcommand):
	"""Screw around with me in Screw(Subcommand)."""
	name = "screw"

	@classmethod
	def run(cls, d):
		# Echo the class docstring (which is also the subcommand's usage
		# text); the `d` namespace is ignored.
		message = cls.__doc__
		print(message)
class Train(Subcommand):
	"""Training subcommand: configures logging, seeds numpy's PRNG from --seed
	and hands control to the experiment module."""

	name = "train"

	# Map --loglevel choices onto the logging module's severity constants.
	LOGLEVELS = {"none":L.NOTSET, "debug": L.DEBUG, "info": L.INFO,
	             "warn":L.WARN, "err": L.ERROR, "crit": L.CRITICAL}

	@classmethod
	def addArgs(cls, argp):
		"""Register all training command-line arguments on parser `argp`."""
		argp.add_argument("-w", "--workDir", default=".", type=str,
		                  help="Path to the working directory for this experiment.")
		argp.add_argument("-d", "--dataDir", default=".", type=str,
		                  help="Path to datasets directory.")
		argp.add_argument("-t", "--tempDir", default=".", type=str,
		                  help="Path to temporary directory.")
		argp.add_argument("-l", "--loglevel", default="info", type=str,
		                  choices=cls.LOGLEVELS.keys(),
		                  help="Logging severity level.")
		# BUGFIX: `type` was the Python-2-only builtin `long` (NameError on
		# Python 3). `int` is unbounded on Python 3 and auto-promotes on
		# Python 2, so the accepted values are unchanged.
		argp.add_argument("-s", "--seed", default=0x6a09e667f3bcc908, type=int,
		                  help="Seed for PRNGs. Default is 64-bit fractional expansion of sqrt(2).")
		argp.add_argument("--summary", action="store_true",
		                  help="""Print a summary of the network.""")
		argp.add_argument("--model", default="complex", type=str,
		                  choices=["real", "complex"],
		                  help="Model Selection.")
		argp.add_argument("--dataset", default="cifar10", type=str,
		                  choices=["cifar10", "cifar100", "svhn"],
		                  help="Dataset Selection.")
		argp.add_argument("--dropout", default=0, type=float,
		                  help="Dropout probability.")
		argp.add_argument("-n", "--num-epochs", default=200, type=int,
		                  help="Number of epochs")
		argp.add_argument("-b", "--batch-size", default=64, type=int,
		                  help="Batch Size")
		argp.add_argument("--act", default="relu", type=str,
		                  choices=["relu"],
		                  help="Activation.")
		argp.add_argument("--aact", default="modrelu", type=str,
		                  choices=["modrelu"],
		                  help="Advanced Activation.")
		argp.add_argument("--cuda", default=None, type=int,
		                  nargs="?", const=0,
		                  help="CUDA device to use.")
		argp.add_argument("--pdb", action="store_true",
		                  help="""Breakpoint before model entry.""")
		optp = argp.add_argument_group("Optimizers", "Tunables for all optimizers")
		optp.add_argument("--optimizer", "--opt", action=OptimizerAction,
		                  type=str, default="nag", help="Optimizer selection.")
		optp.add_argument("--clipnorm", "--cn", default=1.0, type=float,
		                  help="The norm of the gradient will be clipped at this magnitude.")
		optp.add_argument("--clipval", "--cv", default=1.0, type=float,
		                  help="The values of the gradients will be individually clipped at this magnitude.")
		optp.add_argument("--l1", default=0, type=float,
		                  help="L1 penalty.")
		optp.add_argument("--l2", default=0, type=float,
		                  help="L2 penalty.")
		optp.add_argument("--decay", default=0, type=float,
		                  help="Learning rate decay for optimizers.")

	@classmethod
	def run(cls, d):
		"""Entry point: create work/log directories, attach stdout + file
		loggers, seed numpy's PRNG deterministically from --seed, then run
		the experiment."""
		if not os.path.isdir(d.workDir):
			os.mkdir(d.workDir)
		logDir = os.path.join(d.workDir, "logs")
		if not os.path.isdir(logDir):
			os.mkdir(logDir)

		# One console handler on the root logger, plus two file loggers
		# ("train" and "entry"), all at the severity chosen by --loglevel.
		logFormatter = MsgFormatter("[%(asctime)s ~~ %(levelname)-8s] %(message)s")
		stdoutLogSHandler = L.StreamHandler(sys.stdout)
		stdoutLogSHandler.setLevel(cls.LOGLEVELS[d.loglevel])
		stdoutLogSHandler.setFormatter(logFormatter)
		defltLogger = L.getLogger()
		defltLogger.setLevel(cls.LOGLEVELS[d.loglevel])
		defltLogger.addHandler(stdoutLogSHandler)
		trainLogFilename = os.path.join(d.workDir, "logs", "train.txt")
		trainLogFHandler = L.FileHandler(trainLogFilename, "a", "UTF-8", delay=True)
		trainLogFHandler.setLevel(cls.LOGLEVELS[d.loglevel])
		trainLogFHandler.setFormatter(logFormatter)
		trainLogger = L.getLogger("train")
		trainLogger.setLevel(cls.LOGLEVELS[d.loglevel])
		trainLogger.addHandler(trainLogFHandler)
		entryLogFilename = os.path.join(d.workDir, "logs", "entry.txt")
		entryLogFHandler = L.FileHandler(entryLogFilename, "a", "UTF-8", delay=True)
		entryLogFHandler.setLevel(cls.LOGLEVELS[d.loglevel])
		entryLogFHandler.setFormatter(logFormatter)
		entryLogger = L.getLogger("entry")
		entryLogger.setLevel(cls.LOGLEVELS[d.loglevel])
		entryLogger.addHandler(entryLogFHandler)

		#
		# np.random.seed() won't use anything more than 32 seed bits.
		# Be brutal and directly generate an entire MT19937 state using PBKDF2.
		#
		state = np.random.get_state()
		# BUGFIX: hashlib.pbkdf2_hmac() requires bytes-like password/salt;
		# passing str raises TypeError on Python 3. Encoding to UTF-8 yields
		# the same bytes Python 2 passed implicitly, so seeds are unchanged.
		state = (state[0],
		         np.frombuffer(hashlib.pbkdf2_hmac("sha256",                     # Hash
		                                           str(d.seed).encode("utf-8"),  # Password
		                                           state[0].encode("utf-8"),     # Salt
		                                           1,                            # Rounds
		                                           state[1].nbytes),             # DKLen
		                       dtype=np.uint32),
		         624,    # position in the MT19937 key vector (624 => fresh)
		         0,
		         0.0)
		np.random.set_state(state)

		# Optional debugger stop, then delegate to the experiment module.
		import expmt;
		if d.pdb: pdb.set_trace()
		expmt.getExperiment(d).rollback().run()
#############################################################################################################
############################## Argument Parsers #################################
#############################################################################################################
def getArgParser(prog):
	"""Build the top-level ArgumentParser and register every Subcommand.

	:param prog: Program name (typically argv[0]).
	:return: Configured argparse.ArgumentParser with one subparser per
	         concrete Subcommand subclass found in this module.
	"""
	argp = Ap.ArgumentParser(prog        = prog,
	                         usage       = None,
	                         description = None,
	                         epilog      = None)
	# BUGFIX: the `version=` constructor kwarg was removed from argparse in
	# Python 3 (deprecated in 2.7). An explicit version action reproduces
	# the old -v/--version behaviour portably.
	argp.add_argument("-v", "--version", action="version", version=__version__)
	subp = argp.add_subparsers()
	argp.set_defaults(argp=argp)
	argp.set_defaults(subp=subp)

	# Add global args to argp here?
	# ...

	# Add subcommands: every concrete Subcommand subclass in this module.
	# BUGFIX: dict.itervalues() is Python-2-only; .values() works on both.
	for v in globals().values():
		if(isinstance(v, type)       and
		   issubclass(v, Subcommand) and
		   v != Subcommand):
			v.addArgParser(subp)

	# Return argument parser.
	return argp
#############################################################################################################
############################## Main ##################################
#############################################################################################################
def main(argv):
	"""Program entry point: parse argv and dispatch to the chosen subcommand."""
	# Deep recursion can occur in model construction; raise the limit up front.
	sys.setrecursionlimit(10000)
	args = getArgParser(argv[0]).parse_args(argv[1:])
	# `__subcmdfn__` was bound by Subcommand.addArgParser via set_defaults().
	return args.__subcmdfn__(args)
if __name__ == "__main__":
	try:
		main(sys.argv)
	except KeyboardInterrupt:
		# Let Ctrl-C terminate with the usual exit status.
		raise
	except:
		# BUGFIX: `traceback` was referenced but never imported, so any
		# exception here died with a NameError instead of being printed.
		# NOTE(review): this bare except also swallows SystemExit (e.g. from
		# argparse --help) — confirm that is intended before narrowing it.
		import traceback
		traceback.print_exc()
| [
"os.mkdir",
"argparse.ArgumentParser",
"logging.FileHandler",
"numpy.random.get_state",
"os.path.isdir",
"logging.StreamHandler",
"expmt.getExperiment",
"time.strftime",
"logging.getLogger",
"numpy.random.set_state",
"pdb.set_trace",
"sys.setrecursionlimit",
"os.path.join",
"time.localtime... | [((7411, 7507), 'argparse.ArgumentParser', 'Ap.ArgumentParser', ([], {'prog': 'prog', 'usage': 'None', 'description': 'None', 'epilog': 'None', 'version': '__version__'}), '(prog=prog, usage=None, description=None, epilog=None,\n version=__version__)\n', (7428, 7507), True, 'import argparse as Ap\n'), ((8339, 8367), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10000)'], {}), '(10000)\n', (8360, 8367), False, 'import os, pdb, sys\n'), ((606, 636), 'time.localtime', 'time.localtime', (['record.created'], {}), '(record.created)\n', (620, 636), False, 'import time\n'), ((672, 706), 'time.strftime', 'time.strftime', (['"""%F %T"""', 'timeStruct'], {}), "('%F %T', timeStruct)\n", (685, 706), False, 'import time\n'), ((770, 801), 'time.strftime', 'time.strftime', (['"""%Z"""', 'timeStruct'], {}), "('%Z', timeStruct)\n", (783, 801), False, 'import time\n'), ((4896, 4927), 'os.path.join', 'os.path.join', (['d.workDir', '"""logs"""'], {}), "(d.workDir, 'logs')\n", (4908, 4927), False, 'import os, pdb, sys\n'), ((5094, 5121), 'logging.StreamHandler', 'L.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (5109, 5121), True, 'import logging as L\n'), ((5261, 5274), 'logging.getLogger', 'L.getLogger', ([], {}), '()\n', (5272, 5274), True, 'import logging as L\n'), ((5426, 5470), 'os.path.join', 'os.path.join', (['d.workDir', '"""logs"""', '"""train.txt"""'], {}), "(d.workDir, 'logs', 'train.txt')\n", (5438, 5470), False, 'import os, pdb, sys\n'), ((5493, 5550), 'logging.FileHandler', 'L.FileHandler', (['trainLogFilename', '"""a"""', '"""UTF-8"""'], {'delay': '(True)'}), "(trainLogFilename, 'a', 'UTF-8', delay=True)\n", (5506, 5550), True, 'import logging as L\n'), ((5692, 5712), 'logging.getLogger', 'L.getLogger', (['"""train"""'], {}), "('train')\n", (5703, 5712), True, 'import logging as L\n'), ((5863, 5907), 'os.path.join', 'os.path.join', (['d.workDir', '"""logs"""', '"""entry.txt"""'], {}), "(d.workDir, 'logs', 'entry.txt')\n", (5875, 
5907), False, 'import os, pdb, sys\n'), ((5930, 5987), 'logging.FileHandler', 'L.FileHandler', (['entryLogFilename', '"""a"""', '"""UTF-8"""'], {'delay': '(True)'}), "(entryLogFilename, 'a', 'UTF-8', delay=True)\n", (5943, 5987), True, 'import logging as L\n'), ((6129, 6149), 'logging.getLogger', 'L.getLogger', (['"""entry"""'], {}), "('entry')\n", (6140, 6149), True, 'import logging as L\n'), ((6437, 6458), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (6456, 6458), True, 'import numpy as np\n'), ((6928, 6954), 'numpy.random.set_state', 'np.random.set_state', (['state'], {}), '(state)\n', (6947, 6954), True, 'import numpy as np\n'), ((4833, 4857), 'os.path.isdir', 'os.path.isdir', (['d.workDir'], {}), '(d.workDir)\n', (4846, 4857), False, 'import os, pdb, sys\n'), ((4862, 4881), 'os.mkdir', 'os.mkdir', (['d.workDir'], {}), '(d.workDir)\n', (4870, 4881), False, 'import os, pdb, sys\n'), ((4937, 4958), 'os.path.isdir', 'os.path.isdir', (['logDir'], {}), '(logDir)\n', (4950, 4958), False, 'import os, pdb, sys\n'), ((4963, 4979), 'os.mkdir', 'os.mkdir', (['logDir'], {}), '(logDir)\n', (4971, 4979), False, 'import os, pdb, sys\n'), ((6986, 7001), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (6999, 7001), False, 'import os, pdb, sys\n'), ((7004, 7026), 'expmt.getExperiment', 'expmt.getExperiment', (['d'], {}), '(d)\n', (7023, 7026), False, 'import expmt\n')] |
"""
main module used for running the inference on simple network sim
"""
from __future__ import annotations
import datetime as dt
import logging.config
import sys
import time
from abc import ABC, abstractmethod
from typing import Tuple, List, Dict, Type, ClassVar, Union
import numpy as np
import pandas as pd
import scipy.stats as stats
from data_pipeline_api import standard_api
from more_itertools import pairwise
from . import loaders, common
from . import network_of_populations as ss
from . import sampleUseOfModel as sm
sys.path.append('..')
logger = logging.getLogger(__name__)
def uniform_pdf(
        x: Union[float, np.array],
        a: Union[float, np.array],
        b: Union[float, np.array]
) -> Union[float, np.array]:
    """Density of the Uniform(a, b) distribution evaluated at x.

    Works element-wise on numpy arrays as well as scalars: the indicator
    of a <= x <= b divided by the interval length.

    :param x: point(s) at which to evaluate the pdf
    :param a: lower bound(s) of the distribution
    :param b: upper bound(s) of the distribution
    """
    inside = (a <= x) & (x <= b)
    return inside / (b - a)
def lognormal(mean: float, stddev: float, stddev_min: float = -np.inf) -> stats.rv_continuous:
    """Build a scipy frozen lognormal matching a requested mean and std dev.

    ``stddev`` is interpreted as a fraction of ``mean`` and is floored at
    ``stddev_min``, so the uncertainty scales with the magnitude of the value
    in each node while never collapsing below the floor. The underlying
    normal parameters are obtained by inverting

        E[X]   = exp(mu + sigma^2 / 2)
        Var[X] = (exp(sigma^2) - 1) * exp(2*mu + sigma^2)

    :param mean: target mean of the distribution
    :param stddev: target std dev, expressed as a fraction of the mean
    :param stddev_min: absolute floor applied to the resulting std dev
    :return: frozen scipy lognormal with the requested mean and std dev
    """
    sd = np.maximum(mean * stddev, stddev_min)
    ratio2 = 1 + (sd**2 / mean**2)
    sigma = np.sqrt(np.log(ratio2))
    mu = np.log(mean / np.sqrt(ratio2))
    return stats.lognorm(s=sigma, loc=0., scale=np.exp(mu))
def split_dataframe(multipliers, partitions, col="Contact_Multiplier"):
df = multipliers.copy()
df.Date = pd.to_datetime(df.Date)
for prev_date, curr_date in pairwise(partitions):
index = (df.Date.dt.date < curr_date) & (df.Date.dt.date >= prev_date)
values = df.loc[index, col].values
if len(values) == 0:
continue
yield values[0], index
class InferredVariable(ABC):
    """
    Abstract base class for a variable inferred via ABC-SMC. A concrete
    parameter must be able to:
    - Sample from its prior and evaluate the prior pdf
    - Produce a perturbed copy and evaluate the perturbation pdf
    - Validate that its current value lies in the distribution's support
    - Expose its value as a DataFrame used to initialize and run the model
    """

    # Current particle value, as a DataFrame consumable by the model.
    value: pd.DataFrame

    def __init__(self, value: pd.DataFrame, mean: pd.DataFrame):
        # `value` is the current state; `mean` is the prior centre
        # (typically the values read from the data pipeline).
        self.value = value
        self.mean = mean

    @staticmethod
    @abstractmethod
    def generate_from_prior(fitter: ABCSMC) -> InferredVariable:
        """ Abstract method for generating a parameter from the prior """

    @abstractmethod
    def generate_perturbated(self) -> InferredVariable:
        """ Abstract method for generating a perturbated copy """

    @abstractmethod
    def validate(self) -> bool:
        """ Abstract method for validating the parameter correctness """

    @abstractmethod
    def prior_pdf(self) -> float:
        """ Abstract method for generating the prior pdf """

    @abstractmethod
    def perturbation_pdf(self, x: pd.DataFrame) -> float:
        """ Abstract method for generating the perturbation pdf """
class InferredInfectionProbability(InferredVariable):
    """
    ABC-SMC-inferred infection probability: the chance that a contact between
    an infectious and a susceptible individual leads to a new infection.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        value: pd.DataFrame,
        mean: pd.DataFrame,
        shape: float,
        kernel_sigma: float,
        rng: np.random.Generator
    ):
        super().__init__(value, mean)
        self.shape = shape                # Beta prior shape parameter
        self.kernel_sigma = kernel_sigma  # half-width of the uniform kernel
        self.rng = rng

    @staticmethod
    def generate_from_prior(fitter: ABCSMC) -> InferredInfectionProbability:
        """Draw a particle from the prior: a Beta distribution centred on the
        pipeline-supplied infection probability, with shape fixed by the
        fitter configuration.

        :param fitter: ABC-SMC fitter holding priors and the random generator
        :return: freshly sampled InferredInfectionProbability
        """
        shape = fitter.infection_probability_shape
        sigma = fitter.infection_probability_kernel_sigma
        mean = fitter.infection_probability
        value = fitter.infection_probability.copy()
        # Beta(a, b) with b = a * (1 - m) / m has mean m.
        value.Value = fitter.rng.beta(shape, shape * (1 - mean.Value) / mean.Value)
        return InferredInfectionProbability(value, mean, shape, sigma, fitter.rng)

    def generate_perturbated(self) -> InferredInfectionProbability:
        """Return a copy perturbed by a uniform kernel truncated to keep the
        probability valid:

            P* ~ Uniform(max(P - sigma, 0), min(P + sigma, 1))

        Truncation is legitimate here because a truncated uniform is still a
        uniform distribution.

        :return: new parameter close to this one
        """
        sigma = self.kernel_sigma
        perturbed = self.value.copy()
        lower = np.maximum(perturbed.Value - sigma, 0)
        upper = np.minimum(perturbed.Value + sigma, 1.)
        perturbed.Value = self.rng.uniform(lower, upper)
        return InferredInfectionProbability(perturbed, self.mean, self.shape, sigma, self.rng)

    def validate(self) -> bool:
        """Whether every probability lies strictly inside (0, 1)."""
        values = self.value.Value
        return np.all((values > 0.) & (values < 1.))

    def prior_pdf(self) -> np.ndarray:
        """pdf of the Beta prior evaluated at the current value, multiplied
        over rows.

        :return: prior pdf evaluated at the current value
        """
        beta_b = self.shape * (1 - self.mean.Value) / self.mean.Value
        return np.prod(stats.beta.pdf(self.value.Value, self.shape, beta_b))

    def perturbation_pdf(self, x: pd.DataFrame) -> np.ndarray:
        """pdf of the truncated-uniform perturbation kernel K(x | current),
        multiplied over rows.

        :param x: particle value at which to evaluate the kernel
        :return: kernel pdf evaluated at x
        """
        lower = np.maximum(self.value.Value - self.kernel_sigma, 0)
        upper = np.minimum(self.value.Value + self.kernel_sigma, 1)
        return np.prod(uniform_pdf(x.Value, lower, upper))
class InferredInitialInfections(InferredVariable):
    """
    ABC-SMC-inferred initial infections: the number of exposed individuals
    per node at the model start date.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        value: pd.DataFrame,
        mean: pd.DataFrame,
        stddev: float,
        stddev_min: float,
        kernel_sigma: float,
        rng: np.random.Generator
    ):
        super().__init__(value, mean)
        self.stddev = stddev              # prior std dev, as a fraction of the mean
        self.stddev_min = stddev_min      # absolute floor on the prior std dev
        self.kernel_sigma = kernel_sigma  # half-width of the uniform kernel
        self.rng = rng

    @staticmethod
    def generate_from_prior(fitter: ABCSMC) -> InferredInitialInfections:
        """Draw a particle from a lognormal prior centred on the pipeline
        values; the std dev is a fraction of the mean so the uncertainty
        scales with node size.

        :param fitter: ABC-SMC fitter holding priors and the random generator
        :return: freshly sampled InferredInitialInfections
        """
        stddev = fitter.initial_infections_stddev
        stddev_min = fitter.initial_infections_stddev_min
        sigma = fitter.initial_infections_kernel_sigma
        value = fitter.initial_infections.copy()
        value.Infected = lognormal(value.Infected, stddev, stddev_min).rvs(random_state=fitter.rng)
        return InferredInitialInfections(value, fitter.initial_infections, stddev, stddev_min, sigma, fitter.rng)

    def generate_perturbated(self) -> InferredInitialInfections:
        """Return a copy perturbed node-wise by a uniform kernel floored at 0:

            P* ~ Uniform(max(P - sigma, 0), P + sigma)

        The floor keeps the counts valid; a truncated uniform is still a
        uniform distribution, so resampling remains correct.

        :return: new parameter close to this one
        """
        sigma = self.kernel_sigma
        perturbed = self.value.copy()
        lower = np.maximum(perturbed.Infected - sigma, 0.)
        perturbed.Infected = self.rng.uniform(lower, perturbed.Infected + sigma)
        return InferredInitialInfections(perturbed, self.mean, self.stddev, self.stddev_min, self.kernel_sigma, self.rng)

    def validate(self) -> bool:
        """Whether all infection counts are non-negative."""
        return np.all(self.value.Infected >= 0.)

    def prior_pdf(self) -> float:
        """Joint prior pdf at the current value: node priors are independent,
        so the joint pdf is the product of per-node lognormal pdfs.

        :return: prior pdf evaluated at the current value
        """
        pdf = 1.
        for idx, row in self.mean.iterrows():
            prior = lognormal(row.Infected, self.stddev, self.stddev_min)
            pdf *= prior.pdf(self.value.at[idx, "Infected"])
        return pdf

    def perturbation_pdf(self, x: pd.DataFrame) -> float:
        """Joint pdf of the uniform perturbation kernel K(x | current), with
        the lower bound floored at 0; product over independent nodes.

        :param x: particle value at which to evaluate the kernel
        :return: kernel pdf evaluated at x
        """
        pdf = 1.
        for idx, row in self.value.iterrows():
            lower = max(row.Infected - self.kernel_sigma, 0.)
            pdf *= uniform_pdf(x.at[idx, "Infected"], lower, row.Infected + self.kernel_sigma)
        return pdf
class InferredContactMultipliers(InferredVariable):
    """
    ABC-SMC-inferred contact multipliers: per-period scaling factors applied
    to contacts to model the effect of quarantine and social distancing.
    """

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        value: pd.DataFrame,
        mean: pd.DataFrame,
        stddev: float,
        kernel_sigma: float,
        partitions: List[dt.date],
        rng: np.random.Generator
    ):
        super().__init__(value, mean)
        self.stddev = stddev              # prior std dev
        self.rng = rng
        self.kernel_sigma = kernel_sigma  # half-width of the uniform kernel
        self.partitions = partitions      # date boundaries of inferred periods

    @staticmethod
    def generate_from_prior(fitter: ABCSMC) -> InferredContactMultipliers:
        """Draw a particle from a lognormal prior centred on the pipeline
        multipliers, one independent draw per partition period.

        :param fitter: ABC-SMC fitter holding priors and the random generator
        :return: freshly sampled InferredContactMultipliers
        """
        stddev = fitter.contact_multipliers_stddev
        sigma = fitter.contact_multipliers_kernel_sigma
        partitions = fitter.contact_multipliers_partitions
        value = fitter.movement_multipliers.copy()
        for multiplier, mask in split_dataframe(value, partitions):
            value.loc[mask, "Contact_Multiplier"] = lognormal(multiplier, stddev).rvs(random_state=fitter.rng)
        return InferredContactMultipliers(value, fitter.movement_multipliers, stddev, sigma, partitions, fitter.rng)

    def generate_perturbated(self) -> InferredContactMultipliers:
        """Return a copy whose per-period multipliers are perturbed by a
        uniform kernel floored at 0:

            P* ~ Uniform(max(P - sigma, 0), P + sigma)

        The floor keeps multipliers valid; a truncated uniform is still a
        uniform distribution, so resampling remains correct.

        :return: new parameter close to this one
        """
        perturbed = self.value.copy()
        for multiplier, mask in split_dataframe(perturbed, self.partitions):
            lower = np.maximum(multiplier - self.kernel_sigma, 0.)
            perturbed.loc[mask, "Contact_Multiplier"] = self.rng.uniform(lower, multiplier + self.kernel_sigma)
        return InferredContactMultipliers(perturbed, self.mean, self.stddev, self.kernel_sigma, self.partitions, self.rng)

    def validate(self) -> bool:
        """Whether all multipliers are strictly positive."""
        return np.all(self.value.Contact_Multiplier > 0.)

    def prior_pdf(self) -> float:
        """Joint prior pdf at the current value: period priors are independent,
        so the joint pdf is the product of per-period lognormal pdfs.

        :return: prior pdf evaluated at the current value
        """
        pdf = 1.
        for multiplier, mask in split_dataframe(self.value, self.partitions):
            prior_mean = self.mean.loc[mask, "Contact_Multiplier"].values[0]
            pdf *= lognormal(prior_mean, self.stddev).pdf(multiplier)
        return pdf

    def perturbation_pdf(self, x: pd.DataFrame) -> float:
        """Joint pdf of the uniform perturbation kernel K(x | current), with
        the lower bound floored at 0; product over independent periods.

        :param x: particle value at which to evaluate the kernel
        :return: kernel pdf evaluated at x
        """
        pdf = 1.
        for multiplier, mask in split_dataframe(self.value, self.partitions):
            proposed = x.loc[mask, "Contact_Multiplier"].values[0]
            pdf *= uniform_pdf(proposed, max(multiplier - self.kernel_sigma, 0.), multiplier + self.kernel_sigma)
        return pdf
class Particle:
    """
    A particle: the full collection of inferred parameters manipulated by
    ABC-SMC. A particle supports the operations the algorithm needs:

    1) sampling from the priors (and evaluating the prior pdf),
    2) producing a perturbed copy (and evaluating the perturbation pdf),
    3) validating that every parameter lies within its support.
    """

    # Registry mapping parameter names to the classes that infer them.
    inferred_variables_classes: ClassVar[Dict[str, Type[InferredVariable]]] = {
        "infection-probability": InferredInfectionProbability,
        "initial-infections": InferredInitialInfections,
        "contact-multipliers": InferredContactMultipliers
    }

    def __init__(self, inferred_variables: Dict[str, InferredVariable]):
        self.inferred_variables = inferred_variables

    @staticmethod
    def generate_from_priors(fitter: ABCSMC) -> Particle:
        """Sample a particle from the priors. All parameters are independent,
        so each is drawn separately from its own prior; the fitter supplies
        the random state and the prior centres (the data-pipeline values).

        :param fitter: ABC-SMC fitter holding priors and the random generator
        :return: new particle drawn from the priors
        """
        sampled = {}
        for name, klass in Particle.inferred_variables_classes.items():
            sampled[name] = klass.generate_from_prior(fitter)
        return Particle(sampled)

    def generate_perturbated(self) -> Particle:
        """Return a new particle in which every parameter has been perturbed.

        :return: new particle close to this one
        """
        return Particle({name: variable.generate_perturbated()
                         for name, variable in self.inferred_variables.items()})

    def validate_particle(self) -> bool:
        """Whether every contained parameter respects its support — e.g. a
        uniform perturbation may have pushed a lognormally-distributed value
        below 0.

        :return: whether the particle is valid
        """
        for variable in self.inferred_variables.values():
            if not variable.validate():
                return False
        return True

    @staticmethod
    def resample_and_perturbate(
        particles: List[Particle],
        weights: List[float],
        rng: np.random.Generator
    ) -> Particle:
        """Resampling step of ABC-SMC: pick a particle from the previous
        population with probability proportional to its weight, perturb it,
        and repeat until the candidate is valid — the perturbation kernel is
        unaware of each parameter's support, so invalid draws are rejected.

        :param particles: particles of the previous population
        :param weights: weights of the previous population
        :param rng: random state used for the weighted choice
        :return: valid perturbed particle sampled from the previous population
        """
        while True:
            chosen = rng.choice(particles, p=weights / np.sum(weights))
            candidate = chosen.generate_perturbated()
            if candidate.validate_particle():
                return candidate

    def prior_pdf(self) -> float:
        """Joint prior pdf at this particle: parameters are independent, so
        the joint pdf is the product of per-parameter prior pdfs.

        :return: prior pdf evaluated at this particle
        """
        pdf = 1.
        for variable in self.inferred_variables.values():
            pdf *= variable.prior_pdf()
        return pdf

    def perturbation_pdf(self, x: Particle) -> float:
        """Joint perturbation-kernel pdf K(x | self): product over the
        independent per-parameter kernels.

        :param x: particle at which to evaluate the kernel
        :return: kernel pdf evaluated at x
        """
        pdf = 1.
        for key, variable in self.inferred_variables.items():
            pdf *= variable.perturbation_pdf(x.inferred_variables[key].value)
        return pdf
# pylint: disable=too-many-instance-attributes
class ABCSMC:
"""
Class to wrap inference routines for the ABC SMC inference fitting. This algorithm
provides a list of samples distribution with the posterior pdf of parameters
given the data. This class is fairly tightly coupled to the simple network
sim network, sacrificing abstraction for speed and clarity. We rely on the
``Particle`` class in which all the abstraction about parameters reside.
Algorithm (briefly):
Set parameters ``smc_iteration``, ``n_particles``, ``threshold``
For iteration in smc_iterations:
While accepted_particles < n_particles:
p = sample randomly chosen particle from accepted at previous iteration
p = perturb p Using uniform perturbation
distance = run model with particle p
if distance < threshold:
Particle is accepted for the current population
Compute weight for current particle
References:
https://royalsocietypublishing.org/doi/pdf/10.1098/rsif.2008.0172
https://en.wikipedia.org/wiki/Approximate_Bayesian_computation
https://pyabc.readthedocs.io/en/latest/index.html
"""
# pylint: disable=too-many-arguments
def __init__(
self,
parameters: pd.DataFrame,
historical_deaths: pd.DataFrame,
compartment_transition_table: pd.DataFrame,
population_table: pd.DataFrame,
commutes_table: pd.DataFrame,
mixing_matrix_table: pd.DataFrame,
infection_probability: pd.DataFrame,
initial_infections: pd.DataFrame,
infectious_states: pd.DataFrame,
trials: pd.DataFrame,
start_end_date: pd.DataFrame,
movement_multipliers: pd.DataFrame,
stochastic_mode: pd.DataFrame,
random_seed: pd.DataFrame
):
self.historical_deaths = loaders.readHistoricalDeaths(historical_deaths)
parameters = loaders.readABCSMCParameters(parameters)
self.n_smc_steps = parameters["n_smc_steps"]
self.n_particles = parameters["n_particles"]
self.infection_probability_shape = parameters["infection_probability_shape"]
self.infection_probability_kernel_sigma = parameters["infection_probability_kernel_sigma"]
self.initial_infections_stddev = parameters["initial_infections_stddev"]
self.initial_infections_stddev_min = parameters["initial_infections_stddev_min"]
self.initial_infections_kernel_sigma = parameters["initial_infections_kernel_sigma"]
self.contact_multipliers_stddev = parameters["contact_multipliers_stddev"]
self.contact_multipliers_kernel_sigma = parameters["contact_multipliers_kernel_sigma"]
self.contact_multipliers_partitions = parameters["contact_multipliers_partitions"]
assert self.n_smc_steps > 0
assert self.n_particles > 0
assert self.infection_probability_shape > 0.
assert self.infection_probability_kernel_sigma > 0.
assert self.initial_infections_stddev > 0.
assert self.initial_infections_stddev_min > 0.
assert self.initial_infections_kernel_sigma > 0.
assert self.contact_multipliers_stddev > 0
assert self.contact_multipliers_kernel_sigma > 0
self.compartment_transition_table = compartment_transition_table
self.population_table = population_table
self.commutes_table = commutes_table
self.mixing_matrix_table = mixing_matrix_table
self.infection_probability = infection_probability
self.initial_infections = initial_infections
self.infectious_states = infectious_states
self.trials = trials
self.start_end_date = start_end_date
self.movement_multipliers = movement_multipliers
self.stochastic_mode = stochastic_mode
self.random_seed = random_seed
self.rng = np.random.default_rng(loaders.readRandomSeed(random_seed))
assert trials.at[0, "Value"] == 1, "Only one trial should be used for both stochastic and deterministic mode"
assert len(infection_probability) == 1, "Only one infection probability is allowed"
self.threshold = np.inf
self.fit_statistics: Dict[int] = {}
def fit(self) -> Tuple[List[Particle], List[float], List[float]]:
    """Run the full ABC-SMC loop.

    Iteratively samples particle populations, keeping at each round only the
    particles whose goodness-of-fit passes the current threshold, and
    tightens the threshold between rounds.

    References:
        https://royalsocietypublishing.org/doi/pdf/10.1098/rsif.2008.0172
        https://en.wikipedia.org/wiki/Approximate_Bayesian_computation

    :return: Particles accepted in the final round, their weights, and the
        distances associated with them.
    """
    particles: List[Particle] = []
    weights: List[float] = []
    dists: List[float] = []
    for step in range(self.n_smc_steps):
        logger.info("SMC step %d/%d", step + 1, self.n_smc_steps)
        # Each round consumes the previous population and produces a new one.
        particles, weights, dists = self.sample_particles(step, particles, weights)
        self.update_threshold(dists)
    return particles, weights, dists
def update_threshold(self, distances: List[float]):
    """Set the acceptance threshold for the next ABC-SMC round.

    The new threshold is the median of the distances achieved by the
    particles accepted in the previous round, so each round becomes
    progressively more selective.

    :param distances: Distances found for particles in the previous round
    """
    # np.median is exactly the 50th percentile under NumPy's default
    # linear interpolation, matching the original computation.
    self.threshold = np.median(distances)
def sample_particles(
    self,
    smc_step: int,
    prev_particles: List[Particle],
    prev_weights: List[float]
) -> Tuple[List[Particle], List[float], List[float]]:
    """ Internal single iteration of ABC-SMC. Sampling particles (from prior or previous accepted ones),
    until enough particles pass a goodness-of-fit threshold.

    :param smc_step: ABC-SMC iteration number
    :param prev_particles: List of particles accepted on previous round
    :param prev_weights: List of particles weights accepted on previous round
    :return: List of particles and weights accepted at current ABC-SMC round
    """
    t0 = time.time()
    particles = []
    weights = []
    distances = []
    particles_accepted = 0
    particles_simulated = 0
    # Rejection loop: keep proposing particles until n_particles of them
    # pass the current distance threshold.
    while particles_accepted < self.n_particles:
        if smc_step == 0:
            # First round: draw directly from the prior distributions.
            particle = Particle.generate_from_priors(self)
        else:
            # Later rounds: weighted resample from the previous population,
            # perturbed by the transition kernel.
            particle = Particle.resample_and_perturbate(prev_particles, prev_weights, self.rng)
        result = self.run_model(particle)
        distance = self.compute_distance(result)
        if distance <= self.threshold:
            logger.info("Particle accepted with distance %d", distance)
            weight = ABCSMC.compute_weight(smc_step, prev_particles, prev_weights, particle)
            particles.append(particle)
            weights.append(weight)
            distances.append(distance)
            particles_accepted += 1
        # Counts every proposal, accepted or not (acceptance-rate metric).
        particles_simulated += 1
        logger.info("Particles accepted %d/%d", particles_accepted, particles_simulated)
    self.add_iteration_statistics(smc_step, particles, weights, particles_simulated, distances, t0)
    return particles, weights, distances
def run_model(self, particle: Particle) -> pd.DataFrame:
    """ Run models using current particle as parameters.

    Builds a fresh network of populations parameterized by the particle's
    inferred variables, simulates it, and returns the aggregated output.

    :param particle: Particle under consideration
    :return: Model run results for current particle
    """
    # Only the three inferred variables come from the particle; every other
    # input is one of the fixed tables loaded at construction time.
    network, issues = ss.createNetworkOfPopulation(
        self.compartment_transition_table,
        self.population_table,
        self.commutes_table,
        self.mixing_matrix_table,
        self.infectious_states,
        particle.inferred_variables["infection-probability"].value,
        particle.inferred_variables["initial-infections"].value,
        self.trials,
        self.start_end_date,
        particle.inferred_variables["contact-multipliers"].value,
        self.stochastic_mode,
    )
    random_seed = loaders.readRandomSeed(self.random_seed)
    results = sm.runSimulation(network, random_seed, issues=issues)
    if issues:
        # Surface data-quality issues without aborting the run.
        logger.warning("We had %s issues when running the model:", len(issues))
        for issue in issues:
            logger.warning("%s (severity: %s)", issue.description, issue.severity)
    aggregated = sm.aggregateResults(results)
    return aggregated.output
def compute_distance(self, result: pd.DataFrame) -> float:
    r""" Computes distance between target and model run with current particle.
    For dynamical systems such as epidemiological models, the distance generally
    used is the root mean squared distance between model and historical outputs.
    e.g. for a dynamical system which outputs `y(t)` (number of deaths, number
    of infected, etc):

    .. math::
        \sqrt{\sum_{t=1,...,T} ( y_{model}(t) - y_{reality}(t) )^2}

    In our case, `y(t)` is the number of deaths per node and per week.

    :param result: Model run results
    :return: distance value between model run and target
    """
    # Weekly new deaths per node: keep cumulative deaths (state 'D'),
    # pivot to a (date x node) table, difference to obtain new deaths,
    # then aggregate into 7-day bins.
    result_by_node = (
        result
        .query("state == 'D'")
        .groupby(["date", "node"])
        .sum()
        .reset_index()
        .assign(date=lambda x: pd.to_datetime(x.date))
        .pivot(index="date", columns="node", values="mean")
        .diff()
        .resample('7D').sum()
    )
    # Root-mean-square error against the historical weekly deaths table.
    # NOTE(review): self.historical_deaths is not assigned in the code shown
    # here -- presumably set elsewhere in __init__ from the
    # "historical-deaths" table; confirm its index/columns align with
    # result_by_node.
    distance = (result_by_node - self.historical_deaths)**2
    distance = np.sqrt(distance.sum().sum() / distance.count().sum())
    return distance
@staticmethod
def compute_weight(
    smc_step: int,
    particles: List[Particle],
    weights: List[float],
    particle: Particle
) -> float:
    r""" Compute weights of particle as per the ABC-SMC algorithm.

    As per the reference article [#tonistumpf]_, the weights are
    updated as per the formula:

    .. math::
        w_t^i = \frac{\pi(\Theta_t^i)}{\sum_{j=1}^N w_{t-1}^j K(\Theta_t^{j-1}, \Theta_t^{j})}

    .. [#tonistumpf] Toni, Tina, and <NAME>.
        "Simulation-Based Model Selection for Dynamical
        Systems in Systems and Population Biology".
        Bioinformatics 26, no. 1, 104-10, 2010.
        doi:10.1093/bioinformatics/btp619.

    :param smc_step: Step number of ABC-SMC algorithm
    :param particles: List of accepted particles in the previous run
    :param weights: List of weights of accepted particles in the previous run
    :param particle: Particle under consideration
    :return: Weight of the particle under consideration
    """
    # Fix: the docstring is now a raw string. The original non-raw string
    # contained "\f" (interpreted as a form feed) and other invalid escape
    # sequences, corrupting the rendered formula and triggering
    # SyntaxWarning on recent Python versions.
    if smc_step == 0:
        # First round: particles are drawn directly from the priors, so
        # all weights are equal.
        return 1.
    # Importance weight: prior density of the proposed particle divided by
    # the mixture density of proposing it from the previous population.
    num = particle.prior_pdf()
    denom = sum(w * p.perturbation_pdf(particle) for w, p in zip(weights, particles))
    return num / denom
# pylint: disable=too-many-arguments
def add_iteration_statistics(
    self,
    smc_step: int,
    particles: List[Particle],
    weights: List[float],
    particles_simulated: int,
    distances: List[float],
    t0: float
):
    """Record statistics for one iteration of the ABC-SMC algorithm.

    :param smc_step: Step number of ABC-SMC algorithm
    :param particles: Accepted particles
    :param weights: Weights of accepted particles
    :param particles_simulated: Total number of particles simulated this step
    :param distances: List of distances generated by accepted particles
    :param t0: Time at start of iteration
    """
    # setdefault returns the (possibly freshly created) per-step dict, so
    # every write below lands in self.fit_statistics[smc_step].
    stats = self.fit_statistics.setdefault(smc_step, {})
    stats["particles"] = particles
    stats["weights"] = weights
    stats["particles_accepted"] = len(particles)
    stats["particles_simulated"] = particles_simulated
    stats["distances"] = distances
    stats["threshold"] = self.threshold
    stats["time"] = f"{time.time() - t0:.0f}s"
def summarize(self, particles: List[Particle], weights: List[float], distances: List[float], t0: float) -> Dict:
    """Assemble the per-step fit statistics and the final population
    into a single summary dictionary.

    :param particles: Accepted particles
    :param weights: Weights of accepted particles
    :param distances: Distances of accepted particles
    :param t0: Time just before fit started
    :return: Dictionary with statistics, populations, the best particle
        (smallest distance) and the total elapsed time.
    """
    best = int(np.argmin(distances))
    return {
        "fit_statistics": self.fit_statistics,
        "particles": particles,
        "weights": weights,
        "distances": distances,
        "best_particle": particles[best],
        "best_distance": distances[best],
        "time": time.time() - t0,
    }
def run_inference(config) -> Dict:
    """Run inference routine

    Reads every input table through the data pipeline API (recording
    provenance via the repo uri/git sha), runs the ABC-SMC fit, and
    returns the summary.

    :param config: Config file name
    :type config: string
    :return: Result runs for inference
    """
    info = common.get_repo_info()
    with standard_api.StandardAPI.from_config(config, uri=info.uri, git_sha=info.git_sha) as store:
        # Positional order must match the ABCSMC constructor signature.
        abcsmc = ABCSMC(
            store.read_table("human/abcsmc-parameters", "abcsmc-parameters"),
            store.read_table("human/historical-deaths", "historical-deaths"),
            store.read_table("human/compartment-transition", "compartment-transition"),
            store.read_table("human/population", "population"),
            store.read_table("human/commutes", "commutes"),
            store.read_table("human/mixing-matrix", "mixing-matrix"),
            store.read_table("human/infection-probability", "infection-probability"),
            store.read_table("human/initial-infections", "initial-infections"),
            store.read_table("human/infectious-compartments", "infectious-compartments"),
            store.read_table("human/trials", "trials"),
            store.read_table("human/start-end-date", "start-end-date"),
            store.read_table("human/movement-multipliers", "movement-multipliers"),
            store.read_table("human/stochastic-mode", "stochastic-mode"),
            store.read_table("human/random-seed", "random-seed"),
        )
        t0 = time.time()
        particles, weights, distances = abcsmc.fit()
        summary = abcsmc.summarize(particles, weights, distances, t0)
        return summary
def main(argv):
    """Command-line entry point: parse args, set up logging, run inference.

    :param argv: Command-line argument list (excluding the program name)
    """
    args = sm.build_args(argv)
    sm.setup_logger(args)
    logger.info("Running inference ABC SMC...")
    t0 = time.time()
    # NOTE(review): the config path is hard-coded; the parsed args are only
    # used for logger setup here -- confirm this is intentional.
    run_inference("../config_inference.yaml")
    logger.info("Writing output")
    logger.info("Took %.2fs to run the inference.", time.time() - t0)
if __name__ == "__main__":
    # Script entry point: bind a package-qualified logger and forward the
    # command-line arguments (minus the program name) to main().
    # NOTE(review): `logger` is only bound here when run as a script, yet
    # module functions above call it -- presumably it is also defined
    # earlier in the module for library use; confirm.
    logger = logging.getLogger(f"{__package__}.{__name__}")
    main(sys.argv[1:])
| [
"sys.path.append",
"numpy.minimum",
"numpy.maximum",
"numpy.log",
"numpy.sum",
"data_pipeline_api.standard_api.StandardAPI.from_config",
"time.time",
"numpy.percentile",
"numpy.argmin",
"pandas.to_datetime",
"numpy.exp",
"more_itertools.pairwise",
"scipy.stats.beta.pdf",
"numpy.all",
"nu... | [((531, 552), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (546, 552), False, 'import sys\n'), ((1854, 1891), 'numpy.maximum', 'np.maximum', (['(mean * stddev)', 'stddev_min'], {}), '(mean * stddev, stddev_min)\n', (1864, 1891), True, 'import numpy as np\n'), ((2182, 2205), 'pandas.to_datetime', 'pd.to_datetime', (['df.Date'], {}), '(df.Date)\n', (2196, 2205), True, 'import pandas as pd\n'), ((2238, 2258), 'more_itertools.pairwise', 'pairwise', (['partitions'], {}), '(partitions)\n', (2246, 2258), False, 'from more_itertools import pairwise\n'), ((37395, 37406), 'time.time', 'time.time', ([], {}), '()\n', (37404, 37406), False, 'import time\n'), ((1912, 1947), 'numpy.log', 'np.log', (['(1 + stddev ** 2 / mean ** 2)'], {}), '(1 + stddev ** 2 / mean ** 2)\n', (1918, 1947), True, 'import numpy as np\n'), ((10983, 11017), 'numpy.all', 'np.all', (['(self.value.Infected >= 0.0)'], {}), '(self.value.Infected >= 0.0)\n', (10989, 11017), True, 'import numpy as np\n'), ((15699, 15742), 'numpy.all', 'np.all', (['(self.value.Contact_Multiplier > 0.0)'], {}), '(self.value.Contact_Multiplier > 0.0)\n', (15705, 15742), True, 'import numpy as np\n'), ((27899, 27927), 'numpy.percentile', 'np.percentile', (['distances', '(50)'], {}), '(distances, 50)\n', (27912, 27927), True, 'import numpy as np\n'), ((28629, 28640), 'time.time', 'time.time', ([], {}), '()\n', (28638, 28640), False, 'import time\n'), ((35921, 36006), 'data_pipeline_api.standard_api.StandardAPI.from_config', 'standard_api.StandardAPI.from_config', (['config'], {'uri': 'info.uri', 'git_sha': 'info.git_sha'}), '(config, uri=info.uri, git_sha=info.git_sha\n )\n', (35957, 36006), False, 'from data_pipeline_api import standard_api\n'), ((37107, 37118), 'time.time', 'time.time', ([], {}), '()\n', (37116, 37118), False, 'import time\n'), ((1970, 2006), 'numpy.sqrt', 'np.sqrt', (['(1 + stddev ** 2 / mean ** 2)'], {}), '(1 + stddev ** 2 / mean ** 2)\n', (1977, 2006), True, 'import numpy as 
np\n'), ((2054, 2064), 'numpy.exp', 'np.exp', (['mu'], {}), '(mu)\n', (2060, 2064), True, 'import numpy as np\n'), ((6094, 6128), 'numpy.maximum', 'np.maximum', (['(value.Value - sigma)', '(0)'], {}), '(value.Value - sigma, 0)\n', (6104, 6128), True, 'import numpy as np\n'), ((6130, 6166), 'numpy.minimum', 'np.minimum', (['(value.Value + sigma)', '(1.0)'], {}), '(value.Value + sigma, 1.0)\n', (6140, 6166), True, 'import numpy as np\n'), ((6474, 6504), 'numpy.all', 'np.all', (['(self.value.Value > 0.0)'], {}), '(self.value.Value > 0.0)\n', (6480, 6504), True, 'import numpy as np\n'), ((6508, 6538), 'numpy.all', 'np.all', (['(self.value.Value < 1.0)'], {}), '(self.value.Value < 1.0)\n', (6514, 6538), True, 'import numpy as np\n'), ((6889, 6992), 'scipy.stats.beta.pdf', 'stats.beta.pdf', (['self.value.Value', 'self.shape', '(self.shape * (1 - self.mean.Value) / self.mean.Value)'], {}), '(self.value.Value, self.shape, self.shape * (1 - self.mean.\n Value) / self.mean.Value)\n', (6903, 6992), True, 'import scipy.stats as stats\n'), ((10586, 10625), 'numpy.maximum', 'np.maximum', (['(value.Infected - sigma)', '(0.0)'], {}), '(value.Infected - sigma, 0.0)\n', (10596, 10625), True, 'import numpy as np\n'), ((37540, 37551), 'time.time', 'time.time', ([], {}), '()\n', (37549, 37551), False, 'import time\n'), ((7817, 7868), 'numpy.maximum', 'np.maximum', (['(self.value.Value - self.kernel_sigma)', '(0)'], {}), '(self.value.Value - self.kernel_sigma, 0)\n', (7827, 7868), True, 'import numpy as np\n'), ((7905, 7956), 'numpy.minimum', 'np.minimum', (['(self.value.Value + self.kernel_sigma)', '(1)'], {}), '(self.value.Value + self.kernel_sigma, 1)\n', (7915, 7956), True, 'import numpy as np\n'), ((15207, 15254), 'numpy.maximum', 'np.maximum', (['(multiplier - self.kernel_sigma)', '(0.0)'], {}), '(multiplier - self.kernel_sigma, 0.0)\n', (15217, 15254), True, 'import numpy as np\n'), ((35652, 35663), 'time.time', 'time.time', ([], {}), '()\n', (35661, 35663), False, 'import 
time\n'), ((34840, 34851), 'time.time', 'time.time', ([], {}), '()\n', (34849, 34851), False, 'import time\n'), ((35541, 35561), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (35550, 35561), True, 'import numpy as np\n'), ((35608, 35628), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (35617, 35628), True, 'import numpy as np\n'), ((20683, 20698), 'numpy.sum', 'np.sum', (['weights'], {}), '(weights)\n', (20689, 20698), True, 'import numpy as np\n'), ((31952, 31974), 'pandas.to_datetime', 'pd.to_datetime', (['x.date'], {}), '(x.date)\n', (31966, 31974), True, 'import pandas as pd\n')] |
import pandas as pd
import numpy as np
import os
import sys
import pdb
import argparse
'''
Script to find the epoch that gives maximum validation set accuracy.
Used to choose best checkpoint
'''
def main(args):
    """Find the epoch with the highest validation accuracy in an experiment log.

    Reads ``<log_root>/exp_<exp_id>/output.log`` (a space-delimited table with
    one row per epoch), locates the epoch with maximal ``Validation_accuracy``
    and prints it. Used to choose the best checkpoint.

    Args:
        args: Namespace with ``exp_id`` (experiment id string) and
            ``log_root`` (directory containing the experiment folders).

    Returns:
        Tuple ``(best_epoch, best_val_acc, best_val_acc_bias)``. The bias
        accuracy is 0 when the log has no ``Validation_accuracy_bias``
        column. (Previously the function returned None; returning the
        result makes it reusable programmatically and is backward
        compatible since the old return value was never used.)
    """
    exp = args.exp_id
    log_dir = os.path.join(args.log_root, "exp_{}".format(exp))
    # The log is a space-delimited table with one row per epoch.
    df = pd.read_csv(os.path.join(log_dir, 'output.log'), delimiter=' ')
    epochs = df['Epoch'].values
    val_acc = df['Validation_accuracy'].values
    best_idx = np.argmax(val_acc)
    best_ep = epochs[best_idx]
    if 'Validation_accuracy_bias' in df.columns:
        val_acc_bias = df['Validation_accuracy_bias'].values[best_idx]
    else:
        val_acc_bias = 0
    print("best epoch for exp = {} is {}".format(exp, best_ep))
    # val_acc[best_idx] is the value the original recomputed from df.
    print("Val spk acc = {}. Val bias acc = {}".format(val_acc[best_idx], val_acc_bias))
    return best_ep, val_acc[best_idx], val_acc_bias
if __name__=='__main__':
    # Parse the experiment id and log directory from the command line,
    # then report the best epoch for that experiment.
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_id', type=str, required=True)
    parser.add_argument('--log_root', type=str, required=True,
                        help='Directory where logs are saved')
    args = parser.parse_args()
    main(args)
| [
"os.path.join",
"argparse.ArgumentParser",
"numpy.argmax"
] | [((466, 484), 'numpy.argmax', 'np.argmax', (['val_acc'], {}), '(val_acc)\n', (475, 484), True, 'import numpy as np\n'), ((888, 913), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (911, 913), False, 'import argparse\n'), ((320, 355), 'os.path.join', 'os.path.join', (['log_dir', '"""output.log"""'], {}), "(log_dir, 'output.log')\n", (332, 355), False, 'import os\n')] |
import numpy as np
import torch
import torch.nn as nn
import os
import copy
import time
from PIL import Image
# NOTE(review): the original comment here claimed images have shape
# 1280x1024, but `dims` is (1920, 1080) -- presumably the screen resolution
# used to normalize the gaze-target coordinates parsed from filenames;
# confirm which value is correct.
dims = (1920, 1080)
class Net(torch.nn.Module):
    """CNN regressor mapping a single-channel image to a 2-D output.

    Three identical conv blocks (conv 3x3 -> ReLU -> batch norm -> max pool)
    downscale the input by a factor of 32 overall (2 * 4 * 4), after which
    three fully connected layers regress the two output coordinates. The
    fully connected input size (40 * 32 * 4) implies a 1024x1280 input image.
    """

    def __init__(self):
        super(Net, self).__init__()

        def conv_block(in_ch, out_ch, pool):
            # conv -> ReLU -> batch norm -> pooling; `pool` sets both the
            # kernel size and stride of the max-pool stage.
            return nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
                nn.ReLU(),
                nn.BatchNorm2d(out_ch),
                nn.MaxPool2d(kernel_size=pool, stride=pool),
            )

        # Attribute names (layer2..layer4, fc1..fc3) are kept so saved
        # state dicts remain loadable.
        self.layer2 = conv_block(1, 4, 2)
        self.layer3 = conv_block(4, 4, 4)
        self.layer4 = conv_block(4, 4, 4)
        self.fc1 = nn.Linear(40 * 32 * 4, 200)
        self.fc2 = nn.Linear(200, 20)
        self.fc3 = nn.Linear(20, 2)

    def forward(self, x):
        """Return the (batch, 2) regression output for input batch x."""
        features = self.layer4(self.layer3(self.layer2(x)))
        flat = features.view(features.size(0), -1)
        return self.fc3(self.fc2(self.fc1(flat)))
def getPaths(root=r"C:/Users/Ethan_H_Laptop/base/programs/python/eye_tracker/data"):
    """Collect the paths of all images found one directory level below root.

    The data directory previously hard-coded in the body is kept as the
    default value of ``root``, so existing callers are unaffected while the
    function becomes usable with any dataset location. The unused ``count``
    variable from the original was removed.

    Args:
        root: Dataset root; each subdirectory is scanned for image files.

    Returns:
        numpy array of path strings (root/subdir/filename, '/'-joined).
    """
    paths = []
    for sub in os.listdir(root):
        for name in os.listdir(root + "/" + sub):
            paths.append(root + "/" + sub + "/" + name)
    return np.array(paths)
def loadImage(path):
    # Load one sample: the filename encodes the gaze-target pixel position as
    # "<x>x<y>" in the 9 characters before the 4-character extension
    # (e.g. ".../0123x0456.png").
    # NOTE(review): the fixed slice [-13:-4] assumes every filename has
    # exactly this length -- confirm against the data generator.
    nameLoc = path[-13:-4].split("x")
    # Normalize the target coordinates by the screen dims to [0, 1].
    location = (int(nameLoc[0])/dims[0], int(nameLoc[1])/dims[1])
    image = Image.open(path)
    data = np.asarray(image)
    # Returns (1x1xHxW float tensor with pixels scaled to [0, 1],
    #          1x2 float tensor with the normalized target location).
    return(((torch.tensor([[data/255.0]])).to(dtype=torch.float), (torch.tensor([location])).to(dtype=torch.float)))
def evaluateModel(model, testPaths):
    # Mean absolute error per axis over testPaths, converted back to pixels
    # by multiplying with the screen dims. Leaves the model in train mode.
    # NOTE(review): no torch.no_grad() here, so gradients are tracked
    # needlessly during evaluation -- consider wrapping the loop.
    model.eval()
    errx = 0
    erry = 0
    for i, path in enumerate(testPaths):
        im, label = loadImage(path)
        # print("eval", i, label)
        output = model(im)
        errx += abs(output[0][0].item() - label[0][0].item())
        erry += abs(output[0][1].item() - label[0][1].item())
    model.train()
    # (mean |dx| in pixels, mean |dy| in pixels)
    return((errx/len(testPaths)*dims[0], erry/len(testPaths)*dims[1]))
def trainModel():
    # Train the eye-tracker CNN on the image dataset: Adam + MSE loss over
    # normalized gaze coordinates, one image per optimizer step, with a full
    # test-set evaluation after every epoch.
    model = Net()
    # model.load_state_dict(torch.load(r"C:\Users\Ethan_H_Laptop\base\programs\python\eye_tracker\models\test_all.plt"))
    nums_epochs = 10
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.002)
    # NOTE(review): `bestModel = model` binds a reference, not a copy, so
    # `bestModel` always aliases the current model; the commented-out code
    # below used copy.deepcopy for the intended snapshot behavior. Harmless
    # here only because bestModel is never used afterwards.
    bestModel = model
    bestScore = (10000, 10000)
    testscores = []
    trainscores = []
    bigTest = []
    bigTrain = []
    paths = getPaths()
    np.random.shuffle(paths)
    # Fixed 18000-image training split; the remainder is held out for test.
    trainingSet = paths[:18000]
    testSet = paths[18000:]
    # trainingSet = paths[:18]
    # testSet = paths[18:20]
    model.train()
    for epoch in range(nums_epochs):
        epochStart = time.time()
        print("start of epoch {}".format(epoch+1))
        np.random.shuffle(trainingSet)
        for i, path in enumerate(trainingSet):
            # Batch size 1: each image is loaded and stepped individually.
            im, label = loadImage(path)
            # print("train", i, label)
            output = model(im)
            loss = criterion(output, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # if (i+1) % 9000 == 0:
            #     trainSc = evaluateModel(model, trainingSet)
            #     testSc = evaluateModel(model, testSet)
            #     if testSc < bestScore:
            #         bestModel = copy.deepcopy(model)
            #         bestScore = testSc
            #     testscores.append(testSc)
            #     trainscores.append(trainSc)
            #     print(trainSc)
            #     print(testSc)
            #     print("Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}"
            #           .format(epoch+1, nums_epochs, i+1, len(trainingSet), loss.item()))
        # Per-axis mean absolute pixel error on the held-out set.
        testSc = evaluateModel(model, testSet)
        if sum(testSc) < sum(bestScore):
            bestModel = model
            bestScore = testSc
        print("Epoch [{}/{}], Loss: {:.4f}, Current Score [x:{:.3f} y:{:.3f}], Best Score [x:{:.3f} y:{:.3f}]"
              .format(epoch+1, nums_epochs, loss.item(), testSc[0], testSc[1], bestScore[0], bestScore[1]))
        epochEnd = time.time()
        print("epoch took {} seconds".format(epochEnd-epochStart))
        # bigTest.append(testscores)
        # bigTrain.append(trainscores)
    finalscore = evaluateModel(model, testSet)
    print("-------------------------------------------------------")
    print("Final Score: [x:{:.3f} y:{:.3f}], Best Score: [x:{:.3f} y:{:.3f}]"
          .format(finalscore[0], finalscore[1], bestScore[0], bestScore[1]))
    # print(bigTrain)
    # print(bigTest)
    # torch.save(bestModel.state_dict(), r"C:\Users\Ethan_H_Laptop\base\programs\python\eye_tracker\models\all_data_full_pass.plt")
# Seed NumPy for reproducible shuffling, run one full training session, and
# report the total wall-clock time.
# NOTE(review): this executes at import time; consider guarding with
# `if __name__ == "__main__":`.
start = time.time()
np.random.seed(1)
trainModel()
end = time.time()
print("total time {} seconds".format(end-start))
| [
"torch.nn.MSELoss",
"numpy.random.seed",
"torch.nn.ReLU",
"numpy.asarray",
"torch.nn.Conv2d",
"PIL.Image.open",
"time.time",
"torch.nn.BatchNorm2d",
"numpy.array",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.tensor",
"os.listdir",
"numpy.random.shuffle"
] | [((5235, 5246), 'time.time', 'time.time', ([], {}), '()\n', (5244, 5246), False, 'import time\n'), ((5248, 5265), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (5262, 5265), True, 'import numpy as np\n'), ((5287, 5298), 'time.time', 'time.time', ([], {}), '()\n', (5296, 5298), False, 'import time\n'), ((1373, 1448), 'os.listdir', 'os.listdir', (['"""C:/Users/Ethan_H_Laptop/base/programs/python/eye_tracker/data"""'], {}), "('C:/Users/Ethan_H_Laptop/base/programs/python/eye_tracker/data')\n", (1383, 1448), False, 'import os\n'), ((1675, 1688), 'numpy.array', 'np.array', (['ims'], {}), '(ims)\n', (1683, 1688), True, 'import numpy as np\n'), ((1833, 1849), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (1843, 1849), False, 'from PIL import Image\n'), ((1862, 1879), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1872, 1879), True, 'import numpy as np\n'), ((2652, 2664), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2662, 2664), True, 'import torch.nn as nn\n'), ((2901, 2925), 'numpy.random.shuffle', 'np.random.shuffle', (['paths'], {}), '(paths)\n', (2918, 2925), True, 'import numpy as np\n'), ((953, 980), 'torch.nn.Linear', 'nn.Linear', (['(40 * 32 * 4)', '(200)'], {}), '(40 * 32 * 4, 200)\n', (962, 980), True, 'import torch.nn as nn\n'), ((1001, 1019), 'torch.nn.Linear', 'nn.Linear', (['(200)', '(20)'], {}), '(200, 20)\n', (1010, 1019), True, 'import torch.nn as nn\n'), ((1040, 1056), 'torch.nn.Linear', 'nn.Linear', (['(20)', '(2)'], {}), '(20, 2)\n', (1049, 1056), True, 'import torch.nn as nn\n'), ((1470, 1556), 'os.listdir', 'os.listdir', (["('C:/Users/Ethan_H_Laptop/base/programs/python/eye_tracker/data/' + dir)"], {}), "('C:/Users/Ethan_H_Laptop/base/programs/python/eye_tracker/data/' +\n dir)\n", (1480, 1556), False, 'import os\n'), ((3137, 3148), 'time.time', 'time.time', ([], {}), '()\n', (3146, 3148), False, 'import time\n'), ((3210, 3240), 'numpy.random.shuffle', 'np.random.shuffle', (['trainingSet'], 
{}), '(trainingSet)\n', (3227, 3240), True, 'import numpy as np\n'), ((4585, 4596), 'time.time', 'time.time', ([], {}), '()\n', (4594, 4596), False, 'import time\n'), ((318, 369), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1)', '(4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(1, 4, kernel_size=3, stride=1, padding=1)\n', (327, 369), True, 'import torch.nn as nn\n'), ((384, 393), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (391, 393), True, 'import torch.nn as nn\n'), ((408, 425), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (422, 425), True, 'import torch.nn as nn\n'), ((440, 477), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (452, 477), True, 'import torch.nn as nn\n'), ((540, 591), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(4, 4, kernel_size=3, stride=1, padding=1)\n', (549, 591), True, 'import torch.nn as nn\n'), ((606, 615), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (613, 615), True, 'import torch.nn as nn\n'), ((630, 647), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (644, 647), True, 'import torch.nn as nn\n'), ((662, 699), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(4)', 'stride': '(4)'}), '(kernel_size=4, stride=4)\n', (674, 699), True, 'import torch.nn as nn\n'), ((762, 813), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(4)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(4, 4, kernel_size=3, stride=1, padding=1)\n', (771, 813), True, 'import torch.nn as nn\n'), ((828, 837), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (835, 837), True, 'import torch.nn as nn\n'), ((852, 869), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(4)'], {}), '(4)\n', (866, 869), True, 'import torch.nn as nn\n'), ((884, 921), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(4)', 'stride': '(4)'}), '(kernel_size=4, stride=4)\n', (896, 921), 
True, 'import torch.nn as nn\n'), ((1894, 1924), 'torch.tensor', 'torch.tensor', (['[[data / 255.0]]'], {}), '([[data / 255.0]])\n', (1906, 1924), False, 'import torch\n'), ((1948, 1972), 'torch.tensor', 'torch.tensor', (['[location]'], {}), '([location])\n', (1960, 1972), False, 'import torch\n')] |
"""Estimates base velocity for A1 robot from accelerometer readings."""
import numpy as np
from filterpy.kalman import KalmanFilter
class A1RobotStateEstimator:
    """Estimates base velocity of A1 robot.

    The velocity estimator consists of a state estimator for CoM velocity.
    Two sources of information are used:
    The integrated reading of accelerometer and the velocity estimation from
    contact legs. The readings are fused together using a Kalman Filter.
    """

    def __init__(self,
                 robot,
                 accelerometer_variance=np.array(
                     [1.42072319e-05, 1.57958752e-05, 8.75317619e-05]),
                 sensor_variance=np.array([0.33705298, 0.14858707, 0.68439632]) *
                 0.03,
                 initial_variance=0.1):
        """Initiates the velocity estimator.

        See filterpy documentation in the link below for more details.
        https://filterpy.readthedocs.io/en/latest/kalman/KalmanFilter.html

        Args:
            robot: the robot class for velocity estimation.
            accelerometer_variance: noise estimation for accelerometer reading.
            sensor_variance: noise estimation for motor velocity reading.
            initial_variance: variance estimation of initial state.
        """
        # NOTE(review): the np.array default arguments are created once and
        # shared across instances; they are only read here, but mutating
        # them would affect every estimator.
        self.robot = robot

        # 3-state (vx, vy, vz) Kalman filter with a 3-D control input
        # (integrated acceleration) and a 3-D measurement (leg odometry).
        self.filter = KalmanFilter(dim_x=3, dim_z=3, dim_u=3)
        self.filter.x = np.zeros(3)
        self._initial_variance = initial_variance
        self._accelerometer_variance = accelerometer_variance
        self._sensor_variance = sensor_variance
        self.filter.P = np.eye(3) * self._initial_variance  # State covariance
        self.filter.Q = np.eye(3) * accelerometer_variance  # Process noise
        self.filter.R = np.eye(3) * sensor_variance  # Measurement noise

        self.filter.H = np.eye(3)  # measurement function (y=H*x)
        self.filter.F = np.eye(3)  # state transition matrix
        self.filter.B = np.eye(3)  # control transition (u = integrated acceleration)

        self.reset()

    def reset(self):
        """Resets the filter state and bookkeeping to startup conditions."""
        self.filter.x = np.zeros(3)
        self.filter.P = np.eye(3) * self._initial_variance
        self._last_timestamp = 0
        self._last_base_velocity_sim = np.zeros(3)
        self._estimated_velocity = self.filter.x.copy()

    def _compute_delta_time(self, robot_state):
        """Returns the elapsed time (s) since the previous update call."""
        del robot_state  # unused
        if self._last_timestamp == 0.:
            # First timestamp received, return an estimated delta_time.
            sim_conf = self.robot.sim_conf
            delta_time_s = sim_conf.timestep * sim_conf.action_repeat
        else:
            delta_time_s = self.robot.time_since_reset - self._last_timestamp
        self._last_timestamp = self.robot.time_since_reset
        return delta_time_s

    def _get_velocity_observation(self):
        """Estimates base velocity from each leg currently in contact.

        For a stance leg the foot velocity relative to the base is computed
        from the leg Jacobian and joint velocities; its negation (rotated by
        the base orientation) serves as one observation of base velocity.

        Returns:
            List of per-leg 3-D velocity observations (empty when no leg is
            in contact).
        """
        base_orientation = self.robot.base_orientation_quat
        rot_mat = self.robot.pybullet_client.getMatrixFromQuaternion(
            base_orientation)
        rot_mat = np.array(rot_mat).reshape((3, 3))
        observed_velocities = []
        foot_contact = self.robot.foot_contacts
        for leg_id in range(4):
            if foot_contact[leg_id]:
                jacobian = self.robot.compute_foot_jacobian(leg_id)
                # Only pick the jacobian related to joint motors
                joint_velocities = self.robot.motor_velocities[leg_id *
                                                               3:(leg_id + 1) * 3]
                leg_velocity_in_base_frame = jacobian.dot(joint_velocities)
                base_velocity_in_base_frame = -leg_velocity_in_base_frame[:3]
                observed_velocities.append(rot_mat.dot(base_velocity_in_base_frame))
        return observed_velocities

    def update(self, robot_state):
        """Propagate current state estimate with new accelerometer reading."""
        delta_time_s = self._compute_delta_time(robot_state)
        sensor_acc = np.array(robot_state.imu.accelerometer)
        base_orientation = self.robot.base_orientation_quat
        rot_mat = self.robot.pybullet_client.getMatrixFromQuaternion(
            base_orientation)
        rot_mat = np.array(rot_mat).reshape((3, 3))
        # Rotate the IMU reading into the world frame and add (0, 0, -9.8)
        # -- presumably removing gravity from an accelerometer that reads
        # +9.8 on z at rest; confirm the IMU sign convention.
        calibrated_acc = rot_mat.dot(sensor_acc) + np.array([0., 0., -9.8])
        # Prediction step: velocity integrates acceleration over the step.
        self.filter.predict(u=calibrated_acc * delta_time_s)

        observed_velocities = self._get_velocity_observation()

        if observed_velocities:
            # Correction step: fuse the mean of the per-leg observations.
            observed_velocities = np.mean(observed_velocities, axis=0)
            # multiplier = np.clip(
            #     1 + (np.sqrt(observed_velocities[0]**2 + \
            #       observed_velocities[1]**2) -
            #          0.3), 1, 1.3)
            # observed_velocities[0] *= 1.3
            self.filter.update(observed_velocities)

        self._estimated_velocity = self.filter.x.copy()

    @property
    def estimated_velocity(self):
        # Latest fused base velocity; returned as a copy so callers may
        # mutate it safely.
        return self._estimated_velocity.copy()
| [
"numpy.zeros",
"numpy.mean",
"numpy.array",
"filterpy.kalman.KalmanFilter",
"numpy.eye"
] | [((552, 610), 'numpy.array', 'np.array', (['[1.42072319e-05, 1.57958752e-05, 8.75317619e-05]'], {}), '([1.42072319e-05, 1.57958752e-05, 8.75317619e-05])\n', (560, 610), True, 'import numpy as np\n'), ((1274, 1313), 'filterpy.kalman.KalmanFilter', 'KalmanFilter', ([], {'dim_x': '(3)', 'dim_z': '(3)', 'dim_u': '(3)'}), '(dim_x=3, dim_z=3, dim_u=3)\n', (1286, 1313), False, 'from filterpy.kalman import KalmanFilter\n'), ((1334, 1345), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1342, 1345), True, 'import numpy as np\n'), ((1693, 1702), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1699, 1702), True, 'import numpy as np\n'), ((1755, 1764), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1761, 1764), True, 'import numpy as np\n'), ((1812, 1821), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1818, 1821), True, 'import numpy as np\n'), ((1879, 1890), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1887, 1890), True, 'import numpy as np\n'), ((2010, 2021), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (2018, 2021), True, 'import numpy as np\n'), ((3569, 3608), 'numpy.array', 'np.array', (['robot_state.imu.accelerometer'], {}), '(robot_state.imu.accelerometer)\n', (3577, 3608), True, 'import numpy as np\n'), ((663, 709), 'numpy.array', 'np.array', (['[0.33705298, 0.14858707, 0.68439632]'], {}), '([0.33705298, 0.14858707, 0.68439632])\n', (671, 709), True, 'import numpy as np\n'), ((1514, 1523), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1520, 1523), True, 'import numpy as np\n'), ((1589, 1598), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1595, 1598), True, 'import numpy as np\n'), ((1644, 1653), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1650, 1653), True, 'import numpy as np\n'), ((1911, 1920), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1917, 1920), True, 'import numpy as np\n'), ((3852, 3878), 'numpy.array', 'np.array', (['[0.0, 0.0, -9.8]'], {}), '([0.0, 0.0, -9.8])\n', (3860, 3878), True, 'import numpy as np\n'), ((4051, 4087), 
'numpy.mean', 'np.mean', (['observed_velocities'], {'axis': '(0)'}), '(observed_velocities, axis=0)\n', (4058, 4087), True, 'import numpy as np\n'), ((2716, 2733), 'numpy.array', 'np.array', (['rot_mat'], {}), '(rot_mat)\n', (2724, 2733), True, 'import numpy as np\n'), ((3771, 3788), 'numpy.array', 'np.array', (['rot_mat'], {}), '(rot_mat)\n', (3779, 3788), True, 'import numpy as np\n')] |
#
# Copyright 2021-2022 konawasabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
'''
import tkinter as tk
from tkinter import ttk
import tkinter.filedialog as filedialog
import tkinter.simpledialog as simpledialog
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import configparser
class BackImgControl():
    class BackImgData():
        """Holds one background image plus its display-transform state
        (rotation, shift, scale, transparency) for matplotlib rendering."""

        def __init__(self,path):
            # `path` must point to an image file readable by PIL.
            self.path = path
            self.img = Image.open(path)
            self.output_data = np.array(self.img)
            self.toshow = True  # whether this image should be drawn
            width = self.output_data.shape[1]
            height = self.output_data.shape[0]
            self.origin = [0,0]  # rotation origin in image pixel coordinates
            self.shift = [0,0]   # translation applied after rotation
            self.rotrad = 0      # rotation angle (degrees, despite the name)
            self.alpha = 0.5     # drawing transparency
            # Plot extent [xmin, xmax, ymin, ymax]; y grows downward in
            # image coordinates, hence -height.
            self.extent = [0,width,0,-height]
            self.scale = 1       # uniform scale factor

        def rotate(self,rad):
            # Rotate the image and recompute its plot extent from the
            # transformed corner coordinates.
            # NOTE(review): despite the parameter name, `rad` is treated as
            # DEGREES -- both PIL's Image.rotate and np.deg2rad below take
            # degrees; consider renaming.
            def rotmatrix(tau1):
                '''Return the 2-D rotation matrix for angle tau1 [rad].'''
                return np.array([[np.cos(tau1), -np.sin(tau1)], [np.sin(tau1), np.cos(tau1)]])
            self.rotrad = rad
            # PIL rotates counter-clockwise; negate to match plot convention.
            self.output_data = np.array(self.img.rotate(-rad,expand=True))
            width = np.array(self.img).shape[1]
            height = np.array(self.img).shape[0]
            # Corners of the unrotated image as 2x4 column vectors.
            shape_orig = np.vstack((0,0))
            shape_orig = np.hstack((shape_orig,np.vstack((width,0))))
            shape_orig = np.hstack((shape_orig,np.vstack((width,height))))
            shape_orig = np.hstack((shape_orig,np.vstack((0,height))))
            # Scale about `origin`, rotate, then translate by `shift`.
            shape_rot = np.dot(rotmatrix(np.deg2rad(rad)),(shape_orig - np.vstack((self.origin[0],self.origin[1])))*self.scale)
            shape_rot = shape_rot + np.vstack((self.shift[0],self.shift[1]))
            self.extent = [min(shape_rot[0]),max(shape_rot[0]),min(shape_rot[1]),max(shape_rot[1])]

        def show(self,ax,as_ratio=1,ymag=1):
            # Draw the (re-rotated) image onto matplotlib axes `ax` when
            # `toshow` is set; `ymag` is forwarded to imshow as the aspect.
            # `as_ratio` is currently unused (see the commented line below).
            if self.toshow:
                self.rotate(self.rotrad)
                #as_ratio_mod = (self.extent[1]-self.extent[0])/(self.extent[3]-self.extent[2])*as_ratio
                ax.imshow(self.output_data,alpha=self.alpha,extent=[self.extent[0],self.extent[1],self.extent[3],self.extent[2]],aspect=ymag)
def __init__(self,mainwindow):
self.mainwindow = mainwindow
self.imgs = {}
self.conf_path = None
def create_window(self):
self.master = tk.Toplevel(self.mainwindow)
self.mainframe = ttk.Frame(self.master, padding='3 3 3 3')
self.mainframe.columnconfigure(0, weight=1)
self.mainframe.rowconfigure(0, weight=1)
self.mainframe.grid(column=0, row=0, sticky=(tk.N, tk.W, tk.E, tk.S))
self.master.title('Background images')
#self.imglist_val_list = list(self.imgs.keys())
#self.imglist_val = tk.StringVar(value=self.imglist_val_list)
self.imglist_sb = ttk.Treeview(self.mainframe,selectmode='browse',height = 4)
self.imglist_sb.column('#0',width=500)
self.imglist_sb.heading('#0',text='Filepath')
for i in list(self.imgs.keys()):
self.imglist_sb.insert('',tk.END, i, text=i)
self.imglist_sb.grid(column=0, row=0, sticky=(tk.S))
self.imglist_sb.bind('<<TreeviewSelect>>', self.clickimglist)
self.input_frame = ttk.Frame(self.mainframe, padding='3 3 3 3')
self.input_frame.grid(column=0, row=1, sticky=(tk.E,tk.W))
'''
self.xmin_l = ttk.Label(self.input_frame, text='xmin')
self.xmax_l = ttk.Label(self.input_frame, text='xmax')
self.ymin_l = ttk.Label(self.input_frame, text='ymin')
self.ymax_l = ttk.Label(self.input_frame, text='ymax')
'''
self.rot_l = ttk.Label(self.input_frame, text='rotation')
self.alpha_l = ttk.Label(self.input_frame, text='alpha')
self.xo_l = ttk.Label(self.input_frame, text='x0')
self.yo_l = ttk.Label(self.input_frame, text='y0')
self.xsh_l = ttk.Label(self.input_frame, text='xshift')
self.ysh_l = ttk.Label(self.input_frame, text='yshift')
self.scale_l = ttk.Label(self.input_frame, text='scale')
'''
self.xmin_l.grid(column=0, row=0, sticky=(tk.E,tk.W))
self.xmax_l.grid(column=0, row=1, sticky=(tk.E,tk.W))
self.ymin_l.grid(column=2, row=0, sticky=(tk.E,tk.W))
self.ymax_l.grid(column=2, row=1, sticky=(tk.E,tk.W))
'''
self.rot_l.grid(column=0, row=4, sticky=(tk.E,tk.W))
self.alpha_l.grid(column=2, row=4, sticky=(tk.E,tk.W))
self.xo_l.grid(column=0, row=2, sticky=(tk.E,tk.W))
self.yo_l.grid(column=2, row=2, sticky=(tk.E,tk.W))
self.xsh_l.grid(column=0, row=3, sticky=(tk.E,tk.W))
self.ysh_l.grid(column=2, row=3, sticky=(tk.E,tk.W))
self.scale_l.grid(column=0, row=5, sticky=(tk.E,tk.W))
self.extent = [tk.DoubleVar(value=0),tk.DoubleVar(value=0),tk.DoubleVar(value=0),tk.DoubleVar(value=0)]
self.rot_v = tk.DoubleVar(value=0)
self.alpha_v = tk.DoubleVar(value=0)
self.toshow_v = tk.BooleanVar(value=False)
self.origin = [tk.DoubleVar(value=0),tk.DoubleVar(value=0)]
self.shift = [tk.DoubleVar(value=0),tk.DoubleVar(value=0)]
self.scale_v = tk.DoubleVar(value=1)
'''
self.xmin_e = ttk.Entry(self.input_frame, textvariable=self.extent[0],width=5)
self.xmax_e = ttk.Entry(self.input_frame, textvariable=self.extent[1],width=5)
self.ymin_e = ttk.Entry(self.input_frame, textvariable=self.extent[2],width=5)
self.ymax_e = ttk.Entry(self.input_frame, textvariable=self.extent[3],width=5)
'''
self.rot_e = ttk.Entry(self.input_frame, textvariable=self.rot_v,width=5)
self.alpha_e = ttk.Entry(self.input_frame, textvariable=self.alpha_v,width=5)
self.show_chk = ttk.Checkbutton(self.input_frame, text='Show', variable=self.toshow_v)
self.xo_e = ttk.Entry(self.input_frame, textvariable=self.origin[0],width=5)
self.yo_e = ttk.Entry(self.input_frame, textvariable=self.origin[1],width=5)
self.xsh_e = ttk.Entry(self.input_frame, textvariable=self.shift[0],width=5)
self.ysh_e = ttk.Entry(self.input_frame, textvariable=self.shift[1],width=5)
self.scale_e = ttk.Entry(self.input_frame, textvariable=self.scale_v,width=5)
'''
self.xmin_e.grid(column=1, row=0, sticky=(tk.E,tk.W))
self.xmax_e.grid(column=1, row=1, sticky=(tk.E,tk.W))
self.ymin_e.grid(column=3, row=0, sticky=(tk.E,tk.W))
self.ymax_e.grid(column=3, row=1, sticky=(tk.E,tk.W))
'''
self.rot_e.grid(column=1, row=4, sticky=(tk.E,tk.W))
self.alpha_e.grid(column=3, row=4, sticky=(tk.E,tk.W))
self.show_chk.grid(column=3, row=5, sticky=(tk.E,tk.W))
self.xo_e.grid(column=1, row=2, sticky=(tk.E,tk.W))
self.yo_e.grid(column=3, row=2, sticky=(tk.E,tk.W))
self.xsh_e.grid(column=1, row=3, sticky=(tk.E,tk.W))
self.ysh_e.grid(column=3, row=3, sticky=(tk.E,tk.W))
self.scale_e.grid(column=1, row=5, sticky=(tk.E,tk.W))
self.button_frame = ttk.Frame(self.mainframe, padding='3 3 3 3')
self.button_frame.grid(column=0, row=2, sticky=(tk.E,tk.W))
self.button_add = ttk.Button(self.button_frame, text="Add", command=self.newimg)
self.button_add.grid(column=0, row=0, sticky=(tk.S))
self.button_delete = ttk.Button(self.button_frame, text="Delete", command=self.deleteimg)
self.button_delete.grid(column=1, row=0, sticky=(tk.S))
self.button_show = ttk.Button(self.button_frame, text="Refresh", command=self.showimg)
self.button_show.grid(column=2, row=0, sticky=(tk.S))
'''
self.button_close = ttk.Button(self.button_frame, text="Close", command=self.master.destroy)
self.button_close.grid(column=0, row=1, sticky=(tk.S))
'''
self.master.focus_set()
def newimg(self):
inputdir = filedialog.askopenfilename()
if inputdir != '':
self.imgs[inputdir] = self.BackImgData(inputdir)
#self.imgs.show(self.ax)
#self.imglist_val_list.append(inputdir)
#self.imglist_val.set(self.imglist_val_list)
self.imglist_sb.insert('',tk.END, inputdir, text=inputdir)
self.imglist_sb.selection_set(inputdir)
self.mainwindow.drawall()
def deleteimg(self):
selected = str(self.imglist_sb.selection()[0])
del self.imgs[selected]
self.imglist_sb.delete(selected)
self.mainwindow.drawall()
def showimg(self):
selected = str(self.imglist_sb.selection()[0])
for i in [0,1,2,3]:
self.imgs[selected].extent[i] = self.extent[i].get()
self.imgs[selected].alpha = self.alpha_v.get()
self.imgs[selected].toshow = self.toshow_v.get()
self.imgs[selected].scale = self.scale_v.get()
for i in [0,1]:
self.imgs[selected].origin[i] = self.origin[i].get()
self.imgs[selected].shift[i] = self.shift[i].get()
if self.rot_v.get() != self.imgs[selected].rotrad:
self.imgs[selected].rotrad = self.rot_v.get()
#self.imgs[selected].rotate(self.imgs[selected].rotrad)
self.mainwindow.drawall()
def clickimglist(self,event):
if len(self.imglist_sb.selection())>0:
selected = str(self.imglist_sb.selection()[0])
#print('Hi',event,selected)
for i in [0,1,2,3]:
self.extent[i].set(self.imgs[selected].extent[i])
self.rot_v.set(self.imgs[selected].rotrad)
self.alpha_v.set(self.imgs[selected].alpha)
self.toshow_v.set(self.imgs[selected].toshow)
self.scale_v.set(self.imgs[selected].scale)
for i in [0,1]:
self.origin[i].set(self.imgs[selected].origin[i])
self.shift[i].set(self.imgs[selected].shift[i])
def imgsarea(self, extent_input = None):
extent = [0,0,0,0] if extent_input == None else extent_input
for key in list(self.imgs.keys()):
img = self.imgs[key]
extent[0] = img.extent[0] if img.extent[0] < extent[0] else extent[0]
extent[1] = img.extent[1] if img.extent[1] > extent[1] else extent[1]
extent[2] = img.extent[2] if img.extent[2] < extent[2] else extent[2]
extent[3] = img.extent[3] if img.extent[3] > extent[3] else extent[3]
return extent
def save_setting(self,outputpath=None):
if outputpath is None:
outputpath = filedialog.asksaveasfilename()
if outputpath != '':
fp = open(outputpath, 'w')
for imgkey in self.imgs.keys():
fp.writelines('[{:s}]\n'.format(imgkey))
#fp.writelines('file = {:s}\n'.format(imgkey))
fp.writelines('rot = {:f}\n'.format(self.imgs[imgkey].rotrad))
fp.writelines('alpha = {:f}\n'.format(self.imgs[imgkey].alpha))
fp.writelines('scale = {:f}\n'.format(self.imgs[imgkey].scale))
fp.writelines('origin = {:f},{:f}\n'.format(self.imgs[imgkey].origin[0],self.imgs[imgkey].origin[1]))
fp.writelines('shift = {:f},{:f}\n'.format(self.imgs[imgkey].shift[0],self.imgs[imgkey].shift[1]))
fp.writelines('\n')
fp.close()
def load_setting(self,path=None):
if path is None:
path = filedialog.askopenfilename()
conf = configparser.ConfigParser()
conf.read(path)
self.conf_path = path
self.imgs = {}
for sections in conf.sections():
self.imgs[sections]=self.BackImgData(sections)
origin = conf[sections]['origin'].split(',')
self.imgs[sections].origin[0] = float(origin[0])
self.imgs[sections].origin[1] = float(origin[1])
shift = conf[sections]['shift'].split(',')
self.imgs[sections].shift[0] = float(shift[0])
self.imgs[sections].shift[1] = float(shift[1])
self.imgs[sections].rotrad = float(conf[sections]['rot'])
self.imgs[sections].alpha = float(conf[sections]['alpha'])
self.imgs[sections].scale = float(conf[sections]['scale'])
self.mainwindow.drawall()
| [
"tkinter.ttk.Label",
"tkinter.filedialog.asksaveasfilename",
"tkinter.ttk.Entry",
"numpy.deg2rad",
"configparser.ConfigParser",
"tkinter.filedialog.askopenfilename",
"tkinter.ttk.Frame",
"PIL.Image.open",
"tkinter.Toplevel",
"tkinter.BooleanVar",
"numpy.array",
"tkinter.ttk.Treeview",
"tkint... | [((2964, 2992), 'tkinter.Toplevel', 'tk.Toplevel', (['self.mainwindow'], {}), '(self.mainwindow)\n', (2975, 2992), True, 'import tkinter as tk\n'), ((3018, 3059), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.master'], {'padding': '"""3 3 3 3"""'}), "(self.master, padding='3 3 3 3')\n", (3027, 3059), False, 'from tkinter import ttk\n'), ((3469, 3528), 'tkinter.ttk.Treeview', 'ttk.Treeview', (['self.mainframe'], {'selectmode': '"""browse"""', 'height': '(4)'}), "(self.mainframe, selectmode='browse', height=4)\n", (3481, 3528), False, 'from tkinter import ttk\n'), ((3895, 3939), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.mainframe'], {'padding': '"""3 3 3 3"""'}), "(self.mainframe, padding='3 3 3 3')\n", (3904, 3939), False, 'from tkinter import ttk\n'), ((4305, 4349), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""rotation"""'}), "(self.input_frame, text='rotation')\n", (4314, 4349), False, 'from tkinter import ttk\n'), ((4373, 4414), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""alpha"""'}), "(self.input_frame, text='alpha')\n", (4382, 4414), False, 'from tkinter import ttk\n'), ((4435, 4473), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""x0"""'}), "(self.input_frame, text='x0')\n", (4444, 4473), False, 'from tkinter import ttk\n'), ((4494, 4532), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""y0"""'}), "(self.input_frame, text='y0')\n", (4503, 4532), False, 'from tkinter import ttk\n'), ((4554, 4596), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""xshift"""'}), "(self.input_frame, text='xshift')\n", (4563, 4596), False, 'from tkinter import ttk\n'), ((4618, 4660), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""yshift"""'}), "(self.input_frame, text='yshift')\n", (4627, 4660), False, 'from tkinter import ttk\n'), ((4684, 4725), 'tkinter.ttk.Label', 'ttk.Label', (['self.input_frame'], {'text': '"""scale"""'}), 
"(self.input_frame, text='scale')\n", (4693, 4725), False, 'from tkinter import ttk\n'), ((5570, 5591), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5582, 5591), True, 'import tkinter as tk\n'), ((5615, 5636), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5627, 5636), True, 'import tkinter as tk\n'), ((5661, 5687), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {'value': '(False)'}), '(value=False)\n', (5674, 5687), True, 'import tkinter as tk\n'), ((5846, 5867), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(1)'}), '(value=1)\n', (5858, 5867), True, 'import tkinter as tk\n'), ((6262, 6323), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.rot_v', 'width': '(5)'}), '(self.input_frame, textvariable=self.rot_v, width=5)\n', (6271, 6323), False, 'from tkinter import ttk\n'), ((6346, 6409), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.alpha_v', 'width': '(5)'}), '(self.input_frame, textvariable=self.alpha_v, width=5)\n', (6355, 6409), False, 'from tkinter import ttk\n'), ((6433, 6503), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.input_frame'], {'text': '"""Show"""', 'variable': 'self.toshow_v'}), "(self.input_frame, text='Show', variable=self.toshow_v)\n", (6448, 6503), False, 'from tkinter import ttk\n'), ((6524, 6589), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.origin[0]', 'width': '(5)'}), '(self.input_frame, textvariable=self.origin[0], width=5)\n', (6533, 6589), False, 'from tkinter import ttk\n'), ((6609, 6674), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.origin[1]', 'width': '(5)'}), '(self.input_frame, textvariable=self.origin[1], width=5)\n', (6618, 6674), False, 'from tkinter import ttk\n'), ((6695, 6759), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.shift[0]', 'width': '(5)'}), '(self.input_frame, 
textvariable=self.shift[0], width=5)\n', (6704, 6759), False, 'from tkinter import ttk\n'), ((6780, 6844), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.shift[1]', 'width': '(5)'}), '(self.input_frame, textvariable=self.shift[1], width=5)\n', (6789, 6844), False, 'from tkinter import ttk\n'), ((6867, 6930), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.input_frame'], {'textvariable': 'self.scale_v', 'width': '(5)'}), '(self.input_frame, textvariable=self.scale_v, width=5)\n', (6876, 6930), False, 'from tkinter import ttk\n'), ((7733, 7777), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.mainframe'], {'padding': '"""3 3 3 3"""'}), "(self.mainframe, padding='3 3 3 3')\n", (7742, 7777), False, 'from tkinter import ttk\n'), ((7872, 7934), 'tkinter.ttk.Button', 'ttk.Button', (['self.button_frame'], {'text': '"""Add"""', 'command': 'self.newimg'}), "(self.button_frame, text='Add', command=self.newimg)\n", (7882, 7934), False, 'from tkinter import ttk\n'), ((8025, 8093), 'tkinter.ttk.Button', 'ttk.Button', (['self.button_frame'], {'text': '"""Delete"""', 'command': 'self.deleteimg'}), "(self.button_frame, text='Delete', command=self.deleteimg)\n", (8035, 8093), False, 'from tkinter import ttk\n'), ((8185, 8252), 'tkinter.ttk.Button', 'ttk.Button', (['self.button_frame'], {'text': '"""Refresh"""', 'command': 'self.showimg'}), "(self.button_frame, text='Refresh', command=self.showimg)\n", (8195, 8252), False, 'from tkinter import ttk\n'), ((8585, 8613), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (8611, 8613), True, 'import tkinter.filedialog as filedialog\n'), ((12140, 12167), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (12165, 12167), False, 'import configparser\n'), ((981, 997), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (991, 997), False, 'from PIL import Image\n'), ((1029, 1047), 'numpy.array', 'np.array', (['self.img'], {}), '(self.img)\n', (1037, 1047), 
True, 'import numpy as np\n'), ((1863, 1880), 'numpy.vstack', 'np.vstack', (['(0, 0)'], {}), '((0, 0))\n', (1872, 1880), True, 'import numpy as np\n'), ((5460, 5481), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5472, 5481), True, 'import tkinter as tk\n'), ((5482, 5503), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5494, 5503), True, 'import tkinter as tk\n'), ((5504, 5525), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5516, 5525), True, 'import tkinter as tk\n'), ((5526, 5547), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5538, 5547), True, 'import tkinter as tk\n'), ((5711, 5732), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5723, 5732), True, 'import tkinter as tk\n'), ((5733, 5754), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5745, 5754), True, 'import tkinter as tk\n'), ((5778, 5799), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5790, 5799), True, 'import tkinter as tk\n'), ((5800, 5821), 'tkinter.DoubleVar', 'tk.DoubleVar', ([], {'value': '(0)'}), '(value=0)\n', (5812, 5821), True, 'import tkinter as tk\n'), ((11220, 11250), 'tkinter.filedialog.asksaveasfilename', 'filedialog.asksaveasfilename', ([], {}), '()\n', (11248, 11250), True, 'import tkinter.filedialog as filedialog\n'), ((12096, 12124), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (12122, 12124), True, 'import tkinter.filedialog as filedialog\n'), ((2273, 2314), 'numpy.vstack', 'np.vstack', (['(self.shift[0], self.shift[1])'], {}), '((self.shift[0], self.shift[1]))\n', (2282, 2314), True, 'import numpy as np\n'), ((1748, 1766), 'numpy.array', 'np.array', (['self.img'], {}), '(self.img)\n', (1756, 1766), True, 'import numpy as np\n'), ((1797, 1815), 'numpy.array', 'np.array', (['self.img'], {}), '(self.img)\n', (1805, 1815), True, 'import 
numpy as np\n'), ((1927, 1948), 'numpy.vstack', 'np.vstack', (['(width, 0)'], {}), '((width, 0))\n', (1936, 1948), True, 'import numpy as np\n'), ((1997, 2023), 'numpy.vstack', 'np.vstack', (['(width, height)'], {}), '((width, height))\n', (2006, 2023), True, 'import numpy as np\n'), ((2072, 2094), 'numpy.vstack', 'np.vstack', (['(0, height)'], {}), '((0, height))\n', (2081, 2094), True, 'import numpy as np\n'), ((2150, 2165), 'numpy.deg2rad', 'np.deg2rad', (['rad'], {}), '(rad)\n', (2160, 2165), True, 'import numpy as np\n'), ((2181, 2224), 'numpy.vstack', 'np.vstack', (['(self.origin[0], self.origin[1])'], {}), '((self.origin[0], self.origin[1]))\n', (2190, 2224), True, 'import numpy as np\n'), ((1547, 1559), 'numpy.cos', 'np.cos', (['tau1'], {}), '(tau1)\n', (1553, 1559), True, 'import numpy as np\n'), ((1578, 1590), 'numpy.sin', 'np.sin', (['tau1'], {}), '(tau1)\n', (1584, 1590), True, 'import numpy as np\n'), ((1593, 1605), 'numpy.cos', 'np.cos', (['tau1'], {}), '(tau1)\n', (1599, 1605), True, 'import numpy as np\n'), ((1562, 1574), 'numpy.sin', 'np.sin', (['tau1'], {}), '(tau1)\n', (1568, 1574), True, 'import numpy as np\n')] |
import json
import numpy as np
from scipy.signal import correlate
"""
Base class that calculates Age of Information (AoI)
"""
class AoICalc():
    """Base class for Age-of-Information (AoI) calculators.

    Loads the delivery log from ``data_file`` (JSON), splits it per source,
    and drops preempted packets (delivery time 0) and obsolete packets
    (generation time older than the last kept packet), counting both in
    ``self.aoi[source]``.

    Parameters
    ----------
    sim_id : str
        Identification of the simulation.
    data_file : str
        JSON file whose keys look like ``"(source, seq, gen_time)"`` and
        whose values carry the delivery time at index 2.
    num_sources : int
        Number of packet sources.
    save_AoI_seq : str, optional (default: "None")
        Folder where the AoI sequence is saved; "None" disables saving.
    save_Q_seq : str, optional (default: "None")
        Folder where the AoI-area (Q) sequence is saved; "None" disables saving.
    """

    def __init__(self, sim_id, data_file, num_sources, save_AoI_seq="None", save_Q_seq="None", **kwargs):
        self.sim_id = sim_id
        self.data_file = data_file
        self.num_sources = num_sources
        self.save_AoI_seq = save_AoI_seq
        self.save_Q_seq = save_Q_seq
        self.aoi = {}
        for i in range(self.num_sources):
            self.aoi[i] = {}
            self.aoi[i]["Preempted"] = 0
            self.aoi[i]["Obsolete"] = 0
            self.aoi[i]["RMSE"] = 0
        # Load data
        with open(self.data_file, 'r') as d:
            data = json.load(d)
        # Split packets per source; keys look like "(source, seq, gen_time)".
        self.splitted_data = {}
        for pkt_id, value in data.items():   # renamed from 'id' (shadowed builtin)
            id_splitted = pkt_id.split(",")
            source = int(id_splitted[0][1:])        # strip leading '('
            gen_time = float(id_splitted[2][0:-1])  # strip trailing ')'
            deliv_time = value[2]
            times = (gen_time, deliv_time)
            if source in self.splitted_data:
                # Remove preempted packets (never delivered)
                if deliv_time == 0:
                    self.aoi[source]["Preempted"] += 1
                    continue
                # Remove obsolete packets (older than the last kept one)
                elif gen_time < self.splitted_data[source][-1][0]:
                    self.aoi[source]["Obsolete"] += 1
                    continue
                self.splitted_data[source].append(times)
            else:
                self.splitted_data[source] = []
                self.splitted_data[source].append(times)

    def save_seqences(self, i):
        """Save the AoI and Q sequences of source ``i`` to the configured folders.

        Bug fix: the AoI branch previously wrote ``self.Q_vector`` (a
        copy-paste error), so the AoI sequence was never saved.  It now
        writes the AoI sequence itself; subclasses store it either as
        ``aoi_vector`` (MeanAoICalc) or ``paoi_vector`` (MeanPeakAoICalc).
        """
        if self.save_AoI_seq != "None":
            try:
                seq = getattr(self, "aoi_vector", None)
                if seq is None:
                    seq = self.paoi_vector  # peak-AoI subclass stores peaks here
                arq_name = self.save_AoI_seq + "/" + self.sim_id + "_source_" + str(i) + ".txt"
                with open(arq_name, 'w') as f:
                    f.write(str(seq))
            except Exception as e:
                print("Error saving AoI sequence: %s" %e)
        if self.save_Q_seq != "None":
            try:
                arq_name = self.save_Q_seq + "/" + self.sim_id + "_source_" + str(i) + ".txt"
                with open(arq_name, 'w') as f:
                    f.write(str(self.Q_vector))
            except Exception as e:
                print("Error saving Q sequence: %s" %e)
"""
Class that calculates Mean Age of Information (Mean AoI)
Parameters
----------
sim_id: str
identification of simulation
data_file : json file
json with class:Agent "agent_id" as keys and list [arrival, service
start, departure times, agents waiting to be served and the total agents in the
system] as values
num_sources: int
number of sources of Agents. Mean AoI will be calculated for each source - Integer
save_AoI_seq: str (optional, default: "None")
path to folder where to save the sequence of calculated AoI
example: "/tmp/my_sim"
save_Q_seq: str (optional, default: "None")
path to folder where to save the sequence of calculated AoI areas Q
example: "/tmp/my_sim"
Returns
----------
mean_aoi: dict
dict with sources as keys and {"MeanAoI", "RMSE", "Preempted", "Obsolete"} as values
"""
class MeanAoICalc(AoICalc):
    """Compute the time-average AoI per source from the delivery log.

    On construction, runs :meth:`calc_aoi` for every source found in the
    data and stores the result in ``self.aoi[source]["MeanAoI"]`` along
    with an RMSE estimate of the mean.
    """

    def __init__(self, sim_id, data_file, num_sources, save_AoI_seq="None", save_Q_seq="None", **kwargs):
        super().__init__(sim_id, data_file, num_sources, save_AoI_seq, save_Q_seq, **kwargs)
        # Running AoI estimates and per-packet AoI areas (Q) for the
        # source currently being processed; cleared in calc_aoi().
        self.aoi_vector = []
        self.Q_vector = []
        for i in range(self.num_sources):
            self.aoi[i]["MeanAoI"] = np.inf
        print("Calculating Mean AoI values for simulation [%s]" %self.sim_id)
        for i in self.splitted_data.keys():
            try:
                self.calc_aoi(i)
            except Exception as e:
                print("Error calculating AoI for source %d: %s" %(i, e))
            try:
                self.save_seqences(i)
            except Exception as e:
                print("Error saving sequences for source %d: %s" %(i, e))

    # Calculates mean AoI for a data array
    def calc_aoi(self, source):
        """Accumulate the AoI sawtooth areas Qi for one source.

        Uses the standard decomposition: for each delivered packet,
        Qi = Ti*Yi + Yi^2/2, where Yi is the inter-generation time and
        Ti the system time; the mean AoI is sum(Qi)/observation_window.
        NOTE: mutates ``self.splitted_data[source]`` (pops its first entry).
        """
        self.aoi_vector.clear()
        self.Q_vector.clear()
        def Qi(Ti, Yi):
            # Area of one sawtooth trapezoid of the AoI curve.
            Q = Ti*Yi + 0.5*(Yi**2)
            return Q
        data = self.splitted_data[source]
        ti = data[0][0] # Gen time packet #0
        t_start = ti
        ti_linha = 0
        Ti = 0
        Qi_total = 0
        N = 0 # Total delivered packets
        data.pop(0) # Remove first entry
        for value in data:
            if value[1] == 0:
                continue
            N = N + 1
            if value[0] < ti:
                print("Error: obsolete packet!")
                print("gen_time = %f" %value[0])
                break
            Yi = value[0] - ti          # inter-generation time
            ti = value[0]
            ti_linha = value[1]
            Ti = ti_linha - ti          # system (sojourn) time
            Qi_now = Qi(Ti, Yi)
            Qi_total = Qi_total + Qi_now
            mean_aoi = Qi_total/(ti_linha-t_start)
            self.aoi_vector.append(mean_aoi)
            self.Q_vector.append(Qi_now)
        # Closing triangle of the last sawtooth.
        Qi_total = Qi_total + 0.5*(Ti**2)
        self.aoi[source]["MeanAoI"] = Qi_total / (ti_linha - t_start)
        self.aoi[source]["RMSE"] = self.calc_RMSE(N, ti_linha)

    # Calculate root mean squared error
    def calc_RMSE(self, N, tau):
        """Estimate the RMSE of the mean AoI via the integrated
        autocorrelation time (IAT) of the Q sequence.

        NOTE(review): the slice uses N (delivered packets) while the
        correlation is over len(self.Q_vector) samples — these can differ
        when packets are skipped; confirm against the original derivation.
        """
        Q = self.Q_vector
        mean_q = np.mean(Q)
        var_q = np.var(Q)
        # Normalized Q
        norm_Q = (Q - mean_q)
        # Autocorrelation
        result = correlate(norm_Q, norm_Q, mode='same')
        acorr = result [N//2 + 1:] / (var_q * np.arange(N-1, N//2, -1))
        M = len(acorr)
        iat = 0
        # IAT - Integrated autocorrelation times
        for i in range(M):
            iat = iat + (1 - (i+1)/M)*acorr[i]
        iat = 1 + 2*iat
        # Variance of the sample mean, corrected for autocorrelation.
        var_Q_mean = var_q/len(Q)
        true_var_Q_mean = var_Q_mean * iat
        var_aoi_mean = (N/tau)**2 * true_var_Q_mean
        RMSE = var_aoi_mean**0.5
        return RMSE
"""
Class that calculates Mean Peak Age of Information (Mean pAoI)
Parameters
----------
sim_id: str
identification of simulation
data_file : json file
json with class:Agent "agent_id" as keys and list [arrival, service
start, departure times, agents waiting to be served and the total agents in the
system] as values
num_sources: int
number of sources of Agents. Mean AoI will be calculated for each source - Integer
save_AoI_seq: str (optional, default: "None")
path to folder where to save the sequence of calculated AoI
example: "/tmp/my_sim"
save_Q_seq: str (optional, default: "None")
path to folder where to save the sequence of calculated AoI areas Q
example: "/tmp/my_sim"
Returns
----------
mean_peak_aoi: dict
dict with sources as keys and {"MeanPeakAoI", "RMSE", "Preempted", "Obsolete"} as values
"""
class MeanPeakAoICalc(AoICalc):
    """Compute the mean *peak* AoI per source from the delivery log.

    On construction, runs :meth:`calc_aoi` for every source found in the
    data and stores the result in ``self.aoi[source]["MeanPeakAoI"]``
    along with an RMSE estimate of the mean.
    """

    def __init__(self, sim_id, data_file, num_sources, save_AoI_seq="None", save_Q_seq="None", **kwargs):
        super().__init__(sim_id, data_file, num_sources, save_AoI_seq, save_Q_seq, **kwargs)
        # Per-packet peak-AoI samples for the source currently being
        # processed; cleared in calc_aoi().
        self.paoi_vector = []
        for i in range(self.num_sources):
            self.aoi[i]["MeanPeakAoI"] = np.inf
        print("Calculating Mean Peak AoI values for simulation [%s]" %self.sim_id)
        for i in self.splitted_data.keys():
            try:
                self.calc_aoi(i)
            except Exception as e:
                print("Error calculating AoI for source %d: %s" %(i, e))
            try:
                self.save_seqences(i)
            except Exception as e:
                print("Error saving sequences for source %d: %s" %(i, e))

    def calc_aoi(self, source):
        """Collect peak-AoI samples Ai = Ti + Yi for one source.

        Yi is the inter-generation time and Ti the system time.
        NOTE: mutates ``self.splitted_data[source]`` (pops its first entry).
        """
        self.paoi_vector.clear()
        # peak age calculation
        def Ai(Ti, Yi):
            return Ti + Yi
        data = self.splitted_data[source]
        ti = data[0][0] # Gen time packet #0
        N = 0 # total delivered packets
        data.pop(0) # Remove first entry
        for value in data:
            if value[1] == 0:
                continue
            N = N + 1
            if value[0] < ti:
                print("Error: obsolete packet!")
                print("gen_time = %f" %value[0])
                break
            Yi = value[0] - ti          # inter-generation time
            ti = value[0]
            ti_linha = value[1]
            Ti = ti_linha - ti          # system (sojourn) time
            self.paoi_vector.append(Ai(Ti, Yi))
        self.aoi[source]["MeanPeakAoI"] = np.mean(self.paoi_vector)
        self.aoi[source]["RMSE"] = self.calc_RMSE(N, source)

    def calc_RMSE(self, N, source):
        """Estimate the RMSE of the mean peak AoI via the integrated
        autocorrelation time (IAT) of the peak-AoI sequence.

        NOTE(review): the slice uses N while the correlation is over
        len(self.paoi_vector) samples — these can differ when packets are
        skipped; confirm against the original derivation.
        """
        # Variance
        var_A = np.var(self.paoi_vector)
        # Normalized A
        norm_A = (self.paoi_vector - self.aoi[source]["MeanPeakAoI"])
        # Autocorrelation
        result = correlate(norm_A, norm_A, mode='same')
        acorr = result [N//2 + 1:] / (var_A * np.arange(N-1, N//2, -1))
        M = len(acorr)
        iat = 0
        # IAT
        for i in range(M):
            iat = iat + (1 - (i+1)/M)*acorr[i]
        iat = 1 + 2*iat
        # Variance of the sample mean, corrected for autocorrelation.
        var_A_mean = var_A/N
        true_var_A_mean = var_A_mean * iat
        RMSE = true_var_A_mean**0.5
        return RMSE
| [
"json.load",
"scipy.signal.correlate",
"numpy.mean",
"numpy.arange",
"numpy.var"
] | [((5374, 5384), 'numpy.mean', 'np.mean', (['Q'], {}), '(Q)\n', (5381, 5384), True, 'import numpy as np\n'), ((5401, 5410), 'numpy.var', 'np.var', (['Q'], {}), '(Q)\n', (5407, 5410), True, 'import numpy as np\n'), ((5507, 5545), 'scipy.signal.correlate', 'correlate', (['norm_Q', 'norm_Q'], {'mode': '"""same"""'}), "(norm_Q, norm_Q, mode='same')\n", (5516, 5545), False, 'from scipy.signal import correlate\n'), ((8456, 8481), 'numpy.mean', 'np.mean', (['self.paoi_vector'], {}), '(self.paoi_vector)\n', (8463, 8481), True, 'import numpy as np\n'), ((8615, 8639), 'numpy.var', 'np.var', (['self.paoi_vector'], {}), '(self.paoi_vector)\n', (8621, 8639), True, 'import numpy as np\n'), ((8776, 8814), 'scipy.signal.correlate', 'correlate', (['norm_A', 'norm_A'], {'mode': '"""same"""'}), "(norm_A, norm_A, mode='same')\n", (8785, 8814), False, 'from scipy.signal import correlate\n'), ((745, 757), 'json.load', 'json.load', (['d'], {}), '(d)\n', (754, 757), False, 'import json\n'), ((5593, 5621), 'numpy.arange', 'np.arange', (['(N - 1)', '(N // 2)', '(-1)'], {}), '(N - 1, N // 2, -1)\n', (5602, 5621), True, 'import numpy as np\n'), ((8862, 8890), 'numpy.arange', 'np.arange', (['(N - 1)', '(N // 2)', '(-1)'], {}), '(N - 1, N // 2, -1)\n', (8871, 8890), True, 'import numpy as np\n')] |
import unittest
import numpy as np
from numpy.testing import assert_array_equal
from shape import (angle_absolute_error,
angle_absolute_error_direction_agnostic)
class AnglesTestCase(unittest.TestCase):
    """Unit tests for the angle-error helpers in the ``shape`` module.

    Angles are in degrees; errors are expected to wrap around 360.
    """

    def test_angle_absolute_error(self):
        """Absolute angular distance on the circle (max error 180 deg)."""
        self.assertEqual(angle_absolute_error(10, 20, np), 10)
        self.assertEqual(angle_absolute_error(10, 180, np), 170)
        self.assertEqual(angle_absolute_error(0, 180, np), 180)
        self.assertEqual(angle_absolute_error(0, 190, np), 170)
        self.assertEqual(angle_absolute_error(0, 270, np), 90)
        self.assertEqual(angle_absolute_error(90, 270, np), 180)
        self.assertEqual(angle_absolute_error(-10, 10, np), 20)
        self.assertEqual(angle_absolute_error(80, 280, np), 160)
        self.assertEqual(angle_absolute_error(0, -10, np), 10)
        self.assertEqual(angle_absolute_error(0, 360, np), 0)
        self.assertEqual(angle_absolute_error(10, 300, np), 70)

    def test_angle_absolute_error_direction_agnostic(self):
        """Distance treating opposite directions as equal (max error 90 deg)."""
        self.assertEqual(angle_absolute_error_direction_agnostic(10, 20, np), 10)
        self.assertEqual(angle_absolute_error_direction_agnostic(10, 180, np), 10)
        self.assertEqual(angle_absolute_error_direction_agnostic(0, 180, np), 0)
        self.assertEqual(angle_absolute_error_direction_agnostic(0, 190, np), 10)
        self.assertEqual(angle_absolute_error_direction_agnostic(0, 270, np), 90)
        self.assertEqual(angle_absolute_error_direction_agnostic(90, 270, np), 0)
        self.assertEqual(angle_absolute_error_direction_agnostic(-10, 10, np), 20)
        self.assertEqual(angle_absolute_error_direction_agnostic(80, 280, np), 20)
        self.assertEqual(angle_absolute_error_direction_agnostic(0, -10, np), 10)
        self.assertEqual(angle_absolute_error_direction_agnostic(0, 360, np), 0)
        self.assertEqual(angle_absolute_error_direction_agnostic(10, 300, np), 70)
        self.assertEqual(angle_absolute_error_direction_agnostic(-30, 300, np), 30)
        # Vectorized inputs should work elementwise as well.
        assert_array_equal(
            angle_absolute_error_direction_agnostic(
                np.array([10, 0, 0]), np.array([300, 360, -10]), np
            ),
            np.array([70, 0, 10]),
        )
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"shape.angle_absolute_error_direction_agnostic",
"shape.angle_absolute_error",
"numpy.array"
] | [((2260, 2275), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2273, 2275), False, 'import unittest\n'), ((292, 324), 'shape.angle_absolute_error', 'angle_absolute_error', (['(10)', '(20)', 'np'], {}), '(10, 20, np)\n', (312, 324), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((355, 388), 'shape.angle_absolute_error', 'angle_absolute_error', (['(10)', '(180)', 'np'], {}), '(10, 180, np)\n', (375, 388), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((420, 452), 'shape.angle_absolute_error', 'angle_absolute_error', (['(0)', '(180)', 'np'], {}), '(0, 180, np)\n', (440, 452), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((484, 516), 'shape.angle_absolute_error', 'angle_absolute_error', (['(0)', '(190)', 'np'], {}), '(0, 190, np)\n', (504, 516), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((548, 580), 'shape.angle_absolute_error', 'angle_absolute_error', (['(0)', '(270)', 'np'], {}), '(0, 270, np)\n', (568, 580), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((611, 644), 'shape.angle_absolute_error', 'angle_absolute_error', (['(90)', '(270)', 'np'], {}), '(90, 270, np)\n', (631, 644), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((676, 709), 'shape.angle_absolute_error', 'angle_absolute_error', (['(-10)', '(10)', 'np'], {}), '(-10, 10, np)\n', (696, 709), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((740, 773), 'shape.angle_absolute_error', 'angle_absolute_error', (['(80)', '(280)', 'np'], {}), '(80, 280, np)\n', (760, 773), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((805, 837), 'shape.angle_absolute_error', 'angle_absolute_error', (['(0)', '(-10)', 'np'], {}), '(0, -10, np)\n', 
(825, 837), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((868, 900), 'shape.angle_absolute_error', 'angle_absolute_error', (['(0)', '(360)', 'np'], {}), '(0, 360, np)\n', (888, 900), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((930, 963), 'shape.angle_absolute_error', 'angle_absolute_error', (['(10)', '(300)', 'np'], {}), '(10, 300, np)\n', (950, 963), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1055, 1106), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(10)', '(20)', 'np'], {}), '(10, 20, np)\n', (1094, 1106), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1137, 1189), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(10)', '(180)', 'np'], {}), '(10, 180, np)\n', (1176, 1189), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1220, 1271), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(0)', '(180)', 'np'], {}), '(0, 180, np)\n', (1259, 1271), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1301, 1352), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(0)', '(190)', 'np'], {}), '(0, 190, np)\n', (1340, 1352), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1383, 1434), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(0)', '(270)', 'np'], {}), '(0, 270, np)\n', (1422, 1434), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1465, 1517), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(90)', '(270)', 'np'], {}), 
'(90, 270, np)\n', (1504, 1517), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1547, 1599), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(-10)', '(10)', 'np'], {}), '(-10, 10, np)\n', (1586, 1599), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1630, 1682), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(80)', '(280)', 'np'], {}), '(80, 280, np)\n', (1669, 1682), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1713, 1764), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(0)', '(-10)', 'np'], {}), '(0, -10, np)\n', (1752, 1764), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1795, 1846), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(0)', '(360)', 'np'], {}), '(0, 360, np)\n', (1834, 1846), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1876, 1928), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(10)', '(300)', 'np'], {}), '(10, 300, np)\n', (1915, 1928), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((1959, 2012), 'shape.angle_absolute_error_direction_agnostic', 'angle_absolute_error_direction_agnostic', (['(-30)', '(300)', 'np'], {}), '(-30, 300, np)\n', (1998, 2012), False, 'from shape import angle_absolute_error, angle_absolute_error_direction_agnostic\n'), ((2194, 2215), 'numpy.array', 'np.array', (['[70, 0, 10]'], {}), '([70, 0, 10])\n', (2202, 2215), True, 'import numpy as np\n'), ((2115, 2135), 'numpy.array', 'np.array', (['[10, 0, 0]'], {}), '([10, 0, 0])\n', (2123, 2135), True, 'import numpy as np\n'), ((2137, 2162), 'numpy.array', 
'np.array', (['[300, 360, -10]'], {}), '([300, 360, -10])\n', (2145, 2162), True, 'import numpy as np\n')] |
# coding: utf-8
import os
import sys
# sys.path.append(os.pardir) # 为了导入父目录的文件而进行的设定
# from common.functions import *
# from common.gradient import numerical_gradient
# from common.functions import *
import numpy as np
def identity_function(x):
return x
def step_function(x):
return np.array(x > 0, dtype=np.int)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_grad(x):
return (1.0 - sigmoid(x)) * sigmoid(x)
def relu(x):
return np.maximum(0, x)
def relu_grad(x):
grad = np.zeros(x)
grad[ x >= 0 ] = 1
return grad
def softmax(x):
if x.ndim == 2:
x = x.T
x = x - np.max(x, axis=0)
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
x = x - np.max(x) # in case of overflow
return np.exp(x) / np.sum(np.exp(x))
def mean_squared_error(y, t):
return 0.5 * np.sum((y - t) ** 2)
def cross_entropy_error(y, t):
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
# 监督数据是one-hot-vector的情况下,转换为正确解标签的索引
if t.size == y.size:
t = t.argmax(axis=1)
batch_size = y.shape[ 0 ]
return -np.sum(np.log(y[ np.arange(batch_size), t ] + 1e-7)) / batch_size
def softmax_loss(X, t):
y = softmax(X)
return cross_entropy_error(y, t)
def numerical_gradient(f, x):
h = 1e-4 # 0.0001
grad = np.zeros_like(x)
it = np.nditer(x, flags=[ 'multi_index' ], op_flags=[ 'readwrite' ])
while not it.finished:
idx = it.multi_index
tmp_val = x[ idx ]
x[ idx ] = float(tmp_val) + h
fxh1 = f(x) # f(x+h)
x[ idx ] = tmp_val - h
fxh2 = f(x) # f(x-h)
grad[ idx ] = (fxh1 - fxh2) / (2 * h)
x[ idx ] = tmp_val # 还原值
it.iternext()
return grad
class Adam:
"""Adam (http://arxiv.org/abs/1412.6980v8)"""
def __init__(self, lr=0.001, beta1=0.9, beta2=0.999):
self.lr = lr
self.beta1 = beta1
self.beta2 = beta2
self.iter = 0
self.m = None
self.v = None
def update(self, params, grads):
if self.m is None:
self.m, self.v = {}, {}
for key, val in params.items():
self.m[ key ] = np.zeros_like(val)
self.v[ key ] = np.zeros_like(val)
self.iter += 1
lr_t = self.lr * np.sqrt(1.0 - self.beta2 ** self.iter) / (1.0 - self.beta1 ** self.iter)
for key in params.keys():
# self.m[key] = self.beta1*self.m[key] + (1-self.beta1)*grads[key]
# self.v[key] = self.beta2*self.v[key] + (1-self.beta2)*(grads[key]**2)
self.m[ key ] += (1 - self.beta1) * (grads[ key ] - self.m[ key ])
self.v[ key ] += (1 - self.beta2) * (grads[ key ] ** 2 - self.v[ key ])
params[ key ] -= lr_t * self.m[ key ] / (np.sqrt(self.v[ key ]) + 1e-7)
# unbias_m += (1 - self.beta1) * (grads[key] - self.m[key]) # correct bias
# unbisa_b += (1 - self.beta2) * (grads[key]*grads[key] - self.v[key]) # correct bias
# params[key] += self.lr * unbias_m / (np.sqrt(unbisa_b) + 1e-7)
class simpleNet:
def __init__(self):
self.W = np.random.randn(2, 3)
def predict(self, x):
return np.dot(x, self.W)
def loss(self, x, t):
z = self.predict(x)
y = softmax(z)
loss = cross_entropy_error(y, t)
return loss
class TwoLayerNet:
def __init__(self, input_size, hidden_size, output_size, weight_init_std=1):
# 初始化权重
self.params = dict(W1=weight_init_std * np.random.randn(input_size, hidden_size), b1=np.zeros(hidden_size),
W2=weight_init_std * np.random.randn(hidden_size, output_size), b2=np.zeros(output_size))
def predict(self, x):
W1 = self.params['W1']
W2 = W1.T
b1, b2 = self.params['b1'], self.params['b2']
a1 = np.dot(x, W1) + b1
z1 = relu(a1)
a2 = np.dot(z1, W2) + b2
# def h2(a, W1, W2): # W is the KeyA and a is the output form last layer
# return a.dot(np.linalg.inv(W1.dot(W2))) # ! use 'a' to multiply the inverse matrix of the identity matrix
y = a2.dot(np.linalg.inv(W1.dot(W2)))
return y
# x:输入数据, t:监督数据
def loss(self, x, t):
y = self.predict(x)
return mean_squared_error(y, t)
def accuracy(self, x, t):
y = self.predict(x)
y = np.argmax(y, axis=1)
t = np.argmax(t, axis=1)
accuracy = np.sum(y == t) / float(x.shape[0])
return accuracy
# x:输入数据, t:监督数据
def numerical_gradient(self, x, t):
loss_W = lambda W: self.loss(x, t)
grads = {}
grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
return grads
def gradient(self, x, t):
W1, W2 = self.params['W1'], self.params['W2']
b1, b2 = self.params['b1'], self.params['b2']
grads = {}
batch_num = x.shape[0]
# forward
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
y = softmax(a2)
# backward
dy = (y - t) / batch_num
grads['W2'] = np.dot(z1.T, dy)
grads['b2'] = np.sum(dy, axis=0)
da1 = np.dot(dy, W2.T)
dz1 = sigmoid_grad(a1) * da1
grads['W1'] = np.dot(x.T, dz1)
grads['b1'] = np.sum(dz1, axis=0)
return grads
if __name__ == '__main__':
# test simpleNet()
# net1 = simpleNet()
# print(net1.W)
# x = np.array([ 0.6, 0.9 ])
# p = net1.predict(x)
# print(p)
# test TwoLayerNet
net = TwoLayerNet(input_size=4, hidden_size=5, output_size=4)
print(f"W1 is {net.params['W1']}")
x = np.array([[72., 101., 2., 5] ])
t = x # the tag is the data itself
print(f'input is {x}')
y = net.predict(x)
print(y)
| [
"numpy.zeros_like",
"numpy.maximum",
"numpy.sum",
"numpy.random.randn",
"numpy.argmax",
"numpy.nditer",
"numpy.zeros",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.arange",
"numpy.dot",
"numpy.sqrt"
] | [((298, 327), 'numpy.array', 'np.array', (['(x > 0)'], {'dtype': 'np.int'}), '(x > 0, dtype=np.int)\n', (306, 327), True, 'import numpy as np\n'), ((470, 486), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (480, 486), True, 'import numpy as np\n'), ((518, 529), 'numpy.zeros', 'np.zeros', (['x'], {}), '(x)\n', (526, 529), True, 'import numpy as np\n'), ((1356, 1372), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1369, 1372), True, 'import numpy as np\n'), ((1383, 1442), 'numpy.nditer', 'np.nditer', (['x'], {'flags': "['multi_index']", 'op_flags': "['readwrite']"}), "(x, flags=['multi_index'], op_flags=['readwrite'])\n", (1392, 1442), True, 'import numpy as np\n'), ((6013, 6046), 'numpy.array', 'np.array', (['[[72.0, 101.0, 2.0, 5]]'], {}), '([[72.0, 101.0, 2.0, 5]])\n', (6021, 6046), True, 'import numpy as np\n'), ((739, 748), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (745, 748), True, 'import numpy as np\n'), ((783, 792), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (789, 792), True, 'import numpy as np\n'), ((862, 882), 'numpy.sum', 'np.sum', (['((y - t) ** 2)'], {}), '((y - t) ** 2)\n', (868, 882), True, 'import numpy as np\n'), ((3182, 3203), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (3197, 3203), True, 'import numpy as np\n'), ((3246, 3263), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (3252, 3263), True, 'import numpy as np\n'), ((4453, 4473), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (4462, 4473), True, 'import numpy as np\n'), ((4486, 4506), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (4495, 4506), True, 'import numpy as np\n'), ((5467, 5483), 'numpy.dot', 'np.dot', (['z1.T', 'dy'], {}), '(z1.T, dy)\n', (5473, 5483), True, 'import numpy as np\n'), ((5506, 5524), 'numpy.sum', 'np.sum', (['dy'], {'axis': '(0)'}), '(dy, axis=0)\n', (5512, 5524), True, 'import numpy as np\n'), ((5548, 5564), 'numpy.dot', 'np.dot', 
(['dy', 'W2.T'], {}), '(dy, W2.T)\n', (5554, 5564), True, 'import numpy as np\n'), ((5624, 5640), 'numpy.dot', 'np.dot', (['x.T', 'dz1'], {}), '(x.T, dz1)\n', (5630, 5640), True, 'import numpy as np\n'), ((5663, 5682), 'numpy.sum', 'np.sum', (['dz1'], {'axis': '(0)'}), '(dz1, axis=0)\n', (5669, 5682), True, 'import numpy as np\n'), ((366, 376), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (372, 376), True, 'import numpy as np\n'), ((639, 656), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (645, 656), True, 'import numpy as np\n'), ((669, 678), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (675, 678), True, 'import numpy as np\n'), ((802, 811), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (808, 811), True, 'import numpy as np\n'), ((3904, 3917), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (3910, 3917), True, 'import numpy as np\n'), ((3959, 3973), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', (3965, 3973), True, 'import numpy as np\n'), ((4535, 4549), 'numpy.sum', 'np.sum', (['(y == t)'], {}), '(y == t)\n', (4541, 4549), True, 'import numpy as np\n'), ((5283, 5296), 'numpy.dot', 'np.dot', (['x', 'W1'], {}), '(x, W1)\n', (5289, 5296), True, 'import numpy as np\n'), ((5340, 5354), 'numpy.dot', 'np.dot', (['z1', 'W2'], {}), '(z1, W2)\n', (5346, 5354), True, 'import numpy as np\n'), ((688, 697), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (694, 697), True, 'import numpy as np\n'), ((2221, 2239), 'numpy.zeros_like', 'np.zeros_like', (['val'], {}), '(val)\n', (2234, 2239), True, 'import numpy as np\n'), ((2272, 2290), 'numpy.zeros_like', 'np.zeros_like', (['val'], {}), '(val)\n', (2285, 2290), True, 'import numpy as np\n'), ((2340, 2378), 'numpy.sqrt', 'np.sqrt', (['(1.0 - self.beta2 ** self.iter)'], {}), '(1.0 - self.beta2 ** self.iter)\n', (2347, 2378), True, 'import numpy as np\n'), ((3616, 3637), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (3624, 3637), True, 'import numpy as np\n'), ((3733, 3754), 
'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (3741, 3754), True, 'import numpy as np\n'), ((2828, 2848), 'numpy.sqrt', 'np.sqrt', (['self.v[key]'], {}), '(self.v[key])\n', (2835, 2848), True, 'import numpy as np\n'), ((3571, 3611), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (3586, 3611), True, 'import numpy as np\n'), ((3687, 3728), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (3702, 3728), True, 'import numpy as np\n'), ((1159, 1180), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (1168, 1180), True, 'import numpy as np\n')] |
import numpy as np
import numba
@numba.jit(nopython=True, parallel=True, nogil=True)
def div_vec_approx(collocation, # Точка коллокации x[3]
vec_collocation, # Значение вектора под дивергенцией в точке коллокации gx[3]
vec, # Весь вектор под дивергенцией g(x_i, t_{n+1}) g[N][3]
collocations, # Точки коллокаций всех разбиений rkt[N][3]
squares, # Площади разбиений Squares[N]
jmax, # Размер всего вектора под дивергенцией N
eps, # Радиус подсчёта дивергенции
appr_degree, # Степень аппроскимации
grad_Function): # Фукнция, реализующая подсчёт градиента в точке коллокации
"""
Функция рассчета поверхностной дивергенции в точке
на основе ядра свёртки интегральной функции,
аппроскимируемой полиномом
"""
div_vec_appr = 0 # Итоговая дивергенция
for j in range(jmax):
grad_psi = grad_Function(collocation, collocations[j], eps, appr_degree)
div_vec_appr += ((vec[j] - vec_collocation) @ grad_psi) * squares[j]
return div_vec_appr
@numba.jit(nopython=True, parallel=True, nogil=True)
def gradient_vec(x, # Точка коллокации которую мы наблюдаем
y, # Относительная точка коллокации
eps, # 2 диаметра разбиения
appr_degree=2): # Степень аппроксимации
x_y = x - y
r_2 = (x_y @ x_y) / (eps**2)
# Граничное условие
if r_2 > 50:
grad = np.zeros(3)
else:
if appr_degree == 1:
grad = -2 * x_y * np.exp(-r_2) / np.pi / (eps**4)
elif appr_degree == 2:
grad = (-6 + 2 * r_2) * (x_y) * np.exp(-r_2) / np.pi / (eps**4)
elif appr_degree == 3:
grad = (-12 + 8 * r_2 - (r_2) ** 2) * (x_y) * np.exp(-r_2) / np.pi / (eps ** 4)
else:
grad = (-20 + 20 * r_2 - 5 * ((r_2)**2) + ((r_2)**3) / 3) * (x_y) * np.exp(-r_2) / np.pi / (eps**4)
return grad
@numba.jit(nopython=True, parallel=True, nogil=True)
def div_element_surface(vec_element_k: np.array,
frame: np.array,
neighbors_vecs: np.array,
square: float,
norm: np.array) -> float:
"""
Функция, которая считает дивергенцию относительно одной точки центра
:param vec_element_k: Значение вектора в точке коллокации, массив (3), значение дивергенции которого мы хотим посчитать по этой ячейке
:param frame: Массив (4, 3) для точек нашего прямоугольного разбиения (единичной поверхности)
:param neighbors_vecs: массив (4, 3) из значений векторов по которому считаем дивергецию, в соседних ячейках
:param square: Площадь ячейки, число
:param norm: Нормаль к ячейке массив (3)
:return: Значение дивергенции - число
"""
result = 0
m = frame.shape[0]
for i in range(m):
vec_prod = np.cross((frame[(i+1) % m] - frame[i]), norm) # 1. Векторное произведение стороны на нормаль
sum_vecs = (vec_element_k + neighbors_vecs[i]) / 2. # 2. Сумма данного вектора и вектора в соседней ячейке
dot_prod = sum_vecs @ vec_prod # Скалярное произведение 2 и 1
result += dot_prod # Складируем результат
return result / square
@numba.jit(nopython=True, parallel=True, nogil=True)
def div_surface(vec: np.array,
frames: np.array,
neighbors_inds: np.array,
squares: np.array,
norms: np.array) -> np.array:
"""
Функция, которая считает дивергенцию вектора по всей поверхности разбиения
:param vec: Вектор, дивергенцию которого мы ищем в каждой точке разбиения, массив (N, 3)
:param frames: Массив разбиений, состоящий из структур типа ячеек, массив (N, 4, 3)
:param neighbors_inds: Массив индексов соседних элементов разбиений поверхности, относительно текущей точки, массив (N, 4)
:param squares: Массив площадей разбиений, массив (N)
:param norms: Массив нормалей к ячейкам по всей поверхности разбиения (N, 4)
:return: Массив (N) - дивергенция вектора в каждой точке разбиения
"""
N = frames.shape[0] # Общее число разбиений модуля
div_vec = np.zeros(N) # Итоговый массив дивергенций в точках
for k in range(N):
neighbors_vecs = np.zeros((4, 3))
for i in range(4):
idx = int(neighbors_inds[k][i])
if idx == (-1):
continue
else:
neighbors_vecs[i, :] = vec[idx, :]
div_vec[k] = div_element_surface(vec_element_k=vec[k],
frame=frames[k],
neighbors_vecs=neighbors_vecs,
square=squares[k],
norm=norms[k])
return div_vec
| [
"numpy.zeros",
"numpy.cross",
"numba.jit",
"numpy.exp"
] | [((35, 86), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(nopython=True, parallel=True, nogil=True)\n', (44, 86), False, 'import numba\n'), ((1241, 1292), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(nopython=True, parallel=True, nogil=True)\n', (1250, 1292), False, 'import numba\n'), ((2167, 2218), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(nopython=True, parallel=True, nogil=True)\n', (2176, 2218), False, 'import numba\n'), ((3527, 3578), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)', 'parallel': '(True)', 'nogil': '(True)'}), '(nopython=True, parallel=True, nogil=True)\n', (3536, 3578), False, 'import numba\n'), ((4454, 4465), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (4462, 4465), True, 'import numpy as np\n'), ((1678, 1689), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1686, 1689), True, 'import numpy as np\n'), ((3096, 3141), 'numpy.cross', 'np.cross', (['(frame[(i + 1) % m] - frame[i])', 'norm'], {}), '(frame[(i + 1) % m] - frame[i], norm)\n', (3104, 3141), True, 'import numpy as np\n'), ((4556, 4572), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (4564, 4572), True, 'import numpy as np\n'), ((1759, 1771), 'numpy.exp', 'np.exp', (['(-r_2)'], {}), '(-r_2)\n', (1765, 1771), True, 'import numpy as np\n'), ((1866, 1878), 'numpy.exp', 'np.exp', (['(-r_2)'], {}), '(-r_2)\n', (1872, 1878), True, 'import numpy as np\n'), ((1987, 1999), 'numpy.exp', 'np.exp', (['(-r_2)'], {}), '(-r_2)\n', (1993, 1999), True, 'import numpy as np\n'), ((2115, 2127), 'numpy.exp', 'np.exp', (['(-r_2)'], {}), '(-r_2)\n', (2121, 2127), True, 'import numpy as np\n')] |
import numpy as np
from .config import conf
import os, sys
from .config import names as gs
import pandas as pd
truth = np.genfromtxt(conf.binned_personality_file, skip_header=1, usecols=range(1, conf.n_traits+1), delimiter=',')
# all comparisons to perform. Each has
# a name,
# two annotation values that determine if classifiers trained on all data or on specific subsets only will be examined;
# names for both tasks to compare
comparisons = dict({'split halves': [conf.annotation_all, conf.annotation_all, 'first half', 'second half'],
'two ways': [conf.annotation_ways, conf.annotation_ways, 'way there', 'way back'],
'way vs shop in general classifier': [conf.annotation_all, conf.annotation_all, 'both ways' ,'shop'],
'way vs shop in specialised classifier': [conf.annotation_ways, conf.annotation_shop, 'both ways', 'shop'],
'way in specialised classifier vs way in general classifier': [conf.annotation_ways, conf.annotation_all, 'both ways', 'both ways'],
'shop in specialised classifier vs shop in general classifier': [conf.annotation_shop, conf.annotation_all, 'shop', 'shop']
})
def get_majority_vote(predictions):
if len(predictions) == 0:
return -1
(values, counts) = np.unique(predictions, return_counts=True)
ind = np.argmax(counts)
return values[ind]
def get_average_correlation(predA, predB, m_iter):
"""
:param predA: predictions for task A, n_participants x m_iter
:param predB: predictions for task B, n_participants x m_iter
:return:
"""
correlations = []
for si in range(0, m_iter):
if predB.ndim == 1:
if np.sum(predA[:,si]) > 0:
A = predA[:,si]
B = predB
consider = (A>0)
A = A[consider]
B = B[consider]
else:
continue
else:
if np.sum(predA[:,si]) > 0 and (np.sum(predB[:,si]) > 0):
A = predA[:,si]
B = predB[:,si]
consider = (A>0) & (B>0)
A = A[consider]
B = B[consider]
else:
continue
correlation = np.corrcoef(np.array([A, B]))[0][1]
correlations.append(correlation)
avg = np.tanh(np.mean(np.arctanh(np.array(correlations))))
return avg
if __name__ == "__main__":
# check if the output target folder already exists and create if not
if not os.path.exists(conf.figure_folder):
os.mkdir(conf.figure_folder)
# collect masks for each participant, annotation (all data, shop, way), window size and subset in question (e.g. first half, or way to the shop)
# each mask is True for samples of a particular participant and subset; False for all others
window_masks = []
for wsi in range(0, len(conf.all_window_sizes)):
x_file, y_file, id_file = conf.get_merged_feature_files(conf.all_window_sizes[wsi])
for annotation_value in conf.annotation_values:
ids_ws = np.genfromtxt(id_file, delimiter=',', skip_header=1).astype(int)
if annotation_value == conf.annotation_shop:
ids_ws = ids_ws[ids_ws[:, 1] == conf.time_window_annotation_shop, :]
elif annotation_value == conf.annotation_ways:
ids_ws = ids_ws[(ids_ws[:, 1] == conf.time_window_annotation_wayI) | (ids_ws[:, 1] == conf.time_window_annotation_wayII), :]
for p in range(0, conf.n_participants):
ids_ws_p = ids_ws[(ids_ws[:, 0] == p), :]
window_masks.append([annotation_value, p, wsi, 'first half', ids_ws_p[:, 2] == conf.time_window_annotation_halfI])
window_masks.append([annotation_value, p, wsi, 'second half', ids_ws_p[:, 2] == conf.time_window_annotation_halfII])
window_masks.append([annotation_value, p, wsi, 'way there', ids_ws_p[:, 1] == conf.time_window_annotation_wayI])
window_masks.append([annotation_value, p, wsi, 'way back', ids_ws_p[:, 1] == conf.time_window_annotation_wayII])
window_masks.append([annotation_value, p, wsi, 'shop', ids_ws_p[:, 1] == conf.time_window_annotation_shop])
window_masks.append([annotation_value, p, wsi, 'both ways', np.logical_or(ids_ws_p[:, 1] == conf.time_window_annotation_wayI,ids_ws_p[:, 1] == conf.time_window_annotation_wayII)])
window_masks_df = pd.DataFrame(window_masks, columns=['annotation', 'participant', 'window size index', 'subtask', 'mask'])
# collect predictions for each participant and each setting that is interesting for one of the comparisons
# Results are directly written into figures/table1-5.csv
with open(conf.figure_folder + '/table1-5.csv', 'w') as f:
f.write('comparison')
for trait in range(0, conf.n_traits):
f.write(',' + conf.medium_traitlabels[trait])
f.write('\n')
for comp_title, (annotation_value_I, annotation_value_II, subtaskI, subtaskII) in list(comparisons.items()):
f.write(comp_title)
result_filename = conf.result_folder + '/predictions_' + comp_title.replace(' ','_') + '.npz'
if not os.path.exists(result_filename):
print('computing data for', comp_title)
print('Note taht this might take a while - if the script is run again, intermediate results will be available and speed up all computations.')
predictions_I = np.zeros((conf.n_participants, conf.n_traits, conf.max_n_iter), dtype=int)
predictions_II = np.zeros((conf.n_participants, conf.n_traits, conf.max_n_iter), dtype=int)
for trait in range(0, conf.n_traits):
for si in range(0, conf.max_n_iter):
filenameI = conf.get_result_filename(annotation_value_I, trait, False, si, add_suffix=True)
filenameII = conf.get_result_filename(annotation_value_II, trait, False, si, add_suffix=True)
if os.path.exists(filenameI) and os.path.exists(filenameII):
dataI = np.load(filenameI)
detailed_predictions_I = dataI['detailed_predictions']
chosen_window_indices_I = dataI['chosen_window_indices']
dataII = np.load(filenameII)
detailed_predictions_II = dataII['detailed_predictions']
chosen_window_indices_II = dataII['chosen_window_indices']
for p, window_index_I, window_index_II, local_detailed_preds_I, local_detailed_preds_II in zip(range(0, conf.n_participants), chosen_window_indices_I, chosen_window_indices_II, detailed_predictions_I, detailed_predictions_II):
maskI = window_masks_df[(window_masks_df.annotation == annotation_value_I) &
(window_masks_df.participant == p) &
(window_masks_df['window size index'] == window_index_I) &
(window_masks_df.subtask == subtaskI)
].as_matrix(columns=['mask'])[0][0]
maskII = window_masks_df[(window_masks_df.annotation == annotation_value_II) &
(window_masks_df.participant == p) &
(window_masks_df['window size index'] == window_index_II) &
(window_masks_df.subtask == subtaskII)
].as_matrix(columns=['mask'])[0][0]
predictions_I[p, trait, si] = get_majority_vote(np.array(local_detailed_preds_I)[maskI])
predictions_II[p, trait, si] = get_majority_vote(np.array(local_detailed_preds_II)[maskII])
else:
print('did not find', filenameI, 'or', filenameII)
sys.exit(1)
np.savez(result_filename, predictions_I=predictions_I, predictions_II=predictions_II)
else:
data = np.load(result_filename)
predictions_I = data['predictions_I']
predictions_II = data['predictions_II']
# predictions_I are predictions from one context, predictions_II is the other context
# compute their average correlation and write it to file
for t in range(0, conf.n_traits):
corrI = get_average_correlation(predictions_I[:, t, :], predictions_II[:, t, :], 100)
f.write(','+'%.2f'%corrI)
f.write('\n')
| [
"pandas.DataFrame",
"os.mkdir",
"numpy.load",
"numpy.sum",
"numpy.argmax",
"os.path.exists",
"numpy.zeros",
"numpy.genfromtxt",
"numpy.array",
"numpy.logical_or",
"sys.exit",
"numpy.savez",
"numpy.unique"
] | [((1335, 1377), 'numpy.unique', 'np.unique', (['predictions'], {'return_counts': '(True)'}), '(predictions, return_counts=True)\n', (1344, 1377), True, 'import numpy as np\n'), ((1388, 1405), 'numpy.argmax', 'np.argmax', (['counts'], {}), '(counts)\n', (1397, 1405), True, 'import numpy as np\n'), ((4502, 4611), 'pandas.DataFrame', 'pd.DataFrame', (['window_masks'], {'columns': "['annotation', 'participant', 'window size index', 'subtask', 'mask']"}), "(window_masks, columns=['annotation', 'participant',\n 'window size index', 'subtask', 'mask'])\n", (4514, 4611), True, 'import pandas as pd\n'), ((2550, 2584), 'os.path.exists', 'os.path.exists', (['conf.figure_folder'], {}), '(conf.figure_folder)\n', (2564, 2584), False, 'import os, sys\n'), ((2594, 2622), 'os.mkdir', 'os.mkdir', (['conf.figure_folder'], {}), '(conf.figure_folder)\n', (2602, 2622), False, 'import os, sys\n'), ((1739, 1759), 'numpy.sum', 'np.sum', (['predA[:, si]'], {}), '(predA[:, si])\n', (1745, 1759), True, 'import numpy as np\n'), ((2396, 2418), 'numpy.array', 'np.array', (['correlations'], {}), '(correlations)\n', (2404, 2418), True, 'import numpy as np\n'), ((5275, 5306), 'os.path.exists', 'os.path.exists', (['result_filename'], {}), '(result_filename)\n', (5289, 5306), False, 'import os, sys\n'), ((5556, 5630), 'numpy.zeros', 'np.zeros', (['(conf.n_participants, conf.n_traits, conf.max_n_iter)'], {'dtype': 'int'}), '((conf.n_participants, conf.n_traits, conf.max_n_iter), dtype=int)\n', (5564, 5630), True, 'import numpy as np\n'), ((5664, 5738), 'numpy.zeros', 'np.zeros', (['(conf.n_participants, conf.n_traits, conf.max_n_iter)'], {'dtype': 'int'}), '((conf.n_participants, conf.n_traits, conf.max_n_iter), dtype=int)\n', (5672, 5738), True, 'import numpy as np\n'), ((8301, 8391), 'numpy.savez', 'np.savez', (['result_filename'], {'predictions_I': 'predictions_I', 'predictions_II': 'predictions_II'}), '(result_filename, predictions_I=predictions_I, predictions_II=\n predictions_II)\n', (8309, 
8391), True, 'import numpy as np\n'), ((8428, 8452), 'numpy.load', 'np.load', (['result_filename'], {}), '(result_filename)\n', (8435, 8452), True, 'import numpy as np\n'), ((1991, 2011), 'numpy.sum', 'np.sum', (['predA[:, si]'], {}), '(predA[:, si])\n', (1997, 2011), True, 'import numpy as np\n'), ((2020, 2040), 'numpy.sum', 'np.sum', (['predB[:, si]'], {}), '(predB[:, si])\n', (2026, 2040), True, 'import numpy as np\n'), ((2293, 2309), 'numpy.array', 'np.array', (['[A, B]'], {}), '([A, B])\n', (2301, 2309), True, 'import numpy as np\n'), ((3115, 3167), 'numpy.genfromtxt', 'np.genfromtxt', (['id_file'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(id_file, delimiter=',', skip_header=1)\n", (3128, 3167), True, 'import numpy as np\n'), ((4359, 4482), 'numpy.logical_or', 'np.logical_or', (['(ids_ws_p[:, 1] == conf.time_window_annotation_wayI)', '(ids_ws_p[:, 1] == conf.time_window_annotation_wayII)'], {}), '(ids_ws_p[:, 1] == conf.time_window_annotation_wayI, ids_ws_p[\n :, 1] == conf.time_window_annotation_wayII)\n', (4372, 4482), True, 'import numpy as np\n'), ((6113, 6138), 'os.path.exists', 'os.path.exists', (['filenameI'], {}), '(filenameI)\n', (6127, 6138), False, 'import os, sys\n'), ((6143, 6169), 'os.path.exists', 'os.path.exists', (['filenameII'], {}), '(filenameII)\n', (6157, 6169), False, 'import os, sys\n'), ((6207, 6225), 'numpy.load', 'np.load', (['filenameI'], {}), '(filenameI)\n', (6214, 6225), True, 'import numpy as np\n'), ((6432, 6451), 'numpy.load', 'np.load', (['filenameII'], {}), '(filenameII)\n', (6439, 6451), True, 'import numpy as np\n'), ((8273, 8284), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8281, 8284), False, 'import os, sys\n'), ((7971, 8003), 'numpy.array', 'np.array', (['local_detailed_preds_I'], {}), '(local_detailed_preds_I)\n', (7979, 8003), True, 'import numpy as np\n'), ((8093, 8126), 'numpy.array', 'np.array', (['local_detailed_preds_II'], {}), '(local_detailed_preds_II)\n', (8101, 8126), True, 'import numpy as 
np\n')] |
import numpy as np
class Loss:
def mean_squared_error(self, y, t):
return 0.5 * np.sum((y-t)**2)
def cross_entropy_error(self, y, t):
if y.ndim == 1:
t = t.reshape(1, t.size)
y = y.reshape(1, y.size)
# 훈련 데이터가 원-핫 벡터라면 정답 레이블의 인덱스로 반환
if t.size == y.size:
t = t.argmax(axis=1)
batch_size = y.shape[0]
return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
def softmax_loss(self, X, t):
y = self.softmax(X)
return self.cross_entropy_error(y, t) | [
"numpy.sum",
"numpy.arange"
] | [((94, 114), 'numpy.sum', 'np.sum', (['((y - t) ** 2)'], {}), '((y - t) ** 2)\n', (100, 114), True, 'import numpy as np\n'), ((450, 471), 'numpy.arange', 'np.arange', (['batch_size'], {}), '(batch_size)\n', (459, 471), True, 'import numpy as np\n')] |
from __future__ import print_function
from . import utils, optimize, base
import numpy as np
from .ODL import ODL
class UpdateXc(optimize.Fista):
"""
Update Xc in COPAR (page 189-190 COPAR)
see COPAR paper:
http://www.cs.zju.edu.cn/people/wangdh/papers/draft_ECCV12_particularity.pdf
cost = normF2(Yc - D*Xc) + normF2(Yc - DcXcc - DCp1*XCp1c) +
sum_{i \neq c, 1 \leq i \leq C} normF2(Xic);
-----------------------------------------------
Author: <NAME>, <EMAIL>, 5/12/2016
(http://www.personal.psu.edu/thv102/)
-----------------------------------------------
"""
def __init__(self, D, D_range_ext, Y, Y_range, lambd, iterations = 100):
self.D = D
self.lambd = lambd
self.DtD = np.dot(self.D.T, self.D)
self.Y = Y
self.Y_range = Y_range
self.nclass = len(D_range_ext) - 2
self.DtY = np.dot(D.T, Y)
self.DCp1 = utils.get_block_col(D, self.nclass, D_range_ext)
self.DCp1tDCp1 = np.dot(self.DCp1.T, self.DCp1)
self.D_range_ext = D_range_ext
self.k0 = D_range_ext[-1] - D_range_ext[-2]
if self.k0 > 0:
self.L = utils.max_eig(self.DtD) + utils.max_eig(self.DCp1tDCp1)
else:
self.L = utils.max_eig(self.DtD)
self.c = -1
self.DCp1 = utils.get_block_col(D, self.nclass, self.D_range_ext)
def set_class(self, c):
self.c = c
self.Yc = utils.get_block_col(self.Y, c, self.Y_range)
self.Dc = utils.get_block_col(self.D, c, self.D_range_ext)
self.DctDc = utils.get_block(self.DtD, c, c, self.D_range_ext, self.D_range_ext)
self.DCp1tDc = utils.get_block(
self.DtD, self.nclass, c, self.D_range_ext, self.D_range_ext)
self.DtYc = utils.get_block_col(self.DtY, c, self.Y_range)
self.DtYc2 = self.DtYc.copy()
self.DtYc2[self.D_range_ext[c]:self.D_range_ext[c+1], :] = \
2*self.DtYc[self.D_range_ext[c]: self.D_range_ext[c+1], :]
self.DtYc2[self.D_range_ext[-2]:self.D_range_ext[-1], :] = \
2*self.DtYc[self.D_range_ext[-2]:self.D_range_ext[-1], :]
def _grad(self, Xc0):
Xc = Xc0.copy()
c = self.c
g0 = np.dot(self.DtD, Xc)
Xcc = utils.get_block_row(Xc, self.c, self.D_range_ext)
XCp1c = utils.get_block_row(Xc, self.nclass, self.D_range_ext)
if self.k0 > 0:
Xc[self.D_range_ext[c]: self.D_range_ext[c+1], :] = \
np.dot(self.DctDc, Xcc) + np.dot(self.DCp1tDc.T, XCp1c)
Xc[self.D_range_ext[-2]: self.D_range_ext[-1], :] = \
np.dot(self.DCp1tDCp1, XCp1c) + np.dot(self.DCp1tDc, Xcc)
else:
Xc[self.D_range_ext[c]: self.D_range_ext[c+1], :] = np.dot(self.DctDc, Xcc)
return g0 + Xc - self.DtYc2
def _calc_f(self, Xc):
"""
optimize later
"""
Xcc = utils.get_block_row(Xc, self.c, self.D_range_ext)
XCp1c = utils.get_block_row(Xc, self.nclass, self.D_range_ext)
cost = utils.normF2(self.Yc - np.dot(self.D, Xc))
cost += utils.normF2(self.Yc - np.dot(self.Dc, Xcc) - np.dot(self.DCp1, XCp1c))
for i in range(self.nclass):
if i != self.c:
Xic = utils.get_block_row(Xc, i, self.D_range_ext)
cost += utils.normF2(Xic)
return .5*cost
def lossF(self, Xc):
return self._calc_f(Xc) + utils.norm1(Xc)
class COPAR(base.BaseModel):
def __init__(self, k, k0, lambd = 0.01, eta = 0.0001, updateX_iters = 100, updateD_iters = 100):
self.k = k
self.k0 = k0
self.lambd = lambd
self.eta = eta
self.D = None
self.X = None
self.Y = None
self.updateX_iters = updateX_iters
self.updateD_iters = updateD_iters
pass
def _getYc(self, c):
return utils.get_block_col(self.Y, c, self.Y_range)
def _getDc(self, c):
return utils.get_block_col(self.D, c, self.D_range_ext)
def loss(self):
"""
cost = COPAR_cost(Y, Y_range, D, D_range_ext, X, opts):
Calculating cost function of COPAR with parameters lambda and eta are
stored in `opts.lambda` and `opts.rho`.
`f(D, X) = 0.5*sum_{c=1}^C 05*||Y - DX||_F^2 +
sum_{c=1}^C ( ||Y_c - D_Cp1 X^Cp1_c - D_c X_c^c||F^2 +
sum_{i != c}||X^i_c||_F^2) + lambda*||X||_1 +
0.5*eta*sum_{i \neq c}||Di^T*Dc||_F^2`
-----------------------------------------------
Author: <NAME>, <EMAIL>, 5/11/2016
(http://www.personal.psu.edu/thv102/)
-----------------------------------------------
"""
cost = self.lambd*utils.norm1(self.X)
cost1 = utils.normF2(self.Y - np.dot(self.D, self.X))
DCp1 = self._getDc(self.nclass)
for c in range(self.nclass):
Dc = self._getDc(c)
Yc = self._getYc(c)
Xc = utils.get_block_col(self.X, c, self.Y_range)
Xcc = utils.get_block_row(Xc, c, self.D_range_ext)
XCp1c = utils.get_block_row(Xc, self.nclass, self.D_range_ext)
cost1 += utils.normF2(Yc - np.dot(Dc, Xcc) - np.dot(DCp1, XCp1c))
XX = Xc[: self.D_range_ext[-2], :]
XX = np.delete(XX, list(range(self.D_range_ext[c], self.D_range_ext[c+1])), axis=0)
cost1 += utils.normF2(XX)
cost += cost1 + .5*self.eta*utils.normF2(
utils.erase_diagonal_blocks(np.dot(self.D.T, self.D),
self.D_range_ext,
self.D_range_ext))
return cost
def fit(self, Y, label_train, iterations=100, verbose=False, show_after=5):
self.Y = Y
del Y
self.Y_range = utils.label_to_range(label_train)
self.nclass = len(self.Y_range) - 1
D_range = [self.k*i for i in range(self.nclass+1)]
self.D_range_ext = D_range + [self.k*self.nclass + self.k0]
# init
if verbose:
print('initializing ... ')
self._initialize()
if verbose:
print('initialization cost = %.4f'%self.loss())
for it in range(iterations):
self._updateD()
self._updateX()
if verbose and (it == 0 or (it + 1) % show_after == 0):
print('iter \t%3d/%3d \t loss %.4f'%(it+1, iterations, self.loss()))
def _initialize(self):
self.D = np.zeros((self.Y.shape[0], self.D_range_ext[-1]))
self.X = np.zeros((self.D_range_ext[-1], self.Y.shape[1]))
for c in range(self.nclass):
clf = ODL(k=self.k, lambd=self.lambd)
clf.fit(self._getYc(c))
self.D[:, self.D_range_ext[c]:self.D_range_ext[c+1]] = clf.D
self.X[self.D_range_ext[c]:self.D_range_ext[c+1], \
self.Y_range[c]:self.Y_range[c+1]] = clf.X
if self.k0 > 0:
clf = ODL(k=self.k0, lambd=self.lambd)
clf.fit(self.Y)
self.D[:, self.D_range_ext[-2]:self.D_range_ext[-1]] = clf.D
self.X[self.D_range_ext[-2]:self.D_range_ext[-1]]
def _updateX(self):
updatxc = UpdateXc(
self.D, self.D_range_ext, self.Y, self.Y_range, self.lambd, iterations=100)
for c in range(self.nclass):
updatxc.set_class(c)
Xc = utils.get_block_col(self.X, c, self.Y_range)
# updatxc.check_grad(Xc)
self.X[:, self.Y_range[c]: self.Y_range[c+1]] = updatxc.solve(Xinit=Xc)
def _updateD(self):
Yhat = np.zeros_like(self.Y)
DCp1 = self._getDc(self.nclass)
for c in range(self.nclass):
Dc_range = list(range(self.D_range_ext[c], self.D_range_ext[c+1]))
Yc_range = list(range(self.Y_range[c], self.Y_range[c+1]))
Yc = self._getYc(c)
Dc = self._getDc(c)
Xc = utils.get_block_col(self.X, c, self.Y_range)
Xcc = utils.get_block_row(Xc, c, self.D_range_ext)
XCp1c = utils.get_block_row(Xc, self.nclass, self.D_range_ext)
Ychat = Yc - np.dot(self.D, Xc) + np.dot(Dc, Xcc)
Ycbar = Yc - np.dot(DCp1, XCp1c)
E = np.dot(Ychat + Ycbar, Xcc.T)
F = 2*np.dot(Xcc, Xcc.T)
A = self.D.copy()
A = np.delete(A, Dc_range, axis=1)
self.D[:,Dc_range] = optimize.DLSI_updateD(Dc, E, F, A.T, self.eta)
Yhat[:, Yc_range] = Yc - np.dot(self.D[:, Dc_range], Xcc)
## DCp1
XCp1 = utils.get_block_row(self.X, self.nclass, self.D_range_ext)
Ybar = self.Y - np.dot(self.D[:, : self.D_range_ext[-2]],
self.X[: self.D_range_ext[-2], :])
E = np.dot(Ybar + Yhat, XCp1.T)
F = 2*np.dot(XCp1, XCp1.T)
A = self.D[:, : self.D_range_ext[-2]]
DCp1_range = list(range(self.D_range_ext[-2], self.D_range_ext[-1]))
self.D[:, DCp1_range] = optimize.DLSI_updateD(self.D[:, DCp1_range], E, F, A.T, self.eta)
def predict(self, Y):
E = np.zeros((self.nclass, Y.shape[1]))
for c in range(self.nclass):
Dc = self._getDc(c)
lasso = optimize.Lasso(Dc, self.lambd)
lasso.fit(Y)
Xc = lasso.solve()
R1 = Y - np.dot(Dc, Xc)
E[c, :] = 0.5*(R1*R1).sum(axis=0) + self.lambd*abs(Xc).sum(axis=0)
return np.argmin(E, axis=0) + 1
def mini_test_unit():
print('\n================================================================')
print('Mini Unit test: COPAR')
dataset = 'myYaleB'
N_train = 5
Y_train, Y_test, label_train, label_test = utils.train_test_split(dataset, N_train)
clf = COPAR(k=4, k0=5, lambd=0.001, eta=0.01)
clf.fit(Y_train, label_train, iterations=10, verbose=True)
clf.evaluate(Y_test, label_test)
def test_unit():
print('\n================================================================')
print('Mini Unit test: COPAR')
dataset = 'myYaleB'
N_train = 15
Y_train, Y_test, label_train, label_test = utils.train_test_split(dataset, N_train)
clf = COPAR(k=10, k0=5, lambd=0.001, eta=0.01)
clf.fit(Y_train, label_train, iterations=100, verbose=True)
clf.evaluate(Y_test, label_test)
if __name__ == '__main__':
mini_test_unit()
| [
"numpy.zeros_like",
"numpy.zeros",
"numpy.argmin",
"numpy.dot",
"numpy.delete"
] | [((771, 795), 'numpy.dot', 'np.dot', (['self.D.T', 'self.D'], {}), '(self.D.T, self.D)\n', (777, 795), True, 'import numpy as np\n'), ((908, 922), 'numpy.dot', 'np.dot', (['D.T', 'Y'], {}), '(D.T, Y)\n', (914, 922), True, 'import numpy as np\n'), ((1017, 1047), 'numpy.dot', 'np.dot', (['self.DCp1.T', 'self.DCp1'], {}), '(self.DCp1.T, self.DCp1)\n', (1023, 1047), True, 'import numpy as np\n'), ((2243, 2263), 'numpy.dot', 'np.dot', (['self.DtD', 'Xc'], {}), '(self.DtD, Xc)\n', (2249, 2263), True, 'import numpy as np\n'), ((6505, 6554), 'numpy.zeros', 'np.zeros', (['(self.Y.shape[0], self.D_range_ext[-1])'], {}), '((self.Y.shape[0], self.D_range_ext[-1]))\n', (6513, 6554), True, 'import numpy as np\n'), ((6572, 6621), 'numpy.zeros', 'np.zeros', (['(self.D_range_ext[-1], self.Y.shape[1])'], {}), '((self.D_range_ext[-1], self.Y.shape[1]))\n', (6580, 6621), True, 'import numpy as np\n'), ((7616, 7637), 'numpy.zeros_like', 'np.zeros_like', (['self.Y'], {}), '(self.Y)\n', (7629, 7637), True, 'import numpy as np\n'), ((8779, 8806), 'numpy.dot', 'np.dot', (['(Ybar + Yhat)', 'XCp1.T'], {}), '(Ybar + Yhat, XCp1.T)\n', (8785, 8806), True, 'import numpy as np\n'), ((9102, 9137), 'numpy.zeros', 'np.zeros', (['(self.nclass, Y.shape[1])'], {}), '((self.nclass, Y.shape[1]))\n', (9110, 9137), True, 'import numpy as np\n'), ((2779, 2802), 'numpy.dot', 'np.dot', (['self.DctDc', 'Xcc'], {}), '(self.DctDc, Xcc)\n', (2785, 2802), True, 'import numpy as np\n'), ((8252, 8280), 'numpy.dot', 'np.dot', (['(Ychat + Ycbar)', 'Xcc.T'], {}), '(Ychat + Ycbar, Xcc.T)\n', (8258, 8280), True, 'import numpy as np\n'), ((8364, 8394), 'numpy.delete', 'np.delete', (['A', 'Dc_range'], {'axis': '(1)'}), '(A, Dc_range, axis=1)\n', (8373, 8394), True, 'import numpy as np\n'), ((8659, 8733), 'numpy.dot', 'np.dot', (['self.D[:, :self.D_range_ext[-2]]', 'self.X[:self.D_range_ext[-2], :]'], {}), '(self.D[:, :self.D_range_ext[-2]], self.X[:self.D_range_ext[-2], :])\n', (8665, 8733), True, 'import numpy as 
np\n'), ((8821, 8841), 'numpy.dot', 'np.dot', (['XCp1', 'XCp1.T'], {}), '(XCp1, XCp1.T)\n', (8827, 8841), True, 'import numpy as np\n'), ((9444, 9464), 'numpy.argmin', 'np.argmin', (['E'], {'axis': '(0)'}), '(E, axis=0)\n', (9453, 9464), True, 'import numpy as np\n'), ((2505, 2528), 'numpy.dot', 'np.dot', (['self.DctDc', 'Xcc'], {}), '(self.DctDc, Xcc)\n', (2511, 2528), True, 'import numpy as np\n'), ((2531, 2560), 'numpy.dot', 'np.dot', (['self.DCp1tDc.T', 'XCp1c'], {}), '(self.DCp1tDc.T, XCp1c)\n', (2537, 2560), True, 'import numpy as np\n'), ((2643, 2672), 'numpy.dot', 'np.dot', (['self.DCp1tDCp1', 'XCp1c'], {}), '(self.DCp1tDCp1, XCp1c)\n', (2649, 2672), True, 'import numpy as np\n'), ((2675, 2700), 'numpy.dot', 'np.dot', (['self.DCp1tDc', 'Xcc'], {}), '(self.DCp1tDc, Xcc)\n', (2681, 2700), True, 'import numpy as np\n'), ((3087, 3105), 'numpy.dot', 'np.dot', (['self.D', 'Xc'], {}), '(self.D, Xc)\n', (3093, 3105), True, 'import numpy as np\n'), ((3169, 3193), 'numpy.dot', 'np.dot', (['self.DCp1', 'XCp1c'], {}), '(self.DCp1, XCp1c)\n', (3175, 3193), True, 'import numpy as np\n'), ((4812, 4834), 'numpy.dot', 'np.dot', (['self.D', 'self.X'], {}), '(self.D, self.X)\n', (4818, 4834), True, 'import numpy as np\n'), ((8175, 8190), 'numpy.dot', 'np.dot', (['Dc', 'Xcc'], {}), '(Dc, Xcc)\n', (8181, 8190), True, 'import numpy as np\n'), ((8216, 8235), 'numpy.dot', 'np.dot', (['DCp1', 'XCp1c'], {}), '(DCp1, XCp1c)\n', (8222, 8235), True, 'import numpy as np\n'), ((8299, 8317), 'numpy.dot', 'np.dot', (['Xcc', 'Xcc.T'], {}), '(Xcc, Xcc.T)\n', (8305, 8317), True, 'import numpy as np\n'), ((8512, 8544), 'numpy.dot', 'np.dot', (['self.D[:, Dc_range]', 'Xcc'], {}), '(self.D[:, Dc_range], Xcc)\n', (8518, 8544), True, 'import numpy as np\n'), ((9335, 9349), 'numpy.dot', 'np.dot', (['Dc', 'Xc'], {}), '(Dc, Xc)\n', (9341, 9349), True, 'import numpy as np\n'), ((3146, 3166), 'numpy.dot', 'np.dot', (['self.Dc', 'Xcc'], {}), '(self.Dc, Xcc)\n', (3152, 3166), True, 'import numpy as 
np\n'), ((5235, 5254), 'numpy.dot', 'np.dot', (['DCp1', 'XCp1c'], {}), '(DCp1, XCp1c)\n', (5241, 5254), True, 'import numpy as np\n'), ((8154, 8172), 'numpy.dot', 'np.dot', (['self.D', 'Xc'], {}), '(self.D, Xc)\n', (8160, 8172), True, 'import numpy as np\n'), ((5217, 5232), 'numpy.dot', 'np.dot', (['Dc', 'Xcc'], {}), '(Dc, Xcc)\n', (5223, 5232), True, 'import numpy as np\n'), ((5528, 5552), 'numpy.dot', 'np.dot', (['self.D.T', 'self.D'], {}), '(self.D.T, self.D)\n', (5534, 5552), True, 'import numpy as np\n')] |
import numpy as np
import os
import tensorflow as tf
import urllib.request
import matplotlib.pyplot as plt
# TensorFlow-Slim aka TF-Slim
# https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim/python/slim
# https://github.com/tensorflow/models/tree/master/research/slim
# use slim from contrib
import tensorflow.contrib.slim as slim
# use slim nets from contrib rather than research/slim
from tensorflow.contrib.slim.nets import vgg
# use datasets and preprocess from research/slim as not in contrib
from datasets import imagenet
# Load the mean pixel values and the function that performs the subtraction
# Note the access to protected members of preprocessing/vgg_preprocessing.py !
from preprocessing.vgg_preprocessing import (_mean_image_subtraction, _R_MEAN, _G_MEAN, _B_MEAN)
def discrete_matshow(data, labels_names=[], title=""):
"""Function to nicely print segmentation results with colorbar showing class names"""
fig, ax = plt.subplots(figsize=(12, 5), dpi=100)
# get discrete colormap
cmap = plt.get_cmap('Paired', np.max(data) - np.min(data) + 1)
# set limits .5 outside true range
mat = ax.matshow(data,
cmap=cmap,
vmin=np.min(data) - .5,
vmax=np.max(data) + .5)
# tell the colorbar to tick at integers
cbar = fig.colorbar(mat, ticks=np.arange(np.min(data), np.max(data) + 1))
# The names to be printed aside the colorbar
if labels_names:
cbar.ax.set_yticklabels(labels_names)
if title:
fig.suptitle(title, fontsize=14, fontweight='bold')
plt.subplots_adjust(right=0.5)
plt.show()
with tf.Graph().as_default():
image_string = tf.read_file('test_image.jpg')
image = tf.image.decode_jpeg(image_string, channels=3)
# Convert image to float32 before subtracting the
# mean pixel value
image_float = tf.to_float(image, name='ToFloat')
# Subtract the mean pixel value from each pixel
processed_image = _mean_image_subtraction(image_float, [_R_MEAN, _G_MEAN, _B_MEAN])
input_image = tf.expand_dims(processed_image, 0)
with slim.arg_scope(vgg.vgg_arg_scope()):
# spatial_squeeze option enables to use network in a fully
# convolutional manner
logits, _ = vgg.vgg_16(input_image,
num_classes=1000,
is_training=False,
spatial_squeeze=False)
# For each pixel we get predictions for each class
# out of 1000. We need to pick the one with the highest
# probability. To be more precise, these are not probabilities,
# because we didn't apply softmax. But if we pick a class
# with the highest value it will be equivalent to picking
# the highest value after applying softmax
pred = tf.argmax(logits, dimension=3)
checkpoints_dir = 'slim_pretrained'
init_fn = slim.assign_from_checkpoint_fn(
os.path.join(checkpoints_dir, 'vgg_16.ckpt'),
slim.get_model_variables('vgg_16'))
with tf.Session() as sess:
init_fn(sess)
segmentation, np_image = sess.run([pred, image])
# Remove the first empty dimension
segmentation = np.squeeze(segmentation)
# Let's get unique predicted classes (from 0 to 1000) and
# re-label the original predictions so that classes are
# numerated starting from zero
unique_classes, relabeled_image = np.unique(segmentation, return_inverse=True)
segmentation_size = segmentation.shape
relabeled_image = relabeled_image.reshape(segmentation_size)
labels_names = []
names = imagenet.create_readable_names_for_imagenet_labels()
for index, current_class_number in enumerate(unique_classes):
labels_names.append(str(index) + ' ' + names[current_class_number + 1])
# Show the image
plt.figure()
plt.imshow(np_image.astype(np.uint8))
plt.suptitle("Input Image", fontsize=14, fontweight='bold')
plt.axis('off')
plt.show()
# display the segmentation
discrete_matshow(data=relabeled_image, labels_names=labels_names, title="Segmentation")
| [
"matplotlib.pyplot.suptitle",
"tensorflow.contrib.slim.nets.vgg.vgg_16",
"matplotlib.pyplot.figure",
"os.path.join",
"numpy.unique",
"datasets.imagenet.create_readable_names_for_imagenet_labels",
"numpy.max",
"tensorflow.to_float",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"tensorf... | [((3214, 3238), 'numpy.squeeze', 'np.squeeze', (['segmentation'], {}), '(segmentation)\n', (3224, 3238), True, 'import numpy as np\n'), ((3419, 3463), 'numpy.unique', 'np.unique', (['segmentation'], {'return_inverse': '(True)'}), '(segmentation, return_inverse=True)\n', (3428, 3463), True, 'import numpy as np\n'), ((3593, 3645), 'datasets.imagenet.create_readable_names_for_imagenet_labels', 'imagenet.create_readable_names_for_imagenet_labels', ([], {}), '()\n', (3643, 3645), False, 'from datasets import imagenet\n'), ((3803, 3815), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3813, 3815), True, 'import matplotlib.pyplot as plt\n'), ((3854, 3913), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['"""Input Image"""'], {'fontsize': '(14)', 'fontweight': '"""bold"""'}), "('Input Image', fontsize=14, fontweight='bold')\n", (3866, 3913), True, 'import matplotlib.pyplot as plt\n'), ((3914, 3929), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3922, 3929), True, 'import matplotlib.pyplot as plt\n'), ((3930, 3940), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3938, 3940), True, 'import matplotlib.pyplot as plt\n'), ((971, 1009), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 5)', 'dpi': '(100)'}), '(figsize=(12, 5), dpi=100)\n', (983, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1615, 1645), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'right': '(0.5)'}), '(right=0.5)\n', (1634, 1645), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1660), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1658, 1660), True, 'import matplotlib.pyplot as plt\n'), ((1712, 1742), 'tensorflow.read_file', 'tf.read_file', (['"""test_image.jpg"""'], {}), "('test_image.jpg')\n", (1724, 1742), True, 'import tensorflow as tf\n'), ((1755, 1801), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_string'], {'channels': '(3)'}), '(image_string, channels=3)\n', 
(1775, 1801), True, 'import tensorflow as tf\n'), ((1898, 1932), 'tensorflow.to_float', 'tf.to_float', (['image'], {'name': '"""ToFloat"""'}), "(image, name='ToFloat')\n", (1909, 1932), True, 'import tensorflow as tf\n'), ((2008, 2073), 'preprocessing.vgg_preprocessing._mean_image_subtraction', '_mean_image_subtraction', (['image_float', '[_R_MEAN, _G_MEAN, _B_MEAN]'], {}), '(image_float, [_R_MEAN, _G_MEAN, _B_MEAN])\n', (2031, 2073), False, 'from preprocessing.vgg_preprocessing import _mean_image_subtraction, _R_MEAN, _G_MEAN, _B_MEAN\n'), ((2093, 2127), 'tensorflow.expand_dims', 'tf.expand_dims', (['processed_image', '(0)'], {}), '(processed_image, 0)\n', (2107, 2127), True, 'import tensorflow as tf\n'), ((2836, 2866), 'tensorflow.argmax', 'tf.argmax', (['logits'], {'dimension': '(3)'}), '(logits, dimension=3)\n', (2845, 2866), True, 'import tensorflow as tf\n'), ((2293, 2380), 'tensorflow.contrib.slim.nets.vgg.vgg_16', 'vgg.vgg_16', (['input_image'], {'num_classes': '(1000)', 'is_training': '(False)', 'spatial_squeeze': '(False)'}), '(input_image, num_classes=1000, is_training=False,\n spatial_squeeze=False)\n', (2303, 2380), False, 'from tensorflow.contrib.slim.nets import vgg\n'), ((2962, 3006), 'os.path.join', 'os.path.join', (['checkpoints_dir', '"""vgg_16.ckpt"""'], {}), "(checkpoints_dir, 'vgg_16.ckpt')\n", (2974, 3006), False, 'import os\n'), ((3016, 3050), 'tensorflow.contrib.slim.get_model_variables', 'slim.get_model_variables', (['"""vgg_16"""'], {}), "('vgg_16')\n", (3040, 3050), True, 'import tensorflow.contrib.slim as slim\n'), ((3062, 3074), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3072, 3074), True, 'import tensorflow as tf\n'), ((1668, 1678), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1676, 1678), True, 'import tensorflow as tf\n'), ((2153, 2172), 'tensorflow.contrib.slim.nets.vgg.vgg_arg_scope', 'vgg.vgg_arg_scope', ([], {}), '()\n', (2170, 2172), False, 'from tensorflow.contrib.slim.nets import vgg\n'), ((1073, 1085), 
'numpy.max', 'np.max', (['data'], {}), '(data)\n', (1079, 1085), True, 'import numpy as np\n'), ((1088, 1100), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1094, 1100), True, 'import numpy as np\n'), ((1231, 1243), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1237, 1243), True, 'import numpy as np\n'), ((1276, 1288), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (1282, 1288), True, 'import numpy as np\n'), ((1385, 1397), 'numpy.min', 'np.min', (['data'], {}), '(data)\n', (1391, 1397), True, 'import numpy as np\n'), ((1399, 1411), 'numpy.max', 'np.max', (['data'], {}), '(data)\n', (1405, 1411), True, 'import numpy as np\n')] |
import numpy as np
from NN1 import NN1
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from nn_utils import mate, Type, sort_by_fittest
import os
class gdnn:
def __init__(self, nlayers):
self.nlayers = 1
@classmethod
def read_dataset(self, dbpath, size):
path = os.path.dirname(os.path.abspath(__file__))
dataset = np.loadtxt(
path +
dbpath,
delimiter=",",
skiprows=1,
usecols=range(
1,
180))[
0:size]
neurons = dataset.shape[1] - 1
X = dataset[:, 0:neurons]
Y = dataset[:, neurons].reshape(X.__len__(), 1)
Y[Y > 1] = 0
maxn = 100 # np.matrix(X).maxn()
# Improving gradient descent through feature scaling
X = 2 * X / float(maxn) - 1
return shuffle(X, Y, random_state=1)
@classmethod
def process(self, X, Y):
train_x, test_x, train_y, test_y = train_test_split(
X, Y, test_size=0.2, random_state=1)
epochs = 600
best_n_children = 4
population_size = 10
gen = {}
generations = 10
# Generate a poblation of neural networks each trained from a random starting weigth
# ordered by the best performers (low error)
init_pob = [NN1(train_x, train_y, test_x, test_y, epochs)
for i in range(population_size)]
init_pob = sort_by_fittest([(nn.get_error(), nn)
for nn in init_pob], Type.error)
print("600,{}".format(init_pob[0][1].get_error()))
gen[0] = init_pob
result = []
for x in range(1, generations):
population = []
for i in range(population_size):
parent1 = gen[x -
1][np.random.randint(best_n_children)][1].get_weight()
parent2 = gen[x -
1][np.random.randint(best_n_children)][1].get_weight()
w_child = mate(parent1, parent2)
aux = NN1(train_x, train_y, test_x, test_y, epochs, w_child)
population += [tuple((aux.get_error(), aux))]
gen[x] = sort_by_fittest(population, Type.error)
net = gen[x][0][1]
result.append(
"{},{},{}".format(
(x + 1) * epochs,
net.get_error(),
net.calc_accuracy(
test_x,
test_y)))
del population
return(result)
| [
"os.path.abspath",
"nn_utils.sort_by_fittest",
"nn_utils.mate",
"sklearn.model_selection.train_test_split",
"NN1.NN1",
"numpy.random.randint",
"sklearn.utils.shuffle"
] | [((888, 917), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {'random_state': '(1)'}), '(X, Y, random_state=1)\n', (895, 917), False, 'from sklearn.utils import shuffle\n'), ((1008, 1061), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(1)'}), '(X, Y, test_size=0.2, random_state=1)\n', (1024, 1061), False, 'from sklearn.model_selection import train_test_split\n'), ((348, 373), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (363, 373), False, 'import os\n'), ((1363, 1408), 'NN1.NN1', 'NN1', (['train_x', 'train_y', 'test_x', 'test_y', 'epochs'], {}), '(train_x, train_y, test_x, test_y, epochs)\n', (1366, 1408), False, 'from NN1 import NN1\n'), ((2255, 2294), 'nn_utils.sort_by_fittest', 'sort_by_fittest', (['population', 'Type.error'], {}), '(population, Type.error)\n', (2270, 2294), False, 'from nn_utils import mate, Type, sort_by_fittest\n'), ((2072, 2094), 'nn_utils.mate', 'mate', (['parent1', 'parent2'], {}), '(parent1, parent2)\n', (2076, 2094), False, 'from nn_utils import mate, Type, sort_by_fittest\n'), ((2117, 2171), 'NN1.NN1', 'NN1', (['train_x', 'train_y', 'test_x', 'test_y', 'epochs', 'w_child'], {}), '(train_x, train_y, test_x, test_y, epochs, w_child)\n', (2120, 2171), False, 'from NN1 import NN1\n'), ((1875, 1909), 'numpy.random.randint', 'np.random.randint', (['best_n_children'], {}), '(best_n_children)\n', (1892, 1909), True, 'import numpy as np\n'), ((1994, 2028), 'numpy.random.randint', 'np.random.randint', (['best_n_children'], {}), '(best_n_children)\n', (2011, 2028), True, 'import numpy as np\n')] |
import time
from numpy import cos, sqrt, sin, array, pi
from math import pi as π, exp
def Levy3(x):
time.sleep(0.01)
x = array(x)
n = len(x)
y = 1 + (x - 1) / 4
# calculate f(y(x))
term1 = sin(pi*y[0])**2
term3 = (y[n-1]-1)**2 *(1 + sin(2*pi*y[n-1]))**2
sum = 0
for x_i in y:
new = (x_i-1)**2 * (1+10*sin(pi*x_i+1)**2)
sum += new
return term1+term3+sum
def michalewicz(x, m=10):
x = array(x)
return -sum(sin(v)*sin(i*v**2/pi)**(2*m) for (i,v) in enumerate(x))
def ackley(x, a=20, b=0.2, c=2*π):
time.sleep(0.01)
x = array(x)
d = len(x)
return -a*exp(-b*sqrt(sum(x**2)/d)) - exp(sum(cos(c*xi) for xi in x)/d) + a + exp(1)
| [
"math.exp",
"time.sleep",
"numpy.sin",
"numpy.array",
"numpy.cos"
] | [((106, 122), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (116, 122), False, 'import time\n'), ((131, 139), 'numpy.array', 'array', (['x'], {}), '(x)\n', (136, 139), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((448, 456), 'numpy.array', 'array', (['x'], {}), '(x)\n', (453, 456), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((570, 586), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (580, 586), False, 'import time\n'), ((595, 603), 'numpy.array', 'array', (['x'], {}), '(x)\n', (600, 603), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((215, 229), 'numpy.sin', 'sin', (['(pi * y[0])'], {}), '(pi * y[0])\n', (218, 229), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((702, 708), 'math.exp', 'exp', (['(1)'], {}), '(1)\n', (705, 708), False, 'from math import pi as π, exp\n'), ((263, 285), 'numpy.sin', 'sin', (['(2 * pi * y[n - 1])'], {}), '(2 * pi * y[n - 1])\n', (266, 285), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((473, 479), 'numpy.sin', 'sin', (['v'], {}), '(v)\n', (476, 479), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((348, 365), 'numpy.sin', 'sin', (['(pi * x_i + 1)'], {}), '(pi * x_i + 1)\n', (351, 365), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((480, 500), 'numpy.sin', 'sin', (['(i * v ** 2 / pi)'], {}), '(i * v ** 2 / pi)\n', (483, 500), False, 'from numpy import cos, sqrt, sin, array, pi\n'), ((670, 681), 'numpy.cos', 'cos', (['(c * xi)'], {}), '(c * xi)\n', (673, 681), False, 'from numpy import cos, sqrt, sin, array, pi\n')] |
import numpy as np
from clustering_system.evaluator.UnsupervisedEvaluation import UnsupervisedEvaluation
from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, \
v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, \
precision, nv_measure
class SupervisedEvaluation(UnsupervisedEvaluation):
"""A class containing supervised clustering metrics."""
def __init__(self, clusters: np.ndarray, classes: np.ndarray, aic: float, bic: float, likelihood: float):
"""
:param clusters: The cluster assignments
:param classes: The class assignments
:param aic: The Akaike information criterion
:param bic: The Bayesian information criterion
:param likelihood: The log likelihood
"""
super().__init__(clusters, aic, bic, likelihood)
self.N = len(classes)
self.C = len(np.unique(classes))
self.purity = purity(clusters, classes)
self.purity2 = purity2(clusters, classes)
self.rand_index = rand_index(clusters, classes)
self.class_entropy = entropy(classes)
self.precision = precision(clusters, classes)
self.recall = recall(clusters, classes)
self.f1_measure = f1_measure(clusters, classes)
self.homogeneity = homogeneity(clusters, classes)
self.completeness = completeness(clusters, classes)
self.v_measure = v_measure(clusters, classes)
self.nv_measure = nv_measure(clusters, classes)
self.mutual_information = mutual_information(clusters, classes)
self.normalized_mutual_information = normalized_mutual_information(clusters, classes)
self.normalized_mutual_information2 = normalized_mutual_information2(clusters, classes)
@staticmethod
def get_attribute_names():
"""
Return class attribute names.
:return: A list of tuples (attribute name, attribute)
"""
return [
('AIC', 'aic'),
('BIC', 'bic'),
('likelihood', 'likelihood'),
('number of observations', 'N'),
('number of classes', 'C'),
('number of clusters', 'K'),
('purity', 'purity'),
('purity 2', 'purity2'),
('rand index', 'rand_index'),
('entropy (clusters)', 'cluster_entropy'),
('entropy (classes)', 'class_entropy'),
('precision', 'precision'),
('recall', 'recall'),
('F-measure', 'f1_measure'),
('homogeneity', 'homogeneity'),
('completeness', 'completeness'),
('V-Measure', 'v_measure'),
('NV-Measure', 'nv_measure'),
('mutual information', 'mutual_information'),
('normalized mutual information', 'normalized_mutual_information'),
('normalized mutual information 2', 'normalized_mutual_information2')
]
def __repr__(self) -> str:
return self.__str__()
def __str__(self) -> str:
string = 'SupervisedEvaluation {\n'
string += " AIC = %f,\n" % self.aic
string += " BIC = %f,\n" % self.bic
string += " likelihood = %f,\n" % self.likelihood
string += " number of observations = %d,\n" % self.N
string += " number of classes = %d,\n" % self.C
string += " number of clusters = %d,\n" % self.K
string += " purity = %f,\n" % self.purity
string += " purity 2 = %s,\n" % self.purity2
string += " rand index = %f,\n" % self.rand_index
string += " entropy (clusters) = %f,\n" % self.cluster_entropy
string += " entropy (classes) = %f,\n" % self.class_entropy
string += " homogeneity = %f,\n" % self.homogeneity
string += " completeness = %f,\n" % self.completeness
string += " V-Measure = %f,\n" % self.v_measure
string += " NV-Measure = %f,\n" % self.nv_measure
string += " mutual information = %f,\n" % self.mutual_information
string += " normalized mutual information = %f,\n" % self.normalized_mutual_information
string += " normalized mutual information 2 = %f \n" % self.normalized_mutual_information2
string += '}'
return string
| [
"clustering_system.evaluator.measures.f1_measure",
"clustering_system.evaluator.measures.entropy",
"clustering_system.evaluator.measures.normalized_mutual_information2",
"clustering_system.evaluator.measures.precision",
"clustering_system.evaluator.measures.v_measure",
"clustering_system.evaluator.measure... | [((1017, 1042), 'clustering_system.evaluator.measures.purity', 'purity', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1023, 1042), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1066, 1092), 'clustering_system.evaluator.measures.purity2', 'purity2', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1073, 1092), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1119, 1148), 'clustering_system.evaluator.measures.rand_index', 'rand_index', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1129, 1148), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1178, 1194), 'clustering_system.evaluator.measures.entropy', 'entropy', (['classes'], {}), '(classes)\n', (1185, 1194), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1220, 1248), 'clustering_system.evaluator.measures.precision', 'precision', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1229, 1248), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, 
normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1271, 1296), 'clustering_system.evaluator.measures.recall', 'recall', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1277, 1296), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1323, 1352), 'clustering_system.evaluator.measures.f1_measure', 'f1_measure', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1333, 1352), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1380, 1410), 'clustering_system.evaluator.measures.homogeneity', 'homogeneity', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1391, 1410), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1439, 1470), 'clustering_system.evaluator.measures.completeness', 'completeness', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1451, 1470), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1496, 1524), 'clustering_system.evaluator.measures.v_measure', 'v_measure', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1505, 1524), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, 
v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1551, 1580), 'clustering_system.evaluator.measures.nv_measure', 'nv_measure', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1561, 1580), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1615, 1652), 'clustering_system.evaluator.measures.mutual_information', 'mutual_information', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1633, 1652), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1698, 1746), 'clustering_system.evaluator.measures.normalized_mutual_information', 'normalized_mutual_information', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1727, 1746), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((1793, 1842), 'clustering_system.evaluator.measures.normalized_mutual_information2', 'normalized_mutual_information2', (['clusters', 'classes'], {}), '(clusters, classes)\n', (1823, 1842), False, 'from clustering_system.evaluator.measures import purity, purity2, rand_index, entropy, homogeneity, completeness, v_measure, mutual_information, normalized_mutual_information, normalized_mutual_information2, f1_measure, recall, precision, nv_measure\n'), ((975, 993), 'numpy.unique', 'np.unique', (['classes'], {}), '(classes)\n', (984, 993), True, 'import numpy as 
np\n')] |
"""
"""
import numpy as np
from astropy.table import Table
from astropy.utils.misc import NumpyRNGContext
from astropy.tests.helper import pytest
from ...nfw_phase_space import NFWPhaseSpace
__all__ = ('test_mc_unit_sphere_stochasticity', )
fixed_seed = 43
def get_dummy_halo_table(npts):
    """Build a minimal ``npts``-row halo table carrying every column that the
    NFWPhaseSpace Monte Carlo methods read or write."""
    x = np.linspace(-1, 1, npts)
    zeros = np.zeros(npts)
    columns = {
        'halo_x': x, 'halo_y': zeros + 0.25, 'halo_z': zeros + 0.5,
        'host_centric_distance': x, 'halo_rvir': 3 * x,
        'conc_NFWmodel': zeros + 5.,
        'halo_vx': zeros, 'halo_vy': zeros, 'halo_vz': zeros,
        'halo_mvir': zeros + 1e12,
        'x': zeros, 'y': zeros, 'z': zeros,
        'vx': zeros, 'vy': zeros, 'vz': zeros,
    }
    return Table(columns)
def test_mc_unit_sphere_stochasticity():
    r""" Verify the stochastic/deterministic seeding behavior of
    `~halotools.empirical_models.NFWPhaseSpace.mc_unit_sphere`.
    """
    model = NFWPhaseSpace(concentration_bins=np.array((2, 5, 10)))
    # Identical seeds must reproduce the draw; seed=None must not.
    xa = model.mc_unit_sphere(100, seed=43)[0]
    xb = model.mc_unit_sphere(100, seed=43)[0]
    xc = model.mc_unit_sphere(100, seed=None)[0]
    assert np.allclose(xa, xb, rtol=0.001)
    assert not np.allclose(xa, xc, rtol=0.001)
def test_mc_unit_sphere():
    r""" Verify `~halotools.empirical_models.NFWPhaseSpace.mc_unit_sphere`
    returns points at unit distance from the origin.
    """
    model = NFWPhaseSpace(concentration_bins=np.array((2, 5, 10)))
    coords = model.mc_unit_sphere(100, seed=43)
    radii = np.linalg.norm(np.vstack(coords).T, axis=1)
    assert np.allclose(radii, 1, rtol=1e-4)
def test_mc_dimensionless_radial_distance_stochasticity():
    r""" Verify the stochastic/deterministic seeding behavior of
    `~halotools.empirical_models.NFWPhaseSpace._mc_dimensionless_radial_distance`.
    """
    conc = np.zeros(100) + 5
    model = NFWPhaseSpace(concentration_bins=np.array((2, 5, 10)))
    draw_a = model._mc_dimensionless_radial_distance(conc, seed=43)
    draw_b = model._mc_dimensionless_radial_distance(conc, seed=43)
    draw_c = model._mc_dimensionless_radial_distance(conc, seed=None)
    # Same seed reproduces the draw; a fresh seed does not.
    assert np.allclose(draw_a, draw_b, rtol=0.001)
    assert not np.allclose(draw_a, draw_c, rtol=0.001)
def test_mc_solid_sphere():
    r""" Verify `~halotools.empirical_models.NFWPhaseSpace.mc_solid_sphere`
    returns points strictly inside the unit sphere.
    """
    conc = np.zeros(100) + 5
    model = NFWPhaseSpace(concentration_bins=np.array((2, 5, 10)))
    x, y, z = model.mc_solid_sphere(conc, seed=43)
    radii = np.linalg.norm(np.vstack((x, y, z)).T, axis=1)
    assert np.all(radii < 1)
    assert np.all(radii > 0)
    # Each Cartesian coordinate is also bounded by the unit cube.
    for coord in (x, y, z):
        assert np.all(coord > -1)
        assert np.all(coord < 1)
def test_mc_solid_sphere_stochasticity():
    r""" Verify the stochastic/deterministic seeding behavior of
    `~halotools.empirical_models.NFWPhaseSpace.mc_solid_sphere`.
    """
    conc = np.zeros(100) + 5
    model = NFWPhaseSpace(concentration_bins=np.array((2, 5, 10)))
    xa = model.mc_solid_sphere(conc, seed=43)[0]
    xb = model.mc_solid_sphere(conc, seed=43)[0]
    xc = model.mc_solid_sphere(conc, seed=None)[0]
    assert np.allclose(xa, xb, rtol=0.001)
    assert not np.allclose(xa, xc, rtol=0.001)
def test_mc_halo_centric_pos():
    r""" Verify `~halotools.empirical_models.NFWPhaseSpace.mc_halo_centric_pos`.

    1. All returned points lie within the correct radial distance.
    2. Increasing the input concentration decreases the mean and median
       radial distance of the returned points.
    """
    r = 0.25
    npts = 100
    halo_radius = np.zeros(npts) + r
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))

    def draw(conc_value, radius):
        # Return the (x, y, z) draw and its radial norms for one concentration.
        xyz = nfw.mc_halo_centric_pos(np.zeros(npts) + conc_value,
            halo_radius=radius, seed=43)
        return xyz, np.linalg.norm(np.vstack(xyz).T, axis=1)

    (x15, y15, z15), norm15 = draw(15, halo_radius)
    for coord in (x15, y15, z15):
        assert np.all(coord > -r)
        assert np.all(coord < r)
    assert np.all(norm15 < r)
    assert np.all(norm15 > 0)

    _, norm5 = draw(5, halo_radius)
    _, norm10 = draw(10, halo_radius)
    # Higher concentration pulls points toward the halo center.
    assert np.mean(norm5) > np.mean(norm10) > np.mean(norm15)
    assert np.median(norm5) > np.median(norm10) > np.median(norm15)

    # Doubling the halo radius scales the spatial extent accordingly.
    _, norm10a = draw(10, halo_radius * 2)
    assert np.any(norm10a > r)
    assert np.all(norm10a < 2 * r)
def test_mc_halo_centric_pos_stochasticity():
    r""" Verify the stochastic/deterministic seeding behavior of
    `~halotools.empirical_models.NFWPhaseSpace.mc_halo_centric_pos`.
    """
    conc = np.zeros(100) + 15
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    radius = np.zeros(100) + 0.25
    draw_a = nfw.mc_halo_centric_pos(conc, halo_radius=radius, seed=43)
    draw_b = nfw.mc_halo_centric_pos(conc, halo_radius=radius, seed=43)
    draw_c = nfw.mc_halo_centric_pos(conc, halo_radius=radius, seed=None)
    assert np.allclose(draw_a[0], draw_b[0], rtol=0.01)
    assert not np.allclose(draw_a[0], draw_c[0], rtol=0.01)
def test_mc_pos():
    r""" Verify that `~halotools.empirical_models.NFWPhaseSpace.mc_pos`
    reproduces `mc_halo_centric_pos` exactly when given the same seed.
    """
    npts = 100
    conc = np.zeros(npts) + 15
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    radius = np.zeros(npts) + 0.25
    pos_direct = nfw.mc_pos(conc, halo_radius=radius, seed=43)
    pos_centric = nfw.mc_halo_centric_pos(conc, halo_radius=radius, seed=43)
    for a, b in zip(pos_direct, pos_centric):
        assert np.all(a == b)
    # Also exercise the table-based code path.
    nfw.mc_pos(table=get_dummy_halo_table(npts))
def test_mc_radial_velocity_consistency():
    r""" Verify `~halotools.empirical_models.NFWPhaseSpace.mc_radial_velocity`.

    Draw a Monte Carlo velocity sample with all tracers at one fixed radius
    and compare the sample dispersion against the analytical prediction.
    """
    npts = int(1e4)
    conc, mass, scaled_radius = 10, 1e12, 0.4
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    radius_arr = np.zeros(npts) + scaled_radius
    conc_arr = np.zeros_like(radius_arr) + conc
    mc_dispersion = np.std(nfw.mc_radial_velocity(radius_arr, mass, conc_arr, seed=43))
    physical_radius = scaled_radius * nfw.halo_mass_to_halo_radius(mass)
    analytic = nfw.radial_velocity_dispersion(physical_radius, mass, conc)
    assert np.allclose(mc_dispersion, analytic, rtol=0.05)
def test_mc_radial_velocity_stochasticity():
    r""" Verify the stochastic/deterministic seeding behavior of
    `~halotools.empirical_models.NFWPhaseSpace.mc_radial_velocity`.
    """
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    npts, conc, mass = 100, 10, 1e12
    conc_arr = np.zeros(npts) + conc
    # Place every tracer at rmax, expressed as a scaled radius.
    radius = np.zeros(npts) + nfw.rmax(mass, conc)
    scaled_radius = radius / nfw.halo_mass_to_halo_radius(mass)
    draw_43a = nfw.mc_radial_velocity(scaled_radius, mass, conc_arr, seed=43)
    draw_43b = nfw.mc_radial_velocity(scaled_radius, mass, conc_arr, seed=43)
    draw_44 = nfw.mc_radial_velocity(scaled_radius, mass, conc_arr, seed=44)
    assert np.allclose(draw_43a, draw_43b, rtol=0.001)
    assert not np.allclose(draw_43a, draw_44, rtol=0.001)
def test_mc_pos1():
    r""" Verify that the seed keyword is treated properly.
    This function serves as a regression test for https://github.com/astropy/halotools/issues/672.
    """
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    num_halos = 200
    halos = Table()
    for axis in ('x', 'y', 'z'):
        halos[axis] = np.zeros(num_halos)
    halos['host_centric_distance'] = 0.
    halos['halo_rvir'] = 1.
    halos['conc_NFWmodel'] = 5.
    halos['halo_mvir'] = 1e12
    nfw.mc_pos(table=halos, seed=43)
    # The sample should spread across most of the unit halo radius.
    for axis in ('x', 'y', 'z'):
        assert np.min(halos[axis]) < -0.7
        assert np.max(halos[axis]) > 0.7
def test_mc_vel1():
    r""" Verify that `mc_vel` actually perturbs the galaxy velocity columns.
    """
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    table = get_dummy_halo_table(10)
    # Before the call the galaxy velocities mirror the host halo velocities.
    assert not np.any(table['vx'] != table['halo_vx'])
    nfw.mc_vel(table, seed=fixed_seed)
    assert not np.all(table['vx'] == table['halo_vx'])
def test_mc_vel2():
    r""" Method verifies that seed keyword is treated properly.
    This serves as a regression test for https://github.com/astropy/halotools/issues/672
    """
    nfw = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    num_halos = 100
    halos = Table()
    for axis in ('vx', 'vy', 'vz'):
        halos[axis] = np.zeros(num_halos)
    halos['host_centric_distance'] = np.logspace(-2, 0, num_halos)
    halos['halo_rvir'] = 1.
    halos['conc_NFWmodel'] = 5.
    halos['halo_mvir'] = 1e12
    nfw.mc_vel(halos, seed=43)
    # Independent random draws per component: columns must differ.
    assert not np.all(halos['vx'] == halos['vy'])
    assert not np.all(halos['vx'] == halos['vz'])
def test_seed_treatment1():
    r""" Regression test for https://github.com/astropy/halotools/issues/672.
    """
    model = NFWPhaseSpace(concentration_bins=np.array((5, 10, 15)))
    sats = model.mc_generate_nfw_phase_space_points(seed=43, Ngals=10)
    # Velocity components must not be identical copies of each other.
    assert not np.all(sats['vx'] == sats['vy'])
| [
"numpy.zeros_like",
"astropy.table.Table",
"numpy.std",
"numpy.logspace",
"numpy.allclose",
"numpy.median",
"numpy.zeros",
"numpy.any",
"numpy.min",
"numpy.mean",
"numpy.linalg.norm",
"numpy.max",
"numpy.linspace",
"numpy.array",
"numpy.vstack",
"numpy.all"
] | [((303, 327), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'npts'], {}), '(-1, 1, npts)\n', (314, 327), True, 'import numpy as np\n'), ((398, 412), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (406, 412), True, 'import numpy as np\n'), ((425, 717), 'astropy.table.Table', 'Table', (["{'halo_x': x, 'halo_y': zeros + 0.25, 'halo_z': zeros + 0.5,\n 'host_centric_distance': x, 'halo_rvir': 3 * x, 'conc_NFWmodel': c,\n 'halo_vx': zeros, 'halo_vy': zeros, 'halo_vz': zeros, 'halo_mvir': m,\n 'x': zeros, 'y': zeros, 'z': zeros, 'vx': zeros, 'vy': zeros, 'vz': zeros}"], {}), "({'halo_x': x, 'halo_y': zeros + 0.25, 'halo_z': zeros + 0.5,\n 'host_centric_distance': x, 'halo_rvir': 3 * x, 'conc_NFWmodel': c,\n 'halo_vx': zeros, 'halo_vy': zeros, 'halo_vz': zeros, 'halo_mvir': m,\n 'x': zeros, 'y': zeros, 'z': zeros, 'vx': zeros, 'vy': zeros, 'vz': zeros})\n", (430, 717), False, 'from astropy.table import Table\n'), ((1151, 1182), 'numpy.allclose', 'np.allclose', (['x1', 'x2'], {'rtol': '(0.001)'}), '(x1, x2, rtol=0.001)\n', (1162, 1182), True, 'import numpy as np\n'), ((1602, 1629), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (1616, 1629), True, 'import numpy as np\n'), ((1641, 1674), 'numpy.allclose', 'np.allclose', (['norm', '(1)'], {'rtol': '(0.0001)'}), '(norm, 1, rtol=0.0001)\n', (1652, 1674), True, 'import numpy as np\n'), ((2216, 2247), 'numpy.allclose', 'np.allclose', (['x1', 'x2'], {'rtol': '(0.001)'}), '(x1, x2, rtol=0.001)\n', (2227, 2247), True, 'import numpy as np\n'), ((2701, 2728), 'numpy.linalg.norm', 'np.linalg.norm', (['pos'], {'axis': '(1)'}), '(pos, axis=1)\n', (2715, 2728), True, 'import numpy as np\n'), ((2740, 2756), 'numpy.all', 'np.all', (['(norm < 1)'], {}), '(norm < 1)\n', (2746, 2756), True, 'import numpy as np\n'), ((2768, 2784), 'numpy.all', 'np.all', (['(norm > 0)'], {}), '(norm > 0)\n', (2774, 2784), True, 'import numpy as np\n'), ((2796, 2810), 'numpy.all', 'np.all', (['(x > -1)'], 
{}), '(x > -1)\n', (2802, 2810), True, 'import numpy as np\n'), ((2822, 2835), 'numpy.all', 'np.all', (['(x < 1)'], {}), '(x < 1)\n', (2828, 2835), True, 'import numpy as np\n'), ((2847, 2861), 'numpy.all', 'np.all', (['(y > -1)'], {}), '(y > -1)\n', (2853, 2861), True, 'import numpy as np\n'), ((2873, 2886), 'numpy.all', 'np.all', (['(y < 1)'], {}), '(y < 1)\n', (2879, 2886), True, 'import numpy as np\n'), ((2898, 2912), 'numpy.all', 'np.all', (['(z > -1)'], {}), '(z > -1)\n', (2904, 2912), True, 'import numpy as np\n'), ((2924, 2937), 'numpy.all', 'np.all', (['(z < 1)'], {}), '(z < 1)\n', (2930, 2937), True, 'import numpy as np\n'), ((3417, 3448), 'numpy.allclose', 'np.allclose', (['x1', 'x2'], {'rtol': '(0.001)'}), '(x1, x2, rtol=0.001)\n', (3428, 3448), True, 'import numpy as np\n'), ((4165, 4181), 'numpy.all', 'np.all', (['(x15 > -r)'], {}), '(x15 > -r)\n', (4171, 4181), True, 'import numpy as np\n'), ((4193, 4208), 'numpy.all', 'np.all', (['(x15 < r)'], {}), '(x15 < r)\n', (4199, 4208), True, 'import numpy as np\n'), ((4220, 4236), 'numpy.all', 'np.all', (['(y15 > -r)'], {}), '(y15 > -r)\n', (4226, 4236), True, 'import numpy as np\n'), ((4248, 4263), 'numpy.all', 'np.all', (['(y15 < r)'], {}), '(y15 < r)\n', (4254, 4263), True, 'import numpy as np\n'), ((4275, 4291), 'numpy.all', 'np.all', (['(z15 > -r)'], {}), '(z15 > -r)\n', (4281, 4291), True, 'import numpy as np\n'), ((4303, 4318), 'numpy.all', 'np.all', (['(z15 < r)'], {}), '(z15 < r)\n', (4309, 4318), True, 'import numpy as np\n'), ((4374, 4403), 'numpy.linalg.norm', 'np.linalg.norm', (['pos15'], {'axis': '(1)'}), '(pos15, axis=1)\n', (4388, 4403), True, 'import numpy as np\n'), ((4415, 4433), 'numpy.all', 'np.all', (['(norm15 < r)'], {}), '(norm15 < r)\n', (4421, 4433), True, 'import numpy as np\n'), ((4445, 4463), 'numpy.all', 'np.all', (['(norm15 > 0)'], {}), '(norm15 > 0)\n', (4451, 4463), True, 'import numpy as np\n'), ((4601, 4629), 'numpy.linalg.norm', 'np.linalg.norm', (['pos5'], {'axis': 
'(1)'}), '(pos5, axis=1)\n', (4615, 4629), True, 'import numpy as np\n'), ((4777, 4806), 'numpy.linalg.norm', 'np.linalg.norm', (['pos10'], {'axis': '(1)'}), '(pos10, axis=1)\n', (4791, 4806), True, 'import numpy as np\n'), ((5151, 5181), 'numpy.linalg.norm', 'np.linalg.norm', (['pos10a'], {'axis': '(1)'}), '(pos10a, axis=1)\n', (5165, 5181), True, 'import numpy as np\n'), ((5194, 5213), 'numpy.any', 'np.any', (['(norm10a > r)'], {}), '(norm10a > r)\n', (5200, 5213), True, 'import numpy as np\n'), ((5225, 5248), 'numpy.all', 'np.all', (['(norm10a < 2 * r)'], {}), '(norm10a < 2 * r)\n', (5231, 5248), True, 'import numpy as np\n'), ((5908, 5942), 'numpy.allclose', 'np.allclose', (['x15a', 'x15b'], {'rtol': '(0.01)'}), '(x15a, x15b, rtol=0.01)\n', (5919, 5942), True, 'import numpy as np\n'), ((6548, 6564), 'numpy.all', 'np.all', (['(x1 == x2)'], {}), '(x1 == x2)\n', (6554, 6564), True, 'import numpy as np\n'), ((6576, 6592), 'numpy.all', 'np.all', (['(y1 == y2)'], {}), '(y1 == y2)\n', (6582, 6592), True, 'import numpy as np\n'), ((6604, 6620), 'numpy.all', 'np.all', (['(z1 == z2)'], {}), '(z1 == z2)\n', (6610, 6620), True, 'import numpy as np\n'), ((7385, 7398), 'numpy.std', 'np.std', (['mc_vr'], {}), '(mc_vr)\n', (7391, 7398), True, 'import numpy as np\n'), ((7565, 7638), 'numpy.allclose', 'np.allclose', (['vr_dispersion_from_monte_carlo', 'analytical_result'], {'rtol': '(0.05)'}), '(vr_dispersion_from_monte_carlo, analytical_result, rtol=0.05)\n', (7576, 7638), True, 'import numpy as np\n'), ((8374, 8427), 'numpy.allclose', 'np.allclose', (['mc_vr_seed43a', 'mc_vr_seed43b'], {'rtol': '(0.001)'}), '(mc_vr_seed43a, mc_vr_seed43b, rtol=0.001)\n', (8385, 8427), True, 'import numpy as np\n'), ((8762, 8769), 'astropy.table.Table', 'Table', ([], {}), '()\n', (8767, 8769), False, 'from astropy.table import Table\n'), ((8807, 8826), 'numpy.zeros', 'np.zeros', (['num_halos'], {}), '(num_halos)\n', (8815, 8826), True, 'import numpy as np\n'), ((8844, 8863), 'numpy.zeros', 
'np.zeros', (['num_halos'], {}), '(num_halos)\n', (8852, 8863), True, 'import numpy as np\n'), ((8881, 8900), 'numpy.zeros', 'np.zeros', (['num_halos'], {}), '(num_halos)\n', (8889, 8900), True, 'import numpy as np\n'), ((9447, 9496), 'numpy.all', 'np.all', (["(halo_table['vx'] == halo_table['halo_vx'])"], {}), "(halo_table['vx'] == halo_table['halo_vx'])\n", (9453, 9496), True, 'import numpy as np\n'), ((9553, 9602), 'numpy.any', 'np.any', (["(halo_table['vx'] != halo_table['halo_vx'])"], {}), "(halo_table['vx'] != halo_table['halo_vx'])\n", (9559, 9602), True, 'import numpy as np\n'), ((9864, 9871), 'astropy.table.Table', 'Table', ([], {}), '()\n', (9869, 9871), False, 'from astropy.table import Table\n'), ((9910, 9929), 'numpy.zeros', 'np.zeros', (['num_halos'], {}), '(num_halos)\n', (9918, 9929), True, 'import numpy as np\n'), ((9948, 9967), 'numpy.zeros', 'np.zeros', (['num_halos'], {}), '(num_halos)\n', (9956, 9967), True, 'import numpy as np\n'), ((9986, 10005), 'numpy.zeros', 'np.zeros', (['num_halos'], {}), '(num_halos)\n', (9994, 10005), True, 'import numpy as np\n'), ((10043, 10072), 'numpy.logspace', 'np.logspace', (['(-2)', '(0)', 'num_halos'], {}), '(-2, 0, num_halos)\n', (10054, 10072), True, 'import numpy as np\n'), ((10562, 10606), 'numpy.any', 'np.any', (["(satellites['vx'] != satellites['vy'])"], {}), "(satellites['vx'] != satellites['vy'])\n", (10568, 10606), True, 'import numpy as np\n'), ((336, 350), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (344, 350), True, 'import numpy as np\n'), ((364, 378), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (372, 378), True, 'import numpy as np\n'), ((1198, 1229), 'numpy.allclose', 'np.allclose', (['x1', 'x3'], {'rtol': '(0.001)'}), '(x1, x3, rtol=0.001)\n', (1209, 1229), True, 'import numpy as np\n'), ((1568, 1588), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (1577, 1588), True, 'import numpy as np\n'), ((1938, 1952), 'numpy.zeros', 'np.zeros', (['Npts'], {}), 
'(Npts)\n', (1946, 1952), True, 'import numpy as np\n'), ((2263, 2294), 'numpy.allclose', 'np.allclose', (['x1', 'x3'], {'rtol': '(0.001)'}), '(x1, x3, rtol=0.001)\n', (2274, 2294), True, 'import numpy as np\n'), ((2525, 2539), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (2533, 2539), True, 'import numpy as np\n'), ((2667, 2687), 'numpy.vstack', 'np.vstack', (['[x, y, z]'], {}), '([x, y, z])\n', (2676, 2687), True, 'import numpy as np\n'), ((3168, 3182), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (3176, 3182), True, 'import numpy as np\n'), ((3464, 3495), 'numpy.allclose', 'np.allclose', (['x1', 'x3'], {'rtol': '(0.001)'}), '(x1, x3, rtol=0.001)\n', (3475, 3495), True, 'import numpy as np\n'), ((3876, 3890), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (3884, 3890), True, 'import numpy as np\n'), ((3905, 3919), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (3913, 3919), True, 'import numpy as np\n'), ((3935, 3949), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (3943, 3949), True, 'import numpy as np\n'), ((4332, 4358), 'numpy.vstack', 'np.vstack', (['[x15, y15, z15]'], {}), '([x15, y15, z15])\n', (4341, 4358), True, 'import numpy as np\n'), ((4563, 4586), 'numpy.vstack', 'np.vstack', (['[x5, y5, z5]'], {}), '([x5, y5, z5])\n', (4572, 4586), True, 'import numpy as np\n'), ((4735, 4761), 'numpy.vstack', 'np.vstack', (['[x10, y10, z10]'], {}), '([x10, y10, z10])\n', (4744, 4761), True, 'import numpy as np\n'), ((4819, 4833), 'numpy.mean', 'np.mean', (['norm5'], {}), '(norm5)\n', (4826, 4833), True, 'import numpy as np\n'), ((4836, 4851), 'numpy.mean', 'np.mean', (['norm10'], {}), '(norm10)\n', (4843, 4851), True, 'import numpy as np\n'), ((4863, 4878), 'numpy.mean', 'np.mean', (['norm10'], {}), '(norm10)\n', (4870, 4878), True, 'import numpy as np\n'), ((4881, 4896), 'numpy.mean', 'np.mean', (['norm15'], {}), '(norm15)\n', (4888, 4896), True, 'import numpy as np\n'), ((4909, 4925), 'numpy.median', 'np.median', 
(['norm5'], {}), '(norm5)\n', (4918, 4925), True, 'import numpy as np\n'), ((4928, 4945), 'numpy.median', 'np.median', (['norm10'], {}), '(norm10)\n', (4937, 4945), True, 'import numpy as np\n'), ((4957, 4974), 'numpy.median', 'np.median', (['norm10'], {}), '(norm10)\n', (4966, 4974), True, 'import numpy as np\n'), ((4977, 4994), 'numpy.median', 'np.median', (['norm15'], {}), '(norm15)\n', (4986, 4994), True, 'import numpy as np\n'), ((5105, 5134), 'numpy.vstack', 'np.vstack', (['[x10a, y10a, z10a]'], {}), '([x10a, y10a, z10a])\n', (5114, 5134), True, 'import numpy as np\n'), ((5485, 5499), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (5493, 5499), True, 'import numpy as np\n'), ((5958, 5992), 'numpy.allclose', 'np.allclose', (['x15a', 'x15c'], {'rtol': '(0.01)'}), '(x15a, x15c, rtol=0.01)\n', (5969, 5992), True, 'import numpy as np\n'), ((6247, 6261), 'numpy.zeros', 'np.zeros', (['Npts'], {}), '(Npts)\n', (6255, 6261), True, 'import numpy as np\n'), ((7113, 7127), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (7121, 7127), True, 'import numpy as np\n'), ((7158, 7192), 'numpy.zeros_like', 'np.zeros_like', (['scaled_radius_array'], {}), '(scaled_radius_array)\n', (7171, 7192), True, 'import numpy as np\n'), ((7951, 7965), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (7959, 7965), True, 'import numpy as np\n'), ((8030, 8044), 'numpy.zeros', 'np.zeros', (['npts'], {}), '(npts)\n', (8038, 8044), True, 'import numpy as np\n'), ((8443, 8495), 'numpy.allclose', 'np.allclose', (['mc_vr_seed43a', 'mc_vr_seed44'], {'rtol': '(0.001)'}), '(mc_vr_seed43a, mc_vr_seed44, rtol=0.001)\n', (8454, 8495), True, 'import numpy as np\n'), ((9080, 9098), 'numpy.min', 'np.min', (["halos['x']"], {}), "(halos['x'])\n", (9086, 9098), True, 'import numpy as np\n'), ((9117, 9135), 'numpy.min', 'np.min', (["halos['y']"], {}), "(halos['y'])\n", (9123, 9135), True, 'import numpy as np\n'), ((9154, 9172), 'numpy.min', 'np.min', (["halos['z']"], {}), "(halos['z'])\n", 
(9160, 9172), True, 'import numpy as np\n'), ((9191, 9209), 'numpy.max', 'np.max', (["halos['x']"], {}), "(halos['x'])\n", (9197, 9209), True, 'import numpy as np\n'), ((9227, 9245), 'numpy.max', 'np.max', (["halos['y']"], {}), "(halos['y'])\n", (9233, 9245), True, 'import numpy as np\n'), ((9263, 9281), 'numpy.max', 'np.max', (["halos['z']"], {}), "(halos['z'])\n", (9269, 9281), True, 'import numpy as np\n'), ((10209, 10243), 'numpy.all', 'np.all', (["(halos['vx'] == halos['vy'])"], {}), "(halos['vx'] == halos['vy'])\n", (10215, 10243), True, 'import numpy as np\n'), ((10259, 10293), 'numpy.all', 'np.all', (["(halos['vx'] == halos['vz'])"], {}), "(halos['vx'] == halos['vz'])\n", (10265, 10293), True, 'import numpy as np\n'), ((966, 986), 'numpy.array', 'np.array', (['(2, 5, 10)'], {}), '((2, 5, 10))\n', (974, 986), True, 'import numpy as np\n'), ((1489, 1509), 'numpy.array', 'np.array', (['(2, 5, 10)'], {}), '((2, 5, 10))\n', (1497, 1509), True, 'import numpy as np\n'), ((2001, 2021), 'numpy.array', 'np.array', (['(2, 5, 10)'], {}), '((2, 5, 10))\n', (2009, 2021), True, 'import numpy as np\n'), ((2588, 2608), 'numpy.array', 'np.array', (['(2, 5, 10)'], {}), '((2, 5, 10))\n', (2596, 2608), True, 'import numpy as np\n'), ((3231, 3251), 'numpy.array', 'np.array', (['(2, 5, 10)'], {}), '((2, 5, 10))\n', (3239, 3251), True, 'import numpy as np\n'), ((4039, 4060), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (4047, 4060), True, 'import numpy as np\n'), ((5548, 5569), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (5556, 5569), True, 'import numpy as np\n'), ((6310, 6331), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (6318, 6331), True, 'import numpy as np\n'), ((7244, 7265), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (7252, 7265), True, 'import numpy as np\n'), ((7882, 7903), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (7890, 7903), True, 'import 
numpy as np\n'), ((8727, 8748), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (8735, 8748), True, 'import numpy as np\n'), ((9370, 9391), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (9378, 9391), True, 'import numpy as np\n'), ((9829, 9850), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (9837, 9850), True, 'import numpy as np\n'), ((10453, 10474), 'numpy.array', 'np.array', (['(5, 10, 15)'], {}), '((5, 10, 15))\n', (10461, 10474), True, 'import numpy as np\n')] |
# slicer imports
from __main__ import vtk, slicer
# python includes
import logging
import sys
import time
import numpy as np
import os
import math
class Helper(object):
'''
classdocs
'''
@staticmethod
def Info(message):
'''
'''
# logging.debug("[PedicleScrewSimulatorPy " + time.strftime( "%Y-%m-%d %H:%M:%S" ) + "]: " + str( message ))
# sys.stdout.flush()
@staticmethod
def Warning(message):
'''
'''
# logging.debug("[PedicleScrewSimulatorPy " + time.strftime( "%Y-%m-%d %H:%M:%S" ) + "]: WARNING: " + str( message ))
# sys.stdout.flush()
@staticmethod
def Error(message):
'''
'''
logging.debug("[PedicleScrewSimulatorPy " + time.strftime("%Y-%m-%d %H:%M:%S") + "]: ERROR: " + str(message))
sys.stdout.flush()
@staticmethod
def ErrorPopup(message):
'''
'''
messageBox = qt.QMessageBox()
messageBox.critical(None, '', message)
@staticmethod
def Debug(message):
'''
'''
showDebugOutput = 0
from time import strftime
if showDebugOutput:
logging.debug("[PedicleScrewSimulatorPy " + time.strftime("%Y-%m-%d %H:%M:%S") + "] DEBUG: " + str(message))
sys.stdout.flush()
@staticmethod
def CreateSpace(n):
'''
'''
spacer = ""
for s in range(n):
spacer += " "
return spacer
@staticmethod
def GetNthStepId(n):
'''
'''
steps = [None, # 0
'LoadData', # 1
'DefineROI', # 2
'Measurements', # 3
'Landmarks', # 4
'Final', # 5
]
if n < 0 or n > len(steps):
n = 0
return steps[n]
@staticmethod
def SetBgFgVolumes(bg):
appLogic = slicer.app.applicationLogic()
selectionNode = appLogic.GetSelectionNode()
selectionNode.SetReferenceActiveVolumeID(bg)
appLogic.PropagateVolumeSelection()
@staticmethod
def SetLabelVolume(lb):
appLogic = slicer.app.applicationLogic()
selectionNode = appLogic.GetSelectionNode()
selectionNode.SetReferenceActiveLabelVolumeID(lb)
appLogic.PropagateVolumeSelection()
@staticmethod
def findChildren(widget=None, name="", text=""):
""" return a list of child widgets that match the passed name """
# TODO: figure out why the native QWidget.findChildren method
# does not seem to work from PythonQt
if not widget:
widget = mainWindow()
children = []
parents = [widget]
while parents != []:
p = parents.pop()
parents += p.children()
if name and p.name.find(name) >= 0:
children.append(p)
elif text:
try:
p.text
if p.text.find(text) >= 0:
children.append(p)
except AttributeError:
pass
return children
@staticmethod
def getNodeByID(id):
return slicer.mrmlScene.GetNodeByID(id)
@staticmethod
def readFileAsString(fname):
s = ''
with open(fname, 'r') as f:
s = f.read()
return s
  @staticmethod
  def probeVolume(P1, P2, Ps=20, thV=150, Vol="baselineROI"):
    '''
    Walk along the extended P1->P2 ray and return the first sampled RAS
    point whose voxel intensity exceeds the threshold.

    P1, P2: RAS coordinates (numpy arrays) defining the probing direction.
    Ps:     how far past P2 (in the same units as the coordinates) the
            probing starts -- assumed millimeters in RAS space; confirm.
    thV:    voxel-intensity threshold that stops the search.
    Vol:    name of the volume node to sample.

    Returns the hit point (numpy array); implicitly returns None when no
    probed voxel exceeds the threshold.
    '''
    volumeNode = slicer.util.getNode(Vol)
    voxels = slicer.util.arrayFromVolume(volumeNode)
    # Integer length of the P1-P2 segment. The loop samples points at
    # distance j from P1 (p2pexLine's "plus" of j - L lands j units from
    # P1), starting Ps beyond P2 and stepping back toward P1.
    L = int(np.linalg.norm(P1 - P2))
    for j in range(Ps + L, 0, -1):
      P = Helper.p2pexLine(P1, P2, j - L)
      # addFid(P,lableName="{}".format(j))
      volumeRasToIjk = vtk.vtkMatrix4x4()
      volumeNode.GetRASToIJKMatrix(volumeRasToIjk)
      point_Ijk = [0, 0, 0, 1]
      # Convert the homogeneous RAS point into IJK voxel indices.
      volumeRasToIjk.MultiplyPoint(np.append(P, 1.0), point_Ijk)
      point_Ijk = [int(round(c)) for c in point_Ijk[0:3]]
      # arrayFromVolume arrays are indexed [K, J, I], hence the reversal.
      voxelValue = voxels[point_Ijk[2], point_Ijk[1], point_Ijk[0]]
      if voxelValue > thV:
        # Mark the hit with a fiducial and stop.
        Helper.addFid(P, 1, lableName="PB")
        return P
@staticmethod
def myColor(colorName):
if colorName == "red":
colorArr = [1, 0, 0]
elif colorName == "green":
colorArr = [0, 1, 0]
elif colorName == "blue":
colorArr = [0, 0, 1]
elif colorName == "black":
colorArr = [0, 0, 0]
elif colorName == "white":
colorArr = [1, 1, 1]
elif colorName == "yellow":
colorArr = [1, 1, 0]
elif colorName == "pink":
colorArr = [1, 0, 1]
elif colorName == "cyan":
colorArr = [0, 1, 1]
return (colorArr)
  @staticmethod
  def p2pexLine(pos1, pos2, plus=200, Dim=5.0, modName="", color="red", Visibility2D=True):
    """Compute the end point of the pos1->pos2 segment extended by *plus*
    units past pos2, optionally visualizing the extended segment.

    pos1, pos2: numpy arrays (RAS coordinates -- presumably; confirm).
    plus:       extra length appended beyond pos2 along the same direction.
    Dim:        glyph size factor for the optional line node (scale = Dim * 10).
    modName:    when non-empty, a markups line node with this name is added
                to the scene from pos1 to the extended end point.
    Returns the extended end point as a numpy array.
    """
    # Unit vector pointing from pos1 toward pos2.
    direction = (pos2 - pos1) / np.linalg.norm(pos2 - pos1)
    # End point at distance |pos2 - pos1| + plus from pos1 along the ray.
    pos3 = pos1 + direction / np.linalg.norm(direction) * (plus + np.linalg.norm(pos2 - pos1))
    if modName != "":
      # Visualize the extended segment as a markups line node.
      markupsNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsLineNode', modName)
      markupsNode.AddControlPoint(vtk.vtkVector3d(pos1))
      markupsNode.AddControlPoint(vtk.vtkVector3d(pos3))
      markupsNode.CreateDefaultDisplayNodes()
      dn = markupsNode.GetDisplayNode()
      dn.SetGlyphTypeFromString("CrossDot2D")
      dn.SetGlyphScale(Dim * 10)
      # dn.SetSliceIntersectionThickness(3)
      dn.SetSelectedColor(Helper.myColor(color))
      # dn.SetSliceDisplayModeToProjection()
      dn.SetVisibility2D(Visibility2D)
      # Show the line's length measurement label.
      markupsNode.GetMeasurement('length').SetEnabled(True)
    return np.array(pos3)
@staticmethod
def Psline(nodName):
"""Take the line coordinates"""
lineNode = slicer.util.getNode(nodName)
lineStartP = np.zeros(3)
lineEndP = np.zeros(3)
lineNode.GetNthControlPointPositionWorld(0, lineStartP)
lineNode.GetNthControlPointPositionWorld(1, lineEndP)
length = lineNode.GetMeasurement('length').GetValue()
dn = lineNode.GetDisplayNode()
Dim = dn.GetGlyphScale() / 10
return lineStartP, lineEndP, length, Dim
# @staticmethod
def addFid(data, Dim=.5, nodName="N", lableName="1", color="red", GlyphType=1):
"""add a larger point"""
xyz = tuple(data)
tipFiducial = slicer.mrmlScene.AddNode(slicer.vtkMRMLMarkupsFiducialNode())
tipFiducial.SetName(nodName)
tipFiducial.AddFiducial(xyz[0], xyz[1], xyz[2])
tipFiducial.SetNthFiducialLabel(0, lableName)
slicer.mrmlScene.AddNode(tipFiducial)
tipFiducial.SetDisplayVisibility(True)
tipFiducial.GetDisplayNode().SetGlyphType(GlyphType) # Vertex2D
tipFiducial.GetDisplayNode().SetGlyphScale(Dim * 10)
tipFiducial.GetDisplayNode().SetTextScale(3)
tipFiducial.GetDisplayNode().SetSelectedColor(Helper.myColor(color))
''' GlyphShapes {
GlyphTypeInvalid = 0, 1-StarBurst2D, 2-Cross2D, 3-CrossDot2D,
4-ThickCross2D, 5-Dash2D, 6-Sphere3D, 7-Vertex2D,
8-Circle2D,9-Triangle2D, 10-Square2D, Diamond2D,
Arrow2D, ThickArrow2D, HookedArrow2D, GlyphType_Last
}'''
  @staticmethod
  def p3Angle(P0, P1, P2):
    """Return the angle (in degrees, rounded to 1 decimal) defined by the
    three control points P0, P1, P2, measured via a temporary hidden
    markups angle node (Slicer presumably measures at the middle point P1
    -- confirm). Note the node is left in the scene, only hidden.
    """
    markupsNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsAngleNode')
    markupsNode.AddControlPoint(vtk.vtkVector3d(P0))
    markupsNode.AddControlPoint(vtk.vtkVector3d(P1))
    markupsNode.AddControlPoint(vtk.vtkVector3d(P2))
    measurement = markupsNode.GetMeasurement("angle").GetValue()
    # NOTE(review): AddNode on a node created via AddNewNodeByClass looks
    # redundant -- confirm it is a no-op.
    slicer.mrmlScene.AddNode(markupsNode)
    markupsNode.SetDisplayVisibility(False)
    return np.around(measurement, 1)
  @staticmethod
  def UpdateSlicePlane(data, view, P1=1, P2=2):
    """Reorient the slice plane of the named slice view so it passes
    through the three points in *data*.

    data:   sequence of three 3D points.
    view:   slice-view name, e.g. "Red" or "Yellow".
    P1, P2: indices of the point pair whose difference defines the
            in-plane X direction of the reformatted slice.
    """
    # a = data
    # points = data
    # Transpose the three points into a (3, 3) array of shape (xyz, point).
    points = np.array([list(zip(*data))])[0]
    # b2 = np.array([list(zip(*a))])
    sliceNode = slicer.app.layoutManager().sliceWidget(view).mrmlSliceNode()
    # Plane origin: centroid of the three points.
    planePosition = points.mean(axis=1)
    # print (planePosition)
    # Plane normal: cross product of two edge vectors lying in the plane.
    planeNormal = np.cross(points[:, 1] - points[:, 0], points[:, 2] - points[:, 0])
    # pPlane1 = 2 if view == "Red" else 1
    # pPlane2 = 1 if view == "Yellow" else 2
    # (pPlane1, pPlane2) = (2, 1) if view == "red" else (1, 2)
    planeX = points[:, P1] - points[:, P2]
    sliceNode.SetSliceToRASByNTP(planeNormal[0], planeNormal[1], planeNormal[2],
                                 planeX[0], planeX[1], planeX[2],
                                 planePosition[0], planePosition[1], planePosition[2], 0)
    @staticmethod
    def p2pCyl(startPoint, endPoint, radius=10, modName="Cyl", plus=0, Seg=3,
               color="red", Opacity=1, RotY=0, Tx=0):
        """Create a ``Seg``-sided prism (cylinder) model from ``startPoint``
        to ``endPoint`` and add it to the Slicer scene as ``modName``.

        The cylinder source is oriented along the start->end axis via a
        direction-cosine matrix, stretched to the point distance plus
        ``plus``, then optionally rotated about Y (``RotY``) and shifted
        along X (``Tx``).  Color/opacity are applied to the model display.
        """
        cylinderSource = vtk.vtkCylinderSource()
        cylinderSource.SetRadius(radius)
        cylinderSource.SetResolution(Seg)
        rng = vtk.vtkMinimalStandardRandomSequence()
        rng.SetSeed(8775070)  # fixed seed keeps the arbitrary basis vector reproducible
        # Compute an orthonormal basis for the cylinder orientation.
        normalizedX = [0] * 3
        normalizedY = [0] * 3
        normalizedZ = [0] * 3
        # The X axis is a vector from start to end
        vtk.vtkMath.Subtract(endPoint, startPoint, normalizedX)
        length = vtk.vtkMath.Norm(normalizedX) + plus
        # length = 20
        vtk.vtkMath.Normalize(normalizedX)
        # The Z axis is X crossed with an arbitrary (random) vector.
        arbitrary = [0] * 3
        for i in range(0, 3):
            rng.Next()
            arbitrary[i] = rng.GetRangeValue(-10, 10)
        vtk.vtkMath.Cross(normalizedX, arbitrary, normalizedZ)
        vtk.vtkMath.Normalize(normalizedZ)
        # The Y axis is Z cross X, completing the right-handed basis.
        vtk.vtkMath.Cross(normalizedZ, normalizedX, normalizedY)
        matrix = vtk.vtkMatrix4x4()
        # Create the direction cosine matrix
        matrix.Identity()
        for i in range(0, 3):
            matrix.SetElement(i, 0, normalizedX[i])
            matrix.SetElement(i, 1, normalizedY[i])
            matrix.SetElement(i, 2, normalizedZ[i])
        # Apply the transforms (order matters: VTK post-multiplies).
        transform = vtk.vtkTransform()
        transform.Translate(startPoint)  # translate to starting point
        transform.Concatenate(matrix)  # apply direction cosines
        transform.RotateZ(-90.0)  # align cylinder to x axis
        transform.Scale(1.0, length, 1.0)  # scale along the height vector
        transform.Translate(0, .5, 0)  # translate to start of cylinder
        transform.RotateY(RotY)
        transform.Translate(Tx, 0, 0)
        # Transform the polydata
        transformPD = vtk.vtkTransformPolyDataFilter()
        transformPD.SetTransform(transform)
        transformPD.SetInputConnection(cylinderSource.GetOutputPort())
        # NOTE(review): stlMapper is created but its output is never used.
        stlMapper = vtk.vtkPolyDataMapper()
        stlMapper.SetInputConnection(transformPD.GetOutputPort())
        vtkNode = slicer.modules.models.logic().AddModel(transformPD.GetOutputPort())
        vtkNode.SetName(modName)
        modelDisplay = vtkNode.GetDisplayNode()
        modelDisplay.SetColor(Helper.myColor(color))
        modelDisplay.SetOpacity(Opacity)
        modelDisplay.SetBackfaceCulling(0)
        # modelDisplay.SetVisibility(1)
        modelDisplay.SetVisibility2D(True)
        # modelDisplay.SetSliceDisplayModeToProjection()
        # dn.SetVisibility2D(True)
        return
    @staticmethod
    def Pcoord(modName="CylR"):
        """Return (axis endpoints, rounded diameter, rim points) of a
        cylinder/prism model.

        Reads the polydata of the model named ``modName`` and collects the
        first half of its points (one rim of the prism).  The axis is made
        of midpoints of diametrically opposed rim points; ``dimeter`` [sic]
        is their distance, rounded to the nearest integer.
        NOTE(review): raises NameError if the model has no points, since
        ``Pxyz`` is only bound inside the loop.
        """
        modelNode = slicer.util.getNode(modName)  # look up the model node
        sr = modelNode.GetPolyData()  # its polygon data
        pxyz = [0, 0, 0]
        NumP = sr.GetNumberOfPoints()  # total number of polydata points
        for i in range(NumP // 2):  # only the first rim is needed
            sr.GetPoint(i, pxyz)  # fills pxyz in place
            # stack the coordinates into an (n, 3) matrix
            if i == 0:
                Pxyz = np.array([pxyz])
            else:
                Pxyz = np.append(Pxyz, np.array([pxyz]), axis=0)
        axisMed0 = (Pxyz[0] + Pxyz[NumP // 4]) / 2
        axisMed1 = (Pxyz[1] + Pxyz[1 + NumP // 4]) / 2
        dimeter = np.linalg.norm(Pxyz[0] - Pxyz[NumP // 4])
        return np.array([axisMed0, axisMed1]), np.around(dimeter), Pxyz
@staticmethod
def delNode(nodeName):
"""What to do to delete multiple Node deletions of nodes (TODO)."""
slicer.util.getNode(nodeName)
slicer.mrmlScene.RemoveNode(slicer.util.getNode(nodeName))
return
@staticmethod
def Pdata(fidNode="T", groups=2):
"""Read the value of the fidlist"""
fidList = slicer.util.getNode(fidNode)
numFids = fidList.GetNumberOfFiducials()
Mdata = [0, 0, 0]
for i in range(numFids):
ras = [0, 0, 0]
# Mdata = [0,0,0]
fidList.GetNthFiducialPosition(i, ras)
if i == 0:
Mdata = np.array([ras])
else:
Mdata = np.append(Mdata, np.array([ras]), axis=0)
data = np.array(Mdata).ravel()
zcount = int(data.size / (3 * groups))
data = np.array(data.reshape(zcount, groups, 3))
# ras = [-1, -1, 1] * data # ras coordinate
return (data)
@staticmethod
def Pdata3(fidName="T", N=0):
"""Three o'clock turns two o'clock"""
Data = Helper.Pdata(fidName, 3)
Numzhuiti = Data.shape[0]
# Data3 = []
for i in range(0, Numzhuiti):
Pa = Data[i, 0]
Pl = Data[i, 1]
Pr = Data[i, 2]
Plo = Pa[1] - Pl[1]
Pro = Pa[1] - Pr[1]
Pal = Pa + [0, Plo * N, 0]
Par = Pa + [0, Pro * N, 0]
if i == 0:
Data3 = np.array([Pal])
Data3 = np.append(Data3, Pl)
Data3 = np.append(Data3, Par)
Data3 = np.append(Data3, Pr)
else:
Data3 = np.append(Data3, Pal)
Data3 = np.append(Data3, Pl)
Data3 = np.append(Data3, Par)
Data3 = np.append(Data3, Pr)
Data3 = np.array(Data3.reshape(Data.shape[0] * 2, 2, 3))
return Data3
    @staticmethod
    def Screws(fidName="T"):
        """Create one screw model ("w_<i>_D:<dim>_L", blue) per point group.

        Reads the point pairs produced by ``Helper.Pdata3`` and, for each
        group, probes the bone entry point, rounds the screw length down to
        a multiple of 5 and estimates the diameter before drawing the
        screw line.
        """
        Data = Helper.Pdata3(fidName)  # Gets the data coordinates
        Pa = Data[:, 0, :]
        Pz = Data[:, 1, :]
        # P0 = FidP[0]
        fids = Data.shape[0]  # The number of groups
        for i in range(fids):
            PA = Pa[i]  # The front point of the vertebrae
            PZ = Pz[i]  # The narrowest point
            boneP = Helper.probeVolume(PA, PZ)
            PA_PB = np.linalg.norm(PA - boneP)  # available bone length
            Length = (PA_PB // 5 - 2) * 5  # round down to a 5 mm multiple, minus 10
            Dim = Helper.estimateDim(PA, PZ)
            Helper.p2pexLine(boneP, PA, Length - PA_PB, Dim, "w_{0}_D:{1}_L".format(i, Dim), "blue")
        return Data
    @staticmethod
    def estimateDim(Pz, Pa, volumeName="baselineROI", minThread=150):
        """Estimate a screw diameter by sweeping trial cylinder radii.

        For 20 candidate radii (2.0, 2.5, ..., 11.5) a temporary prism is
        built between ``Pa`` and ``Pz``; the volume intensity is probed at
        ``seg`` rim points of each prism.
        NOTE(review): ``minThread`` is accepted but never used.
        NOTE(review): ``mean(0)`` averages over the radius sweep, so the
        final ``argmax`` indexes rim probe points, not radii -- verify
        whether ``mean(1)`` (per-radius average) was intended.
        """
        volume_node = slicer.util.getNode(volumeName)
        voxels = slicer.util.arrayFromVolume(volume_node)
        VVList = []  # one intensity vector per candidate radius
        seg = 12  # number of prism sides / probe points per radius
        for ii in range(20):
            i = 2 + ii * .5  # candidate radius
            cylName = "Cyl{}".format(str(i))
            cyl = Helper.p2pCyl(Pa, Pz, i, cylName, 1 - np.linalg.norm(Pz - Pa), Seg=seg)
            pzP = Helper.Pcoord(cylName)[2]
            Helper.delNode(cylName)
            for j in range(seg):
                P = pzP[j * 2]
                # Convert the RAS probe point into voxel (IJK) indices.
                volumeRasToIjk = vtk.vtkMatrix4x4()
                volume_node.GetRASToIJKMatrix(volumeRasToIjk)
                point_Ijk = [0, 0, 0, 1]
                volumeRasToIjk.MultiplyPoint(np.append(P, 1.0), point_Ijk)
                point_Ijk = [int(round(c)) for c in point_Ijk[0:3]]
                # voxels is indexed (k, j, i)
                voxelValue = voxels[point_Ijk[2], point_Ijk[1], point_Ijk[0]]
                # print(voxelValue)
                # if voxelValue<150:
                # AddPoint(P)
                # Accumulate the seg probe values; VVlist (lower-case l) is
                # distinct from the outer VVList.
                if j == 0:
                    VVlist = np.array([voxelValue])
                else:
                    VVlist = np.append(VVlist, voxelValue)
            VVList.append(VVlist)
        arrayV = np.array(VVList).mean(0)
        return np.argmax(arrayV) * .5
    @staticmethod
    def Screw(No, Pz, sDim=0, manulYN=False):
        """Rebuild screw ``No`` with its length snapped to a 5 mm multiple.

        Reads the existing line node "w_<No>*", deletes it and redraws it
        (red) from base PB to tip PT.  With ``manulYN`` True the base point
        is first re-probed against the volume; otherwise the stored
        endpoints are reused as-is.

        Returns (rounded integer length, diameter, PB, PT).
        NOTE(review): ``Pz`` and ``sDim`` are accepted but never used, and
        ``Lscrew`` is read but never used.
        """
        lineNode = "w_{}*".format(No)
        lineN = Helper.Psline(lineNode)
        PB0 = lineN[0]
        # Helper.addFid(PB0,lableName="PB00")
        Pa = lineN[1]
        # Dim = Helper.estimateDim(Pz, Pa)
        Lscrew = lineN[2]
        if manulYN is False:
            # Automatic path: keep the stored base point.
            PB = PB0
            PT = Pa
            Helper.delNode(lineNode)
            B_T = np.linalg.norm(PB - PT)
            Length = 5 * (B_T // 5) - B_T  # correction down to a 5 mm multiple
            screwDim = np.around(lineN[3], 1)
            Helper.p2pexLine(PB, PT, Length, screwDim, "w_{}_D:{}_L".format(No, screwDim), "red")
        else:
            # Manual path: re-probe the base point against the volume first.
            PT = Pa
            # Helper.addFid(PB0, lableName="PB0")
            PB = Helper.probeVolume(PT, PB0)
            # Helper.addFid(PB)
            # logging.debug("PB:{}".format(PB))
            # logging.debug("PT:{}".format(PT))
            B_T = np.linalg.norm(PB - PT)
            Length = 5 * (B_T // 5) - B_T  # correction down to a 5 mm multiple
            screwDim = np.around(lineN[3], 1)
            Helper.delNode(lineNode)
            Helper.p2pexLine(PB, PT, Length, screwDim, "w_{}_D:{}_L".format(No, screwDim), "red")
        return int(Length + B_T), screwDim, PB, PT
    @staticmethod
    def screwAngle(No, Pz):
        """Return the two projection angles (SPA, TPA) of screw ``No``
        relative to target point ``Pz``, each rounded to the nearest degree.

        Axis-aligned helper points are built from the screw tip ``Pa`` and
        ``Pz`` and fed to ``Helper.p3Angle``.
        NOTE(review): ``Lscrew`` is read but never used.
        """
        lineNode = "w_{}*".format(No)
        lineN = Helper.Psline(lineNode)
        PB0 = lineN[0]
        Pa = lineN[1]
        Lscrew = lineN[2]
        # Projection helper points on the coordinate planes through Pa/Pz.
        x = [Pa[0], Pz[1], Pz[2]]
        xy = [Pa[0], Pa[1], Pz[2]]
        y = [Pz[0], Pa[1], Pz[2]]
        yz = [Pz[0], Pa[1], Pa[2]]
        '''
        1. SPA: x_o_xy_Angle((PA[0],PZ[1],PZ[2]),PZ,(PA[0],PA[1],PZ[2]))
        2. TPA: y_o_yz_Angle((PZ[0],PA[1],PZ[2]),PZ,(PZ[0],PA)[1],PA[2]))
        3. xyy: x*tan(SPA+3*sn)
        4. yzz: y*tan(TPA+3*tn)
        '''
        SPA = Helper.p3Angle(x, Pz, xy)  # Coronary Angle (PSA)
        # logging.debug("SPA:{}".format(SPA))
        TPA = Helper.p3Angle(y, Pz, yz)  # Syryatic angle (PTA)
        # logging.debug("TPA:{}".format(TPA))
        return np.around(SPA), np.around(TPA)
| [
"__main__.slicer.mrmlScene.GetNodeByID",
"numpy.argmax",
"__main__.vtk.vtkMath.Norm",
"time.strftime",
"numpy.around",
"__main__.vtk.vtkMath.Subtract",
"sys.stdout.flush",
"numpy.linalg.norm",
"__main__.slicer.app.applicationLogic",
"__main__.vtk.vtkTransformPolyDataFilter",
"numpy.append",
"_... | [((746, 764), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (762, 764), False, 'import sys\n'), ((1593, 1622), '__main__.slicer.app.applicationLogic', 'slicer.app.applicationLogic', ([], {}), '()\n', (1620, 1622), False, 'from __main__ import vtk, slicer\n'), ((1808, 1837), '__main__.slicer.app.applicationLogic', 'slicer.app.applicationLogic', ([], {}), '()\n', (1835, 1837), False, 'from __main__ import vtk, slicer\n'), ((2618, 2650), '__main__.slicer.mrmlScene.GetNodeByID', 'slicer.mrmlScene.GetNodeByID', (['id'], {}), '(id)\n', (2646, 2650), False, 'from __main__ import vtk, slicer\n'), ((2855, 2879), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['Vol'], {}), '(Vol)\n', (2874, 2879), False, 'from __main__ import vtk, slicer\n'), ((2891, 2930), '__main__.slicer.util.arrayFromVolume', 'slicer.util.arrayFromVolume', (['volumeNode'], {}), '(volumeNode)\n', (2918, 2930), False, 'from __main__ import vtk, slicer\n'), ((4906, 4920), 'numpy.array', 'np.array', (['pos3'], {}), '(pos3)\n', (4914, 4920), True, 'import numpy as np\n'), ((5006, 5034), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['nodName'], {}), '(nodName)\n', (5025, 5034), False, 'from __main__ import vtk, slicer\n'), ((5050, 5061), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5058, 5061), True, 'import numpy as np\n'), ((5075, 5086), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (5083, 5086), True, 'import numpy as np\n'), ((5720, 5757), '__main__.slicer.mrmlScene.AddNode', 'slicer.mrmlScene.AddNode', (['tipFiducial'], {}), '(tipFiducial)\n', (5744, 5757), False, 'from __main__ import vtk, slicer\n'), ((6388, 6449), '__main__.slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLMarkupsAngleNode"""'], {}), "('vtkMRMLMarkupsAngleNode')\n", (6422, 6449), False, 'from __main__ import vtk, slicer\n'), ((6669, 6706), '__main__.slicer.mrmlScene.AddNode', 'slicer.mrmlScene.AddNode', (['markupsNode'], {}), '(markupsNode)\n', 
(6693, 6706), False, 'from __main__ import vtk, slicer\n'), ((6758, 6783), 'numpy.around', 'np.around', (['measurement', '(1)'], {}), '(measurement, 1)\n', (6767, 6783), True, 'import numpy as np\n'), ((7153, 7219), 'numpy.cross', 'np.cross', (['(points[:, 1] - points[:, 0])', '(points[:, 2] - points[:, 0])'], {}), '(points[:, 1] - points[:, 0], points[:, 2] - points[:, 0])\n', (7161, 7219), True, 'import numpy as np\n'), ((7785, 7808), '__main__.vtk.vtkCylinderSource', 'vtk.vtkCylinderSource', ([], {}), '()\n', (7806, 7808), False, 'from __main__ import vtk, slicer\n'), ((7889, 7927), '__main__.vtk.vtkMinimalStandardRandomSequence', 'vtk.vtkMinimalStandardRandomSequence', ([], {}), '()\n', (7925, 7927), False, 'from __main__ import vtk, slicer\n'), ((8115, 8170), '__main__.vtk.vtkMath.Subtract', 'vtk.vtkMath.Subtract', (['endPoint', 'startPoint', 'normalizedX'], {}), '(endPoint, startPoint, normalizedX)\n', (8135, 8170), False, 'from __main__ import vtk, slicer\n'), ((8241, 8275), '__main__.vtk.vtkMath.Normalize', 'vtk.vtkMath.Normalize', (['normalizedX'], {}), '(normalizedX)\n', (8262, 8275), False, 'from __main__ import vtk, slicer\n'), ((8431, 8485), '__main__.vtk.vtkMath.Cross', 'vtk.vtkMath.Cross', (['normalizedX', 'arbitrary', 'normalizedZ'], {}), '(normalizedX, arbitrary, normalizedZ)\n', (8448, 8485), False, 'from __main__ import vtk, slicer\n'), ((8488, 8522), '__main__.vtk.vtkMath.Normalize', 'vtk.vtkMath.Normalize', (['normalizedZ'], {}), '(normalizedZ)\n', (8509, 8522), False, 'from __main__ import vtk, slicer\n'), ((8556, 8612), '__main__.vtk.vtkMath.Cross', 'vtk.vtkMath.Cross', (['normalizedZ', 'normalizedX', 'normalizedY'], {}), '(normalizedZ, normalizedX, normalizedY)\n', (8573, 8612), False, 'from __main__ import vtk, slicer\n'), ((8624, 8642), '__main__.vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (8640, 8642), False, 'from __main__ import vtk, slicer\n'), ((8894, 8912), '__main__.vtk.vtkTransform', 'vtk.vtkTransform', ([], {}), 
'()\n', (8910, 8912), False, 'from __main__ import vtk, slicer\n'), ((9329, 9361), '__main__.vtk.vtkTransformPolyDataFilter', 'vtk.vtkTransformPolyDataFilter', ([], {}), '()\n', (9359, 9361), False, 'from __main__ import vtk, slicer\n'), ((9480, 9503), '__main__.vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (9501, 9503), False, 'from __main__ import vtk, slicer\n'), ((10118, 10146), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['modName'], {}), '(modName)\n', (10137, 10146), False, 'from __main__ import vtk, slicer\n'), ((10664, 10705), 'numpy.linalg.norm', 'np.linalg.norm', (['(Pxyz[0] - Pxyz[NumP // 4])'], {}), '(Pxyz[0] - Pxyz[NumP // 4])\n', (10678, 10705), True, 'import numpy as np\n'), ((10884, 10913), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['nodeName'], {}), '(nodeName)\n', (10903, 10913), False, 'from __main__ import vtk, slicer\n'), ((11085, 11113), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['fidNode'], {}), '(fidNode)\n', (11104, 11113), False, 'from __main__ import vtk, slicer\n'), ((13037, 13068), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['volumeName'], {}), '(volumeName)\n', (13056, 13068), False, 'from __main__ import vtk, slicer\n'), ((13080, 13120), '__main__.slicer.util.arrayFromVolume', 'slicer.util.arrayFromVolume', (['volume_node'], {}), '(volume_node)\n', (13107, 13120), False, 'from __main__ import vtk, slicer\n'), ((1135, 1153), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1151, 1153), False, 'import sys\n'), ((2941, 2964), 'numpy.linalg.norm', 'np.linalg.norm', (['(P1 - P2)'], {}), '(P1 - P2)\n', (2955, 2964), True, 'import numpy as np\n'), ((3100, 3118), '__main__.vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (3116, 3118), False, 'from __main__ import vtk, slicer\n'), ((4140, 4167), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos2 - pos1)'], {}), '(pos2 - pos1)\n', (4154, 4167), True, 'import numpy as np\n'), ((4298, 4367), 
'__main__.slicer.mrmlScene.AddNewNodeByClass', 'slicer.mrmlScene.AddNewNodeByClass', (['"""vtkMRMLMarkupsLineNode"""', 'modName'], {}), "('vtkMRMLMarkupsLineNode', modName)\n", (4332, 4367), False, 'from __main__ import vtk, slicer\n'), ((5552, 5587), '__main__.slicer.vtkMRMLMarkupsFiducialNode', 'slicer.vtkMRMLMarkupsFiducialNode', ([], {}), '()\n', (5585, 5587), False, 'from __main__ import vtk, slicer\n'), ((6480, 6499), '__main__.vtk.vtkVector3d', 'vtk.vtkVector3d', (['P0'], {}), '(P0)\n', (6495, 6499), False, 'from __main__ import vtk, slicer\n'), ((6531, 6550), '__main__.vtk.vtkVector3d', 'vtk.vtkVector3d', (['P1'], {}), '(P1)\n', (6546, 6550), False, 'from __main__ import vtk, slicer\n'), ((6582, 6601), '__main__.vtk.vtkVector3d', 'vtk.vtkVector3d', (['P2'], {}), '(P2)\n', (6597, 6601), False, 'from __main__ import vtk, slicer\n'), ((8182, 8211), '__main__.vtk.vtkMath.Norm', 'vtk.vtkMath.Norm', (['normalizedX'], {}), '(normalizedX)\n', (8198, 8211), False, 'from __main__ import vtk, slicer\n'), ((10715, 10745), 'numpy.array', 'np.array', (['[axisMed0, axisMed1]'], {}), '([axisMed0, axisMed1])\n', (10723, 10745), True, 'import numpy as np\n'), ((10747, 10765), 'numpy.around', 'np.around', (['dimeter'], {}), '(dimeter)\n', (10756, 10765), True, 'import numpy as np\n'), ((10944, 10973), '__main__.slicer.util.getNode', 'slicer.util.getNode', (['nodeName'], {}), '(nodeName)\n', (10963, 10973), False, 'from __main__ import vtk, slicer\n'), ((12700, 12726), 'numpy.linalg.norm', 'np.linalg.norm', (['(PA - boneP)'], {}), '(PA - boneP)\n', (12714, 12726), True, 'import numpy as np\n'), ((13966, 13983), 'numpy.argmax', 'np.argmax', (['arrayV'], {}), '(arrayV)\n', (13975, 13983), True, 'import numpy as np\n'), ((14385, 14408), 'numpy.linalg.norm', 'np.linalg.norm', (['(PB - PT)'], {}), '(PB - PT)\n', (14399, 14408), True, 'import numpy as np\n'), ((14456, 14478), 'numpy.around', 'np.around', (['lineN[3]', '(1)'], {}), '(lineN[3], 1)\n', (14465, 14478), True, 'import 
numpy as np\n'), ((14774, 14797), 'numpy.linalg.norm', 'np.linalg.norm', (['(PB - PT)'], {}), '(PB - PT)\n', (14788, 14797), True, 'import numpy as np\n'), ((14845, 14867), 'numpy.around', 'np.around', (['lineN[3]', '(1)'], {}), '(lineN[3], 1)\n', (14854, 14867), True, 'import numpy as np\n'), ((15752, 15766), 'numpy.around', 'np.around', (['SPA'], {}), '(SPA)\n', (15761, 15766), True, 'import numpy as np\n'), ((15768, 15782), 'numpy.around', 'np.around', (['TPA'], {}), '(TPA)\n', (15777, 15782), True, 'import numpy as np\n'), ((3227, 3244), 'numpy.append', 'np.append', (['P', '(1.0)'], {}), '(P, 1.0)\n', (3236, 3244), True, 'import numpy as np\n'), ((4399, 4420), '__main__.vtk.vtkVector3d', 'vtk.vtkVector3d', (['pos1'], {}), '(pos1)\n', (4414, 4420), False, 'from __main__ import vtk, slicer\n'), ((4453, 4474), '__main__.vtk.vtkVector3d', 'vtk.vtkVector3d', (['pos3'], {}), '(pos3)\n', (4468, 4474), False, 'from __main__ import vtk, slicer\n'), ((9576, 9605), '__main__.slicer.modules.models.logic', 'slicer.modules.models.logic', ([], {}), '()\n', (9603, 9605), False, 'from __main__ import vtk, slicer\n'), ((10479, 10495), 'numpy.array', 'np.array', (['[pxyz]'], {}), '([pxyz])\n', (10487, 10495), True, 'import numpy as np\n'), ((11320, 11335), 'numpy.array', 'np.array', (['[ras]'], {}), '([ras])\n', (11328, 11335), True, 'import numpy as np\n'), ((11408, 11423), 'numpy.array', 'np.array', (['Mdata'], {}), '(Mdata)\n', (11416, 11423), True, 'import numpy as np\n'), ((11975, 11990), 'numpy.array', 'np.array', (['[Pal]'], {}), '([Pal])\n', (11983, 11990), True, 'import numpy as np\n'), ((12003, 12023), 'numpy.append', 'np.append', (['Data3', 'Pl'], {}), '(Data3, Pl)\n', (12012, 12023), True, 'import numpy as np\n'), ((12036, 12057), 'numpy.append', 'np.append', (['Data3', 'Par'], {}), '(Data3, Par)\n', (12045, 12057), True, 'import numpy as np\n'), ((12070, 12090), 'numpy.append', 'np.append', (['Data3', 'Pr'], {}), '(Data3, Pr)\n', (12079, 12090), True, 'import numpy 
as np\n'), ((12112, 12133), 'numpy.append', 'np.append', (['Data3', 'Pal'], {}), '(Data3, Pal)\n', (12121, 12133), True, 'import numpy as np\n'), ((12146, 12166), 'numpy.append', 'np.append', (['Data3', 'Pl'], {}), '(Data3, Pl)\n', (12155, 12166), True, 'import numpy as np\n'), ((12179, 12200), 'numpy.append', 'np.append', (['Data3', 'Par'], {}), '(Data3, Par)\n', (12188, 12200), True, 'import numpy as np\n'), ((12213, 12233), 'numpy.append', 'np.append', (['Data3', 'Pr'], {}), '(Data3, Pr)\n', (12222, 12233), True, 'import numpy as np\n'), ((13431, 13449), '__main__.vtk.vtkMatrix4x4', 'vtk.vtkMatrix4x4', ([], {}), '()\n', (13447, 13449), False, 'from __main__ import vtk, slicer\n'), ((13931, 13947), 'numpy.array', 'np.array', (['VVList'], {}), '(VVList)\n', (13939, 13947), True, 'import numpy as np\n'), ((4196, 4221), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (4210, 4221), True, 'import numpy as np\n'), ((4232, 4259), 'numpy.linalg.norm', 'np.linalg.norm', (['(pos2 - pos1)'], {}), '(pos2 - pos1)\n', (4246, 4259), True, 'import numpy as np\n'), ((10532, 10548), 'numpy.array', 'np.array', (['[pxyz]'], {}), '([pxyz])\n', (10540, 10548), True, 'import numpy as np\n'), ((11374, 11389), 'numpy.array', 'np.array', (['[ras]'], {}), '([ras])\n', (11382, 11389), True, 'import numpy as np\n'), ((13271, 13294), 'numpy.linalg.norm', 'np.linalg.norm', (['(Pz - Pa)'], {}), '(Pz - Pa)\n', (13285, 13294), True, 'import numpy as np\n'), ((13562, 13579), 'numpy.append', 'np.append', (['P', '(1.0)'], {}), '(P, 1.0)\n', (13571, 13579), True, 'import numpy as np\n'), ((13818, 13840), 'numpy.array', 'np.array', (['[voxelValue]'], {}), '([voxelValue])\n', (13826, 13840), True, 'import numpy as np\n'), ((13865, 13894), 'numpy.append', 'np.append', (['VVlist', 'voxelValue'], {}), '(VVlist, voxelValue)\n', (13874, 13894), True, 'import numpy as np\n'), ((678, 712), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d 
%H:%M:%S')\n", (691, 712), False, 'import time\n'), ((7010, 7036), '__main__.slicer.app.layoutManager', 'slicer.app.layoutManager', ([], {}), '()\n', (7034, 7036), False, 'from __main__ import vtk, slicer\n'), ((1067, 1101), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M:%S"""'], {}), "('%Y-%m-%d %H:%M:%S')\n", (1080, 1101), False, 'import time\n')] |
import numpy as np
from galry import *
class DepthVisual(Visual):
    """Point-cloud visual with GL depth testing enabled.

    Each row of ``position`` is drawn as a large GL point whose colour is
    taken from the matching row of ``color``.
    """
    def initialize(self, position, color):
        # position: (N, 3) xyz coordinates; color: (N, 4) RGBA rows.
        self.size = position.shape[0]
        self.add_attribute('position', ndim=3, data=position)
        self.add_attribute('color', ndim=4, data=color)
        # Varying forwards the per-vertex colour to the fragment shader.
        self.add_varying('vcolor', ndim=4)
        self.primitive_type = 'POINTS'
        # Enable the depth buffer so nearer points occlude farther ones.
        self.depth_enabled = True
        self.add_vertex_main("""
        gl_PointSize = 100.;
        vcolor = color;
        """)
        self.add_fragment_main("""
        out_color = vcolor;
        """)
# Demo: ten semi-transparent points along a diagonal with increasing depth.
figure(activate3D=True)
xy = np.linspace(-.5, .5, 10)
depth = np.linspace(0., 1., 10)
position = np.column_stack((xy, xy, depth))
# One RGBA row per point: grey ramp with 50% alpha.
color = np.tile(depth.reshape((-1, 1)), (1, 4))
color[:, -1] = 0.5
visual(DepthVisual, position, color)
show()
| [
"numpy.zeros",
"numpy.linspace"
] | [((594, 611), 'numpy.zeros', 'np.zeros', (['(10, 3)'], {}), '((10, 3))\n', (602, 611), True, 'import numpy as np\n'), ((644, 670), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)', '(10)'], {}), '(-0.5, 0.5, 10)\n', (655, 670), True, 'import numpy as np\n'), ((685, 710), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(10)'], {}), '(0.0, 1.0, 10)\n', (696, 710), True, 'import numpy as np\n'), ((726, 751), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', '(10)'], {}), '(0.0, 1.0, 10)\n', (737, 751), True, 'import numpy as np\n')] |
__author__ = 'shyue'
import unittest
import numpy as np
from pyhull.convex_hull import ConvexHull
class ConvexHullTestCase(unittest.TestCase):
    """Tests for pyhull.convex_hull.ConvexHull in 2, 3 and 5 dimensions."""

    def setUp(self):
        # 2D hull: unit square centred on the origin.
        data = [[-0.5, -0.5], [-0.5, 0.5], [0.5, -0.5], [0.5, 0.5]]
        self.hull = ConvexHull(data)
        # 3D hull: ten scattered points (all extreme, so all on the hull).
        sphere_data = [[0.3927286959385721, 0.3027233106882571,
                        -0.0642087887467873],
                       [-0.3040289937812381, 0.08411211324060132,
                        -0.3879323695524365],
                       [-0.4167147320140305, -0.1124203247935928,
                        0.252409395022804],
                       [-0.09784613055257477, 0.3994051836806832,
                        0.2844321254445218],
                       [0.0167085276338464, 0.4969839143091518,
                        0.05222847903455247],
                       [-0.3010383814570601, 0.3973744439739354,
                        0.03833332970300512],
                       [0.321916886905792, -0.3514017778986294,
                        -0.1512822144687402],
                       [-0.02357104947939958, 0.4654006301246191,
                        -0.1812364728912125],
                       [0.3199720537828011, -0.3299443654301472,
                        -0.1968618818332127],
                       [-0.4630278928730662, -0.1886147011806086,
                        0.005446551209538857]]
        self.sphull = ConvexHull(sphere_data)
        #Make sure higher dim works.
        points = np.random.randn(10, 5)
        self.hdhull = ConvexHull(points)
        #Check that bad points raises an error.
        bad_points = [[0,0], [0,1,0], [0,0]]
        self.assertRaises(ValueError, ConvexHull, bad_points)

    def test_vertices(self):
        # Exact facet lists (order-sensitive) as emitted by qhull.
        expected_ans = [[0, 2], [1, 0], [2, 3], [3, 1]]
        self.assertEqual(self.hull.vertices, expected_ans)
        expected_ans = [[1, 5, 9], [6, 3, 0], [6, 8, 9], [8, 1, 9], [8, 6, 0],
                        [1, 8, 0], [7, 1, 0], [7, 5, 1], [2, 6, 9], [2, 3, 6],
                        [5, 2, 9], [3, 2, 5], [4, 3, 5], [7, 4, 5], [3, 4, 0],
                        [4, 7, 0]]
        self.assertEqual(self.sphull.vertices, expected_ans)

    def test_joggle(self):
        # With joggling, facet order is not guaranteed -- compare as sets.
        joggled_hull = ConvexHull(self.hull.points, joggle=True)
        expected_ans = set([(0, 2), (1, 0), (2, 3), (3, 1)])
        ans = set([tuple(x) for x in joggled_hull.vertices])
        self.assertEqual(ans, expected_ans)
        joggled_sphull = ConvexHull(self.sphull.points, joggle=True)
        expected_ans = set([(1, 5, 9), (6, 3, 0), (6, 8, 9), (8, 1, 9), (8, 6, 0),
                            (1, 8, 0), (7, 1, 0), (7, 5, 1), (2, 6, 9), (2, 3, 6),
                            (5, 2, 9), (3, 2, 5), (4, 3, 5), (7, 4, 5), (3, 4, 0),
                            (4, 7, 0)])
        ans = set([tuple(x) for x in joggled_sphull.vertices])
        self.assertEqual(ans, expected_ans)

    def test_redundant_points(self):
        # Interior edge-midpoints must not change the hull vertices.
        data = self.hull.points
        expected_ans = [[0, 2], [1, 0], [2, 3], [3, 1]]
        data.extend([[0, -0.5], [0, 0.5], [-0.5, 0], [0.5, 0]])
        self.assertEqual(ConvexHull(data).vertices, expected_ans)

    def test_simplices(self):
        self.assertEqual(len(self.hull.simplices), 4)
        self.assertEqual(len(self.sphull.simplices), 16)

    def test_dim(self):
        self.assertEqual(self.hull.dim, 2)
        self.assertEqual(self.sphull.dim, 3)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"pyhull.convex_hull.ConvexHull",
"numpy.random.randn"
] | [((3459, 3474), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3472, 3474), False, 'import unittest\n'), ((257, 273), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['data'], {}), '(data)\n', (267, 273), False, 'from pyhull.convex_hull import ConvexHull\n'), ((1406, 1429), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['sphere_data'], {}), '(sphere_data)\n', (1416, 1429), False, 'from pyhull.convex_hull import ConvexHull\n'), ((1485, 1507), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (1500, 1507), True, 'import numpy as np\n'), ((1530, 1548), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (1540, 1548), False, 'from pyhull.convex_hull import ConvexHull\n'), ((2242, 2283), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['self.hull.points'], {'joggle': '(True)'}), '(self.hull.points, joggle=True)\n', (2252, 2283), False, 'from pyhull.convex_hull import ConvexHull\n'), ((2475, 2518), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['self.sphull.points'], {'joggle': '(True)'}), '(self.sphull.points, joggle=True)\n', (2485, 2518), False, 'from pyhull.convex_hull import ConvexHull\n'), ((3131, 3147), 'pyhull.convex_hull.ConvexHull', 'ConvexHull', (['data'], {}), '(data)\n', (3141, 3147), False, 'from pyhull.convex_hull import ConvexHull\n')] |
import os
import json
import albumentations
import numpy as np
from PIL import Image
from tqdm import tqdm
from torch.utils.data import Dataset
from forks.taming_transformers.taming.data.sflckr import SegmentationBase # for examples included in repo
class Examples(SegmentationBase):
    """Bundled COCO example (image, segmentation) pairs.

    Thin wrapper around SegmentationBase preconfigured with the example
    csv/image/segmentation paths shipped in the repo (183 labels, with the
    shifted segmentation encoding).
    """
    def __init__(self, size=256, random_crop=False, interpolation="bicubic"):
        super().__init__(data_csv="data/coco_examples.txt",
                         data_root="data/coco_images",
                         segmentation_root="data/coco_segmentations",
                         size=size, random_crop=random_crop,
                         interpolation=interpolation,
                         n_labels=183, shift_segmentation=True)
class CocoBase(Dataset):
    """Base dataset yielding (image, caption, segmentation) triples from COCO.

    Subclasses supply ``get_split()`` ("train"/"validation") plus the image
    root and captions JSON via ``__init__`` keyword arguments.
    """
    def __init__(self, size=None, dataroot="", datajson="", onehot_segmentation=False, use_stuffthing=False,
                 crop_size=None, force_no_crop=False, given_files=None):
        """
        Args:
            size: side length images/segmentations are rescaled to.
            dataroot: directory containing the COCO images.
            datajson: path to captions_{train,val}2017.json.
            onehot_segmentation: return the segmentation one-hot encoded
                instead of as an RGB map (stuffthings labels only).
            use_stuffthing: use the stuff+thing pixel maps instead of the
                stuff-only ones.
            crop_size: side length of the crop; defaults to ``size``.
            force_no_crop: only rescale, never crop.
            given_files: optional whitelist of segmentation png filenames.
        """
        self.split = self.get_split()
        self.size = size
        if crop_size is None:
            self.crop_size = size
        else:
            self.crop_size = crop_size
        self.onehot = onehot_segmentation  # return segmentation as rgb or one hot
        self.stuffthing = use_stuffthing  # include thing in segmentation
        if self.onehot and not self.stuffthing:
            # Bug fix: the original raised NotImplemented, which is not an
            # exception type and itself raises TypeError when called;
            # NotImplementedError is the intended exception.
            raise NotImplementedError(
                "One hot mode is only supported for the "
                "stuffthings version because labels are stored "
                "a bit different.")
        data_json = datajson
        with open(data_json) as json_file:
            self.json_data = json.load(json_file)
        self.img_id_to_captions = dict()
        self.img_id_to_filepath = dict()
        self.img_id_to_segmentation_filepath = dict()

        assert data_json.split("/")[-1] in ["captions_train2017.json",
                                            "captions_val2017.json"]
        # Pick the segmentation directory matching the split and label flavour.
        if self.stuffthing:
            self.segmentation_prefix = (
                "data/cocostuffthings/val2017" if
                data_json.endswith("captions_val2017.json") else
                "data/cocostuffthings/train2017")
        else:
            self.segmentation_prefix = (
                "data/coco/annotations/stuff_val2017_pixelmaps" if
                data_json.endswith("captions_val2017.json") else
                "data/coco/annotations/stuff_train2017_pixelmaps")

        imagedirs = self.json_data["images"]
        self.labels = {"image_ids": list()}
        for imgdir in tqdm(imagedirs, desc="ImgToPath"):
            self.img_id_to_filepath[imgdir["id"]] = os.path.join(dataroot, imgdir["file_name"])
            self.img_id_to_captions[imgdir["id"]] = list()
            pngfilename = imgdir["file_name"].replace("jpg", "png")
            self.img_id_to_segmentation_filepath[imgdir["id"]] = os.path.join(
                self.segmentation_prefix, pngfilename)
            if given_files is not None:
                if pngfilename in given_files:
                    self.labels["image_ids"].append(imgdir["id"])
            else:
                self.labels["image_ids"].append(imgdir["id"])

        capdirs = self.json_data["annotations"]
        for capdir in tqdm(capdirs, desc="ImgToCaptions"):
            # there are in average 5 captions per image
            self.img_id_to_captions[capdir["image_id"]].append(np.array([capdir["caption"]]))

        self.rescaler = albumentations.SmallestMaxSize(max_size=self.size)
        if self.split=="validation":
            self.cropper = albumentations.CenterCrop(height=self.crop_size, width=self.crop_size)
        else:
            self.cropper = albumentations.RandomCrop(height=self.crop_size, width=self.crop_size)
        self.preprocessor = albumentations.Compose(
            [self.rescaler, self.cropper],
            additional_targets={"segmentation": "image"})
        if force_no_crop:
            self.rescaler = albumentations.Resize(height=self.size, width=self.size)
            self.preprocessor = albumentations.Compose(
                [self.rescaler],
                additional_targets={"segmentation": "image"})

    def __len__(self):
        return len(self.labels["image_ids"])

    def preprocess_image(self, image_path, segmentation_path):
        """Load, rescale/crop and normalise an (image, segmentation) pair."""
        image = Image.open(image_path)
        if not image.mode == "RGB":
            image = image.convert("RGB")
        image = np.array(image).astype(np.uint8)

        segmentation = Image.open(segmentation_path)
        if not self.onehot and not segmentation.mode == "RGB":
            segmentation = segmentation.convert("RGB")
        segmentation = np.array(segmentation).astype(np.uint8)

        if self.onehot:
            assert self.stuffthing
            # stored in caffe format: unlabeled==255. stuff and thing from
            # 0-181. to be compatible with the labels in
            # https://github.com/nightrome/cocostuff/blob/master/labels.txt
            # we shift stuffthing one to the right and put unlabeled in zero
            # as long as segmentation is uint8 shifting to right handles the
            # latter too
            assert segmentation.dtype == np.uint8
            segmentation = segmentation + 1

        processed = self.preprocessor(image=image, segmentation=segmentation)
        image, segmentation = processed["image"], processed["segmentation"]
        image = (image / 127.5 - 1.0).astype(np.float32)

        if self.onehot:
            assert segmentation.dtype == np.uint8
            # make it one hot
            n_labels = 183
            flatseg = np.ravel(segmentation)
            # Bug fix: np.bool was deprecated and removed in NumPy 1.24;
            # the builtin bool is the documented replacement.
            onehot = np.zeros((flatseg.size, n_labels), dtype=bool)
            onehot[np.arange(flatseg.size), flatseg] = True
            onehot = onehot.reshape(segmentation.shape + (n_labels,)).astype(int)
            segmentation = onehot
        else:
            segmentation = (segmentation / 127.5 - 1.0).astype(np.float32)
        return image, segmentation

    def __getitem__(self, i):
        img_path = self.img_id_to_filepath[self.labels["image_ids"][i]]
        seg_path = self.img_id_to_segmentation_filepath[self.labels["image_ids"][i]]
        image, segmentation = self.preprocess_image(img_path, seg_path)
        captions = self.img_id_to_captions[self.labels["image_ids"][i]]
        # randomly draw one of all available captions per image
        caption = captions[np.random.randint(0, len(captions))]
        example = {"image": image,
                   "caption": [str(caption[0])],
                   "segmentation": segmentation,
                   "img_path": img_path,
                   "seg_path": seg_path,
                   "filename_": img_path.split(os.sep)[-1]
                   }
        return example
class CocoImagesAndCaptionsTrain(CocoBase):
    """COCO train2017 split yielding (image, caption) pairs."""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False):
        super().__init__(size=size,
                         dataroot="data/coco/train2017",
                         datajson="data/coco/annotations/captions_train2017.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop)

    def get_split(self):
        # Split identifier consumed by CocoBase.__init__ (selects RandomCrop).
        return "train"
class CocoImagesAndCaptionsValidation(CocoBase):
    """COCO val2017 split yielding (image, caption) pairs."""
    def __init__(self, size, onehot_segmentation=False, use_stuffthing=False, crop_size=None, force_no_crop=False,
                 given_files=None):
        super().__init__(size=size,
                         dataroot="data/coco/val2017",
                         datajson="data/coco/annotations/captions_val2017.json",
                         onehot_segmentation=onehot_segmentation,
                         use_stuffthing=use_stuffthing, crop_size=crop_size, force_no_crop=force_no_crop,
                         given_files=given_files)

    def get_split(self):
        # Split identifier consumed by CocoBase.__init__ (selects CenterCrop).
        return "validation"
| [
"tqdm.tqdm",
"json.load",
"albumentations.Compose",
"albumentations.CenterCrop",
"albumentations.Resize",
"numpy.ravel",
"numpy.zeros",
"albumentations.SmallestMaxSize",
"PIL.Image.open",
"numpy.array",
"numpy.arange",
"albumentations.RandomCrop",
"os.path.join"
] | [((2615, 2648), 'tqdm.tqdm', 'tqdm', (['imagedirs'], {'desc': '"""ImgToPath"""'}), "(imagedirs, desc='ImgToPath')\n", (2619, 2648), False, 'from tqdm import tqdm\n'), ((3311, 3346), 'tqdm.tqdm', 'tqdm', (['capdirs'], {'desc': '"""ImgToCaptions"""'}), "(capdirs, desc='ImgToCaptions')\n", (3315, 3346), False, 'from tqdm import tqdm\n'), ((3523, 3573), 'albumentations.SmallestMaxSize', 'albumentations.SmallestMaxSize', ([], {'max_size': 'self.size'}), '(max_size=self.size)\n', (3553, 3573), False, 'import albumentations\n'), ((3849, 3953), 'albumentations.Compose', 'albumentations.Compose', (['[self.rescaler, self.cropper]'], {'additional_targets': "{'segmentation': 'image'}"}), "([self.rescaler, self.cropper], additional_targets={\n 'segmentation': 'image'})\n", (3871, 3953), False, 'import albumentations\n'), ((4385, 4407), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4395, 4407), False, 'from PIL import Image\n'), ((4558, 4587), 'PIL.Image.open', 'Image.open', (['segmentation_path'], {}), '(segmentation_path)\n', (4568, 4587), False, 'from PIL import Image\n'), ((1705, 1725), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1714, 1725), False, 'import json\n'), ((2702, 2745), 'os.path.join', 'os.path.join', (['dataroot', "imgdir['file_name']"], {}), "(dataroot, imgdir['file_name'])\n", (2714, 2745), False, 'import os\n'), ((2938, 2989), 'os.path.join', 'os.path.join', (['self.segmentation_prefix', 'pngfilename'], {}), '(self.segmentation_prefix, pngfilename)\n', (2950, 2989), False, 'import os\n'), ((3638, 3708), 'albumentations.CenterCrop', 'albumentations.CenterCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', (3663, 3708), False, 'import albumentations\n'), ((3750, 3820), 'albumentations.RandomCrop', 'albumentations.RandomCrop', ([], {'height': 'self.crop_size', 'width': 'self.crop_size'}), '(height=self.crop_size, width=self.crop_size)\n', 
(3775, 3820), False, 'import albumentations\n'), ((4028, 4084), 'albumentations.Resize', 'albumentations.Resize', ([], {'height': 'self.size', 'width': 'self.size'}), '(height=self.size, width=self.size)\n', (4049, 4084), False, 'import albumentations\n'), ((4117, 4206), 'albumentations.Compose', 'albumentations.Compose', (['[self.rescaler]'], {'additional_targets': "{'segmentation': 'image'}"}), "([self.rescaler], additional_targets={'segmentation':\n 'image'})\n", (4139, 4206), False, 'import albumentations\n'), ((5675, 5697), 'numpy.ravel', 'np.ravel', (['segmentation'], {}), '(segmentation)\n', (5683, 5697), True, 'import numpy as np\n'), ((5719, 5768), 'numpy.zeros', 'np.zeros', (['(flatseg.size, n_labels)'], {'dtype': 'np.bool'}), '((flatseg.size, n_labels), dtype=np.bool)\n', (5727, 5768), True, 'import numpy as np\n'), ((3467, 3496), 'numpy.array', 'np.array', (["[capdir['caption']]"], {}), "([capdir['caption']])\n", (3475, 3496), True, 'import numpy as np\n'), ((4501, 4516), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4509, 4516), True, 'import numpy as np\n'), ((4729, 4751), 'numpy.array', 'np.array', (['segmentation'], {}), '(segmentation)\n', (4737, 4751), True, 'import numpy as np\n'), ((5788, 5811), 'numpy.arange', 'np.arange', (['flatseg.size'], {}), '(flatseg.size)\n', (5797, 5811), True, 'import numpy as np\n')] |
import os
import cv2
import glob
import time
import argparse
import numpy as np
from tqdm import tqdm
from joblib import Parallel, delayed
from PIL import Image
from utils.utils import make_directory, read_csv
# --- Module-level configuration (runs at import time) ------------------------
NUM_MASKSIZE = 512 # Size of the mask map (output canvas is NUM_MASKSIZE x NUM_MASKSIZE)
DIR_INFO = "trainset_info" # Pre-given information for test set (directory of CSV files)
# Command line arguments.
# NOTE: the two '*' wildcards in the default paths are placeholders; they are
# substituted at runtime (see get_testset) with the per-sequence directory
# name and its basename taken from each CSV row.
parser = argparse.ArgumentParser(description="")
parser.add_argument("--dir_intermediate", help="Path directory of intermediate silhouttes",
                    default="../algorithm/data/*/silhouettes/*", type=str)
parser.add_argument("--dir_trainset", help="Path directory of generated train set",
                    default="../algorithm/data/*/train_mask/*", type=str)
parser.add_argument('--n_jobs', help='How many processes to use',
                    default=1, type=int)
# Parsed once at import; read by get_testset() and main() below.
args = parser.parse_args()
def get_testset(list_info, pos):
    """Paste cropped silhouettes back onto full-frame masks and save them.

    Parameters
    ----------
    list_info : list
        CSV rows; per row: [0] output filename, [1] silhouette filename,
        [2]/[3] top-left (row, col) offsets, [-1] relative directory path.
    pos : int
        tqdm progress-bar slot for this worker process.
    """
    progress = tqdm(total=len(list_info), position=pos)
    for info in list_info:
        progress.update(1)
        subdir = os.path.dirname(info[-1])
        leaf = os.path.basename(info[-1])
        src_dir = args.dir_intermediate.replace("*", "%s") % (subdir, leaf)
        silhouette = np.array(Image.open(os.path.join(src_dir, info[1])))
        # Superimpose the silhouette crop onto a blank 512x512 canvas at its
        # recorded (row, col) offset.
        canvas = np.zeros((NUM_MASKSIZE, NUM_MASKSIZE), dtype=np.uint8)
        row0, col0 = int(info[2]), int(info[3])
        canvas[row0:row0 + silhouette.shape[0], col0:col0 + silhouette.shape[1]] = silhouette
        # Save the reconstructed mask next to its sequence directory.
        dst_dir = args.dir_trainset.replace("*", "%s") % (subdir, leaf)
        make_directory(dst_dir)
        cv2.imwrite(os.path.join(dst_dir, info[0]), canvas)
def main():
    """Distribute every CSV-listed frame over ``args.n_jobs`` worker processes."""
    started = time.time()
    for interval_csv in glob.glob(os.path.join(DIR_INFO, "*.csv")):
        list_info = read_csv(interval_csv, trainset_mask=True)
        # Ceil-divide the rows into one contiguous chunk per job.
        chunk_size = -(-len(list_info) // args.n_jobs) if list_info else 0
        chunk_size = chunk_size or len(list_info) // args.n_jobs + (1 if len(list_info) % args.n_jobs > 0 else 0)
        Parallel(n_jobs=args.n_jobs)(
            delayed(get_testset)(list_info[start:start + chunk_size], start // chunk_size)
            for start in range(0, len(list_info), chunk_size)
        )
    print("Processing time: %.2f sec" % (time.time() - started))


if __name__ == "__main__":
    main()
| [
"utils.utils.read_csv",
"argparse.ArgumentParser",
"os.path.basename",
"os.path.dirname",
"numpy.zeros",
"time.time",
"utils.utils.make_directory",
"joblib.Parallel",
"joblib.delayed",
"os.path.join"
] | [((359, 398), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (382, 398), False, 'import argparse\n'), ((1740, 1751), 'time.time', 'time.time', ([], {}), '()\n', (1749, 1751), False, 'import time\n'), ((1285, 1339), 'numpy.zeros', 'np.zeros', (['(NUM_MASKSIZE, NUM_MASKSIZE)'], {'dtype': 'np.uint8'}), '((NUM_MASKSIZE, NUM_MASKSIZE), dtype=np.uint8)\n', (1293, 1339), True, 'import numpy as np\n'), ((1618, 1646), 'utils.utils.make_directory', 'make_directory', (['dir_trainset'], {}), '(dir_trainset)\n', (1632, 1646), False, 'from utils.utils import make_directory, read_csv\n'), ((1787, 1818), 'os.path.join', 'os.path.join', (['DIR_INFO', '"""*.csv"""'], {}), "(DIR_INFO, '*.csv')\n", (1799, 1818), False, 'import os\n'), ((1841, 1883), 'utils.utils.read_csv', 'read_csv', (['interval_csv'], {'trainset_mask': '(True)'}), '(interval_csv, trainset_mask=True)\n', (1849, 1883), False, 'from utils.utils import make_directory, read_csv\n'), ((1667, 1702), 'os.path.join', 'os.path.join', (['dir_trainset', 'info[0]'], {}), '(dir_trainset, info[0])\n', (1679, 1702), False, 'import os\n'), ((1993, 2021), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'args.n_jobs'}), '(n_jobs=args.n_jobs)\n', (2001, 2021), False, 'from joblib import Parallel, delayed\n'), ((1068, 1093), 'os.path.dirname', 'os.path.dirname', (['info[-1]'], {}), '(info[-1])\n', (1083, 1093), False, 'import os\n'), ((1095, 1121), 'os.path.basename', 'os.path.basename', (['info[-1]'], {}), '(info[-1])\n', (1111, 1121), False, 'import os\n'), ((1168, 1207), 'os.path.join', 'os.path.join', (['dir_intermediate', 'info[1]'], {}), '(dir_intermediate, info[1])\n', (1180, 1207), False, 'import os\n'), ((1555, 1580), 'os.path.dirname', 'os.path.dirname', (['info[-1]'], {}), '(info[-1])\n', (1570, 1580), False, 'import os\n'), ((1582, 1608), 'os.path.basename', 'os.path.basename', (['info[-1]'], {}), '(info[-1])\n', (1598, 1608), False, 'import os\n'), ((2224, 
2235), 'time.time', 'time.time', ([], {}), '()\n', (2233, 2235), False, 'import time\n'), ((2035, 2055), 'joblib.delayed', 'delayed', (['get_testset'], {}), '(get_testset)\n', (2042, 2055), False, 'from joblib import Parallel, delayed\n')] |
import numpy as np
from vispy import scene, app
from ..base.CLU import CLU
from .color_scheme import palette
from ..core.correlate import correlate
from phy.plot import View
class correlogram_view(View):
    ''' Grid of auto- and cross-correlograms, one cell per cluster pair.

    For grid purpose, phy.view is much faster than vispy.grid_view

    Parameters
    ----------
    correlate : func
        a func to calculate correlate which defined within core/correlate.py
    fs : float
        sample rate
    window_bins : int
        the number of bins of window
    bin_size : int
        the time interval(ms) to sum spikes.
    '''
    def __init__(self, correlate=correlate, fs=25e3, window_bins=50, bin_size=1, show=False):
        super(correlogram_view, self).__init__('grid')
        self._window_bins = window_bins
        self._bin_size = bin_size
        # white is used for every off-diagonal (cross-correlogram) cell
        self._default_color = np.ones(4, dtype='int32')
        # inject the function used to compute the correlograms
        self._correlate = correlate
        self._fs = fs

    ### ----------------------------------------------
    ### public method
    ### ----------------------------------------------

    def set_bin_window(self, bin=None, window=None):
        '''
        Set the bin size and window length (both in ms). An argument left as
        None keeps its current value.

        Fix: the original assigned the None defaults unconditionally, so
        calling with only one (or neither) argument crashed on ``None % 2``.
        '''
        if window is not None:
            self._window_bins = window
        if bin is not None:
            self._bin_size = bin
        assert self._window_bins % 2 == 0
        assert self._window_bins % self._bin_size == 0

    def set_data(self, clu, spk_times):
        '''Attach a CLU clustering object and its spike times, then render.'''
        self._clu = clu
        self._spike_time = spk_times
        # Not rendering immediately now, waiting for shortcut
        self._render()

    def register_event(self):
        '''Re-render whenever the clustering changes.'''
        @self._clu.connect
        def on_cluster(*args, **kwargs):
            self._render()

    def change_correlate_func(self, func):
        '''
        change the correlate func
        '''
        self._correlate = func
        self._render()

    ### ----------------------------------------------
    ### private method
    ### ----------------------------------------------

    def _correlogram(self):
        # delegates to the injected correlate function; returns hists[i][j]
        return self._correlate(self._spike_time, self._clu.membership, self._clu.index_id, window_bins=self._window_bins, bin_size=self._bin_size)

    def _pair_clusters(self):
        '''
        pair every clusters but ignore the duplicate one.
        '''
        for i in reversed(range(self._clu.nclu)):
            for j in range(i + 1):
                yield i, j

    def _render(self):
        '''
        draw correlogram within grid. eg: if we have 4 clu:
           3+ + + +
           2+ + +
           1+ +
           0+
            0 1 2 3
        on the diagonal it is the auto-correlogram, so the cluster keeps the
        same color it has in the spike view.
        '''
        self.clear()
        self.grid.shape = (self._clu.nclu, self._clu.nclu)
        hists = self._correlogram()
        # begin draw
        with self.building():
            for i, j in self._pair_clusters():
                color = self._default_color if (i != j) else np.hstack((palette[i], 1))
                row, col = self._clu.nclu - 1 - i, j
                self[row, col].hist(hist=hists[i][j], color=color)
| [
"numpy.ones",
"numpy.hstack"
] | [((883, 908), 'numpy.ones', 'np.ones', (['(4)'], {'dtype': '"""int32"""'}), "(4, dtype='int32')\n", (890, 908), True, 'import numpy as np\n'), ((3137, 3163), 'numpy.hstack', 'np.hstack', (['(palette[i], 1)'], {}), '((palette[i], 1))\n', (3146, 3163), True, 'import numpy as np\n')] |
import numpy as np
def get_2d_projection(activation_batch):
# TBD: use pytorch batch svd implementation
projections = []
for activations in activation_batch:
reshaped_activations = (
(activations).reshape(activations.shape[0], -1).transpose()
)
# Centering before the SVD seems to be important here,
# Otherwise the image returned is negative
reshaped_activations = reshaped_activations - reshaped_activations.mean(axis=0)
U, S, VT = np.linalg.svd(reshaped_activations, full_matrices=True)
projection = reshaped_activations @ VT[0, :]
projection = projection.reshape(activations.shape[1:])
projections.append(projection)
return np.float32(projections)
| [
"numpy.float32",
"numpy.linalg.svd"
] | [((730, 753), 'numpy.float32', 'np.float32', (['projections'], {}), '(projections)\n', (740, 753), True, 'import numpy as np\n'), ((508, 563), 'numpy.linalg.svd', 'np.linalg.svd', (['reshaped_activations'], {'full_matrices': '(True)'}), '(reshaped_activations, full_matrices=True)\n', (521, 563), True, 'import numpy as np\n')] |
# Import modules and libraries
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# ======================================================================================================================
# Overlap measure + KDE loss in 3D
# ======================================================================================================================
def dice_loss(pred, target):
    """
    Soft (real-valued) Dice loss: 1 - (2*|P∩T| + s) / (|P|² + |T|² + s).

    This definition generalizes to real valued pred and target vectors.
    pred: tensor with first dimension as batch
    target: tensor with first dimension as batch

    Returns 0 when pred == target, approaching 1 for disjoint volumes.
    """
    smooth = 1.
    # have to use contiguous since they may come from a torch.view op
    iflat = pred.contiguous().view(-1)
    tflat = target.contiguous().view(-1)
    intersection = (iflat * tflat).sum()

    # Fix: the denominator of the soft-Dice coefficient is sum(pred²) +
    # sum(target²). The original computed A_sum as (tflat * iflat) — the
    # intersection again — which made the loss 0 for a completely disjoint
    # prediction (e.g. pred=1s vs target=0s).
    A_sum = torch.sum(iflat * iflat)
    B_sum = torch.sum(tflat * tflat)

    return 1 - ((2. * intersection + smooth) / (A_sum + B_sum + smooth))
# create a 3D gaussian kernel
def GaussianKernel(shape=(7, 7, 7), sigma=1, normfactor=1):
    """
    3D gaussian mask - should give the same result as MATLAB's
    fspecial('gaussian',[shape],[sigma]) in 3D, rescaled so that its peak
    value equals ``normfactor``.

    Parameters
    ----------
    shape : tuple of int
        (depth, height, width) of the kernel; odd sizes keep it centered.
    sigma : float
        Standard deviation of the gaussian.
    normfactor : float
        Value of the kernel maximum after normalization.

    Returns
    -------
    torch.FloatTensor of shape (1, 1, *shape); placed on the GPU only when
    CUDA is available.
    """
    m, n, p = [(ss - 1.) / 2. for ss in shape]
    y, x, z = np.ogrid[-m:m + 1, -n:n + 1, -p:p + 1]
    h = np.exp(-(x * x + y * y + z * z) / (2 * sigma ** 2))
    # zero out numerically negligible tails (same trick fspecial uses)
    h[h < np.finfo(h.dtype).eps * h.max()] = 0
    maxh = h.max()
    if maxh != 0:
        h /= maxh
        h = h * normfactor
    h = torch.from_numpy(h).type(torch.FloatTensor)
    # Fix: the original called .cuda() unconditionally, which crashes on
    # CPU-only machines; move to the GPU only when one is present.
    if torch.cuda.is_available():
        h = h.cuda()
    h = h.unsqueeze(0)
    h = h.unsqueeze(1)
    return h
# define the 3D extended loss function from DeepSTORM
class KDE_loss3D(nn.Module):
    """Deep-STORM 3D loss: MSE between gaussian-smoothed (KDE) spike volumes
    plus a Dice overlap term. ``factor`` rescales the ground-truth KDE and
    the prediction fed to the Dice term."""

    def __init__(self, factor):
        super(KDE_loss3D, self).__init__()
        self.kernel = GaussianKernel()
        self.factor = factor

    def forward(self, pred_bol, target_bol):
        # kernel dimensions are (N, C, D, H, W); only the depth matters here
        _, _, depth, _, _ = self.kernel.size()
        pad = (int(np.round((depth - 1) / 2)), 0, 0)
        # add a channel axis so conv3d sees (batch, 1, D, H, W)
        target_vol = target_bol.unsqueeze(1)
        pred_vol = pred_bol.unsqueeze(1)
        # gaussian KDE of the predicted and (scaled) ground-truth spikes
        pred_kde = F.conv3d(pred_vol, self.kernel, padding=pad)
        target_kde = F.conv3d(target_vol, self.factor * self.kernel, padding=pad)
        # MSE on the smoothed volumes
        mse_term = nn.MSELoss()(pred_kde, target_kde)
        # plus a Dice overlap penalty on the raw (rescaled) spikes
        return mse_term + dice_loss(pred_vol / self.factor, target_vol)
# ======================================================================================================================
# Jaccard index
# ======================================================================================================================
# calculates the jaccard coefficient approximation using per-voxel probabilities
def jaccard_coeff(pred, target):
    """
    Approximate Jaccard index from per-voxel probabilities.

    jaccard index = TP / (TP + FP + FN)
    pred: tensor with first dimension as batch
    target: tensor with first dimension as batch

    Returns the batch-mean Jaccard index.
    """
    # small epsilon avoids division by zero for empty volumes
    eps = 1e-6
    batch = pred.size(0)
    # flatten each example; contiguous() guards against torch.view inputs
    p = pred.contiguous().view(batch, -1)
    t = target.contiguous().view(batch, -1)
    overlap = (p * t).sum(1)
    union = p.sum(1) + t.sum(1) - overlap
    return (overlap / (union + eps)).mean()
| [
"torch.nn.MSELoss",
"numpy.finfo",
"numpy.exp",
"numpy.round",
"torch.sum",
"torch.from_numpy"
] | [((852, 876), 'torch.sum', 'torch.sum', (['(tflat * iflat)'], {}), '(tflat * iflat)\n', (861, 876), False, 'import torch\n'), ((890, 914), 'torch.sum', 'torch.sum', (['(tflat * tflat)'], {}), '(tflat * tflat)\n', (899, 914), False, 'import torch\n'), ((1330, 1381), 'numpy.exp', 'np.exp', (['(-(x * x + y * y + z * z) / (2 * sigma ** 2))'], {}), '(-(x * x + y * y + z * z) / (2 * sigma ** 2))\n', (1336, 1381), True, 'import numpy as np\n'), ((2580, 2592), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (2590, 2592), True, 'import torch.nn as nn\n'), ((1393, 1410), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (1401, 1410), True, 'import numpy as np\n'), ((1629, 1648), 'torch.from_numpy', 'torch.from_numpy', (['h'], {}), '(h)\n', (1645, 1648), False, 'import torch\n'), ((2401, 2422), 'numpy.round', 'np.round', (['((D - 1) / 2)'], {}), '((D - 1) / 2)\n', (2409, 2422), True, 'import numpy as np\n'), ((2507, 2528), 'numpy.round', 'np.round', (['((D - 1) / 2)'], {}), '((D - 1) / 2)\n', (2515, 2528), True, 'import numpy as np\n')] |
import os
import torch.optim as optim
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.distributions import Normal
from torch.optim import Adam
import numpy as np
import random
import copy
import gym
import json
from sklearn.decomposition import PCA
# torch.autograd.set_detect_anomaly(True)
from torch.optim.lr_scheduler import StepLR
import time
import math
import sys
# Hard-coded GPU selection: pin all CUDA work to device 1 when a GPU stack is
# present. NOTE(review): device index 1 assumes a multi-GPU machine — confirm.
if torch.cuda.is_available():
    torch.cuda.set_device(1)
# Make newly created tensors default to CUDA float tensors on GPU machines.
if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
class RBC_Agent:
    """Rule-based controller: charges storage at night, discharges by day,
    based solely on the hour-of-day found in the observation."""

    def __init__(self, actions_spaces):
        # one action space per building
        self.actions_spaces = actions_spaces
        self.reset_action_tracker()

    def reset_action_tracker(self):
        """Drop the history of actions taken so far."""
        self.action_tracker = []

    def select_action(self, states):
        """Pick a fixed charge/discharge rate per building from the hour of day.

        ``states[2][2]`` is read as the hour of day (1-24).
        """
        hour_day = states[2][2]
        spaces = self.actions_spaces
        # Default: do nothing in any storage dimension.
        a = [[0.0 for _ in range(len(sp.sample()))] for sp in spaces]
        # Daytime: release stored energy
        if 9 <= hour_day <= 21:
            a = [[-0.08 for _ in range(len(sp.sample()))] for sp in spaces]
        # Early nightime: store DHW and/or cooling energy
        if 1 <= hour_day <= 8 or 22 <= hour_day <= 24:
            a = []
            for sp in spaces:
                if len(sp.sample()) == 2:
                    a.append([0.091, 0.091])
                else:
                    a.append([0.091])
        self.action_tracker.append(a)
        return np.array(a)
class PolicyNetwork(nn.Module):
    """Squashed-Gaussian policy head for SAC.

    Two ReLU hidden layers feed two linear heads that emit the mean and
    log-std of a diagonal Gaussian; sampled actions are tanh-squashed and
    affinely mapped into the environment's action range (scaled by
    ``action_scaling_coef``).
    """

    def __init__(self, num_inputs, num_actions, action_space, action_scaling_coef, hidden_dim=[400, 300],
                 init_w=3e-3, log_std_min=-20, log_std_max=2, epsilon=1e-6):
        super(PolicyNetwork, self).__init__()
        self.log_std_min = log_std_min
        self.log_std_max = log_std_max
        self.epsilon = epsilon

        self.linear1 = nn.Linear(num_inputs, hidden_dim[0])
        self.linear2 = nn.Linear(hidden_dim[0], hidden_dim[1])
        self.mean_linear = nn.Linear(hidden_dim[1], num_actions)
        self.log_std_linear = nn.Linear(hidden_dim[1], num_actions)

        # small uniform init keeps the initial policy close to zero output
        self.mean_linear.weight.data.uniform_(-init_w, init_w)
        self.mean_linear.bias.data.uniform_(-init_w, init_w)
        self.log_std_linear.weight.data.uniform_(-init_w, init_w)
        self.log_std_linear.bias.data.uniform_(-init_w, init_w)

        # affine map from tanh's (-1, 1) onto the (scaled) action interval
        half_range = (action_space.high - action_space.low) / 2.
        midpoint = (action_space.high + action_space.low) / 2.
        self.action_scale = torch.FloatTensor(action_scaling_coef * half_range)
        self.action_bias = torch.FloatTensor(action_scaling_coef * midpoint)

    def forward(self, state):
        hidden = F.relu(self.linear1(state))
        hidden = F.relu(self.linear2(hidden))
        mean = self.mean_linear(hidden)
        # clamp keeps the std within a numerically sane range
        log_std = torch.clamp(self.log_std_linear(hidden), min=self.log_std_min, max=self.log_std_max)
        return mean, log_std

    def sample(self, state):
        """Return (action, log_prob, deterministic_action) for ``state``."""
        mean, log_std = self.forward(state)
        dist = Normal(mean, log_std.exp())
        # reparameterization trick (mean + std * N(0,1))
        pre_tanh = dist.rsample()
        squashed = torch.tanh(pre_tanh)
        action = squashed * self.action_scale + self.action_bias
        # log-prob with the tanh change-of-variables correction (action bound)
        log_prob = dist.log_prob(pre_tanh)
        log_prob -= torch.log(self.action_scale * (1 - squashed.pow(2)) + self.epsilon)
        log_prob = log_prob.sum(1, keepdim=True)
        mean = torch.tanh(mean) * self.action_scale + self.action_bias
        return action, log_prob, mean

    def to(self, device):
        # keep the (non-parameter) scaling buffers on the same device
        self.action_scale = self.action_scale.to(device)
        self.action_bias = self.action_bias.to(device)
        return super(PolicyNetwork, self).to(device)
class SoftQNetwork(nn.Module):
    """State-action value (critic) network for SAC: two LayerNorm'd ReLU
    hidden layers over the concatenated (state, action) and a scalar head."""

    def __init__(self, num_inputs, num_actions, hidden_size=[400, 300], init_w=3e-3):
        super(SoftQNetwork, self).__init__()
        self.linear1 = nn.Linear(num_inputs + num_actions, hidden_size[0])
        self.linear2 = nn.Linear(hidden_size[0], hidden_size[1])
        self.linear3 = nn.Linear(hidden_size[1], 1)
        self.ln1 = nn.LayerNorm(hidden_size[0])
        self.ln2 = nn.LayerNorm(hidden_size[1])
        # small uniform init on the output head
        self.linear3.weight.data.uniform_(-init_w, init_w)
        self.linear3.bias.data.uniform_(-init_w, init_w)

    def forward(self, state, action):
        h = torch.cat([state, action], 1)
        h = self.ln1(F.relu(self.linear1(h)))
        h = self.ln2(F.relu(self.linear2(h)))
        return self.linear3(h)
class ReplayBuffer:
    """Fixed-capacity cyclic experience buffer of (s, a, r, s', done) tuples.
    Once full, the oldest entries are overwritten in ring-buffer order."""

    def __init__(self, capacity):
        self.capacity = capacity
        self.buffer = []
        self.position = 0

    def push(self, state, action, reward, next_state, done):
        """Insert one transition, overwriting the oldest slot when full."""
        entry = (state, action, reward, next_state, done)
        if len(self.buffer) < self.capacity:
            self.buffer.append(entry)
        else:
            self.buffer[self.position] = entry
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return a uniform random minibatch as stacked per-field arrays."""
        batch = random.sample(self.buffer, batch_size)
        state, action, reward, next_state, done = (np.stack(col) for col in zip(*batch))
        return state, action, reward, next_state, done

    def __len__(self):
        return len(self.buffer)
class Net_consumption_Buffer:
    """Small ring buffer holding the last ``size`` consumption readings.

    ``size`` is the number of previous time steps kept; ``dim`` is the number
    of features stored per time step. The backing array is preallocated, and
    old entries are overwritten in ring-buffer order.
    """

    def __init__(self, size, dim):
        self.buffer = np.zeros((size, dim), dtype=np.float32)
        self.max_size = size
        self.dim = dim
        self.size = 0  # number of valid entries currently stored
        self.ptr = 0   # index of the next slot to overwrite

    def push(self, last_step_sample):
        """Store one reading, overwriting the oldest slot when full.

        Note: works for a single scalar/row sample per step; multi-sample
        batches would need reshaping before storage.
        """
        self.buffer[self.ptr] = last_step_sample
        self.ptr = (self.ptr + 1) % self.max_size
        self.size = min(self.size + 1, self.max_size)

    def __len__(self):
        # Fix: report the number of stored samples, not the fixed capacity.
        # len(self.buffer) is always max_size for a preallocated ndarray,
        # so the original returned the capacity even when empty.
        return self.size
class SAC_single_agent:
    """Soft Actor-Critic agent with PCA-compressed, z-normalized observations.

    Lifecycle (as implemented below):
      1. For the first ``exploration_period`` steps, ``select_action`` explores
         (rule-based if ``safe_exploration``, otherwise random samples).
      2. ``add_to_buffer`` collects raw transitions; once ``start_training``
         is reached and enough samples exist, it (once) fits normalization
         statistics and a PCA on the collected states, rewrites the whole
         replay buffer in the compressed space, and from then on normalizes
         and compresses every incoming transition.
      3. ``update`` performs one SAC gradient step (twin critics, squashed-
         Gaussian actor, fixed alpha=0.2, Polyak-averaged targets).
    """

    def __init__(self, building_ids, buildings_states_actions, building_info, observation_spaces=None,
                 action_spaces=None, hidden_dim=[400, 300], discount=0.99, tau=5e-3, lr=3e-4, batch_size=100,
                 replay_buffer_capacity=1e5, start_training = None,exploration_period = None,safe_exploration= False,
                 action_scaling_coef=1., reward_scaling=1., update_per_step=1,seed=0,pca_compression = 1.,add_previous_consumption=False):
        """Build networks, optimizers and buffers.

        buildings_states_actions : path to a JSON config file (loaded here).
        pca_compression : fraction of the state dimensionality kept by PCA.
        add_previous_consumption : if True, the previous step's consumption
            (stored in ``state_memory``) is appended to every observation.
        """
        with open(buildings_states_actions) as json_file:
            self.buildings_states_actions = json.load(json_file)
        self.building_ids = building_ids
        self.start_training = start_training
        self.discount = discount
        self.batch_size = batch_size
        self.tau = tau
        self.action_scaling_coef = action_scaling_coef
        self.reward_scaling = reward_scaling
        torch.manual_seed(seed)
        np.random.seed(seed)
        self.deterministic = False
        self.update_per_step = update_per_step
        #self.iterations_as = iterations_as
        self.safe_exploration = safe_exploration
        self.exploration_period = exploration_period
        self.add_previous_consumption = add_previous_consumption
        self.action_list_ = []
        self.action_list2_ = []
        self.time_step = 0
        # flags flip to 1 once the one-shot normalization/PCA fit has run
        self.pca_flag = 0
        self.norm_flag = 0
        self.action_spaces = action_spaces
        self.observation_spaces = observation_spaces
        print(self.observation_spaces)
        # Optimizers/Loss using the Huber loss
        self.soft_q_criterion = nn.SmoothL1Loss()
        # device
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # NOTE: the dict initialization on the next line is dead code — it is
        # immediately overwritten by the list initialization that follows.
        self.critic1_loss_, self.critic2_loss_, self.actor_loss_, self.alpha_loss_, self.alpha_, self.q_tracker = {}, {}, {}, {}, {}, {}
        self.critic1_loss_, self.critic2_loss_, self.actor_loss_, self.alpha_loss_, self.alpha_, self.q_tracker, self.log_pi_tracker = [], [], [], [], [], [], []
        # keeps the last 2 consumption readings (index 0 = older, -1 = newest)
        self.state_memory = Net_consumption_Buffer(2, 1)
        #state_dim = len(self.observation_spaces.low) + 1 # +1 for previous step consumption
        #state_dim = len(self.observation_spaces[0].low) + 1
        state_dim = len(self.observation_spaces.low)
        #print(state_dim)
        # the PCA output dimensionality (optionally +1 for the appended
        # previous-consumption feature)
        if self.add_previous_consumption:
            state_dim = int((pca_compression) * (1+state_dim))
        else:
            state_dim = int((pca_compression) * state_dim)
        #print(state_dim)
        #sys.exit()
        action_dim = self.action_spaces.shape[0]
        #action_dim = self.action_spaces[0].shape[0]
        # entropy temperature is kept fixed (see update()); the log_alpha
        # optimizer below is created but its update is commented out
        self.alpha = 0.2
        self.pca = PCA(n_components=state_dim)
        self.replay_buffer = ReplayBuffer(int(replay_buffer_capacity))
        # init networks: twin critics plus Polyak-averaged target copies
        self.soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(self.device)
        self.soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(self.device)
        self.target_soft_q_net1 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(self.device)
        self.target_soft_q_net2 = SoftQNetwork(state_dim, action_dim, hidden_dim).to(self.device)
        for target_param, param in zip(self.target_soft_q_net1.parameters(),
                                       self.soft_q_net1.parameters()):
            target_param.data.copy_(param.data)
        for target_param, param in zip(self.target_soft_q_net2.parameters(),
                                       self.soft_q_net2.parameters()):
            target_param.data.copy_(param.data)
        # Policy
        self.policy_net = PolicyNetwork(state_dim, action_dim, self.action_spaces, self.action_scaling_coef,
                                       hidden_dim).to(self.device)
        self.soft_q_optimizer1 = optim.Adam(self.soft_q_net1.parameters(), lr=lr)
        self.soft_q1_scheduler= StepLR(self.soft_q_optimizer1,step_size=1, gamma=0.98)
        self.soft_q_optimizer2 = optim.Adam(self.soft_q_net2.parameters(), lr=lr)
        self.soft_q2_scheduler= StepLR(self.soft_q_optimizer2,step_size=1, gamma=0.98)
        self.policy_optimizer = optim.Adam(self.policy_net.parameters(), lr=lr)
        self.soft_pi_scheduler= StepLR(self.policy_optimizer,step_size=1, gamma=0.98)
        self.target_entropy = -np.prod(self.action_spaces.shape).item()
        self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
        self.alpha_optimizer = optim.Adam([self.log_alpha], lr=lr)

    def select_action(self, states, deterministic=False):
        """Return an action for ``states``.

        During the exploration period this is either a rule-based schedule
        (safe_exploration) or a random sample; afterwards the observation is
        normalized, PCA-compressed and fed to the policy network.
        NOTE(review): in safe-exploration mode the hour of day is read from
        states[2] — confirm this matches the caller's observation layout.
        """
        self.time_step += 1
        explore = self.time_step <= self.exploration_period
        k = 0
        if explore:
            if self.safe_exploration:
                hour_day = states[2]
                a_dim = len(self.action_spaces.sample())
                # Daytime: release stored energy
                act = [0.0 for _ in range(a_dim)]
                if hour_day >= 9 and hour_day <= 21:
                    act = [-0.08 for _ in range(a_dim)]
                # Early nightime: store DHW and/or cooling energy
                if (hour_day >= 1 and hour_day <= 8) or (hour_day >= 22 and hour_day <= 24):
                    act = [0.091 for _ in range(a_dim)]
            else:
                act = self.action_scaling_coef * self.action_spaces.sample()
            k += 1
        else:
            state_ = states
            # Append the previous consumption reading to the state
            if self.add_previous_consumption:
                state_ = np.hstack((states, self.state_memory.buffer[-1]))
            # normalize with the statistics fitted in add_to_buffer, then
            # compress with the fitted PCA before querying the policy
            state_ = (state_ - self.norm_mean) / self.norm_std
            state_ = self.pca.transform(state_.reshape(1, -1))[0]
            state_ = torch.FloatTensor(state_).unsqueeze(0).to(self.device)
            if deterministic is False:
                act, _, _ = self.policy_net.sample(state_)
            else:
                _, _, act = self.policy_net.sample(state_)
            act = act.detach().cpu().numpy()[0]
            k += 1
        return act

    def add_to_buffer(self, states, actions, rewards, next_states, done):
        """Store one transition, fitting/applying normalization + PCA.

        The previous step's consumption (taken from ``state_memory``) is
        concatenated to the current state, and the current consumption to the
        next state. The raw consumption is kept in ``state_memory`` while the
        replay buffer holds normalized, PCA-compressed transitions once the
        one-shot fit below has run.
        """
        if self.time_step >= self.start_training and self.batch_size <= len(self.replay_buffer):
            # This code only runs once. Once the random exploration phase is over, we normalize all the states and rewards to make them have mean=0 and std=1, and apply PCA. We push the normalized compressed values back into the buffer, replacing the old buffer.
            if self.pca_flag == 0:
                #if self.norm_flag == 0:
                X = np.array([j[0] for j in self.replay_buffer.buffer])
                self.norm_mean = np.mean(X, axis=0)
                self.norm_std = np.std(X, axis=0) + 1e-5
                X = (X - self.norm_mean) / self.norm_std
                R = np.array([j[2] for j in self.replay_buffer.buffer])
                self.r_norm_mean = np.mean(R)
                self.r_norm_std = np.std(R) / self.reward_scaling + 1e-5
                self.pca.fit(X)
                new_buffer = []
                for s, a, r, s2, dones in self.replay_buffer.buffer:
                    s_buffer = np.hstack(self.pca.transform(((s - self.norm_mean)/self.norm_std).reshape(1,-1))[0])
                    s2_buffer = np.hstack(self.pca.transform(((s2 - self.norm_mean)/self.norm_std).reshape(1,-1))[0])
                    new_buffer.append((s_buffer, a, (r - self.r_norm_mean) / self.r_norm_std, s2_buffer, dones))
                #print(new_buffer[0][0])
                #print(new_buffer[0][2])
                #sys.exit()
                self.replay_buffer.buffer = new_buffer
                self.pca_flag = 1
                self.norm_flag = 1
        # When just collecting samples, push them to the respective buffers,
        # normalizing on the go once the statistics exist. The raw electricity
        # consumption lives in state_memory; the replay buffer stores the
        # normalized/compressed version.
        self.state_memory.push(rewards)
        #print(self.state_memory.buffer[0])
        #print(np.hstack((states, self.state_memory.buffer[0])))
        #print(np.hstack((states, self.state_memory.buffer[0])).shape)
        #sys.exit()
        if self.add_previous_consumption:
            states = np.hstack((states, self.state_memory.buffer[0]))
            next_states = np.hstack((next_states, self.state_memory.buffer[-1]))
        #if self.norm_flag == 1 :
        if self.pca_flag == 1:
            states = (states - self.norm_mean) / self.norm_std
            states = self.pca.transform(states.reshape(1, -1))[0]
            next_states = (next_states - self.norm_mean) / self.norm_std
            next_states = self.pca.transform(next_states.reshape(1, -1))[0]
            #print('next_states shape is ',next_states.shape)
            #sys.exit()
            rewards = (rewards - self.r_norm_mean) / self.r_norm_std
        self.replay_buffer.push(states, actions, rewards, next_states, done)

    def update(self):
        """One SAC gradient step: twin-critic regression toward the soft
        Bellman target, a policy step, and Polyak target updates.

        Returns (q1_loss, q2_loss, policy_loss) as Python floats.
        """
        # for _ in range(1 + max(0, self.time_step - 8760)//5000):
        #if self.time_step >= self.start_training and self.batch_size <= len(self.replay_buffer):
        #for _ in range(self.update_per_step):
        state, action, reward, next_state, done = self.replay_buffer.sample(self.batch_size)
        #print('size of state is ', state.shape)
        if self.device.type == "cuda":
            state = torch.cuda.FloatTensor(state).to(self.device)
            next_state = torch.cuda.FloatTensor(next_state).to(self.device)
            action = torch.cuda.FloatTensor(action).to(self.device)
            reward = torch.cuda.FloatTensor(reward).unsqueeze(1).to(self.device)
            done = torch.cuda.FloatTensor(done).unsqueeze(1).to(self.device)
        else:
            state = torch.FloatTensor(state).to(self.device)
            next_state = torch.FloatTensor(next_state).to(self.device)
            action = torch.FloatTensor(action).to(self.device)
            reward = torch.FloatTensor(reward).unsqueeze(1).to(self.device)
            done = torch.FloatTensor(done).unsqueeze(1).to(self.device)
        with torch.no_grad():
            # Update Q-values. First, sample an action from the Gaussian policy/distribution for the current (next) state and its associated log probability of occurrence.
            new_next_actions, new_log_pi, _ = self.policy_net.sample(next_state)
            # The updated Q-value is found by subtracting the logprob of the sampled action (proportional to the entropy) to the Q-values estimated by the target networks.
            target_q_values = torch.min(
                self.target_soft_q_net1(next_state, new_next_actions),
                self.target_soft_q_net2(next_state, new_next_actions),
            ) - self.alpha * new_log_pi
            q_target = reward + (1 - done) * self.discount * target_q_values
            self.q_tracker.append(q_target.mean())
        # Update Soft Q-Networks
        q1_pred = self.soft_q_net1(state, action)
        q2_pred = self.soft_q_net2(state, action)
        q1_loss = self.soft_q_criterion(q1_pred, q_target)
        q2_loss = self.soft_q_criterion(q2_pred, q_target)
        self.soft_q_optimizer1.zero_grad()
        q1_loss.backward()
        self.soft_q_optimizer1.step()
        self.soft_q_optimizer2.zero_grad()
        q2_loss.backward()
        self.soft_q_optimizer2.step()
        # Update Policy
        new_actions, log_pi, _ = self.policy_net.sample(state)
        q_new_actions = torch.min(
            self.soft_q_net1(state, new_actions),
            self.soft_q_net2(state, new_actions)
        )
        policy_loss = (self.alpha * log_pi - q_new_actions).mean()
        self.policy_optimizer.zero_grad()
        policy_loss.backward()
        self.policy_optimizer.step()
        # Optimize the temperature parameter alpha, used for exploration through entropy maximization
        # (automatic temperature tuning is disabled; alpha stays fixed at 0.2)
        #alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
        #self.alpha_optimizer.zero_grad()
        #alpha_loss.backward()
        #self.alpha_optimizer.step()
        self.alpha = 0.2 # self.log_alpha[uid].exp()
        #self.alpha =self.log_alpha.exp()
        self.critic1_loss_.append(q1_loss.item())
        self.critic2_loss_.append(q2_loss.item())
        self.actor_loss_.append(policy_loss.item())
        #self.alpha_loss_.append(alpha_loss.item())
        #self.alpha_.append(self.alpha.item())
        self.log_pi_tracker.append(log_pi.mean())
        # Soft Updates (Polyak averaging of the target critics)
        for target_param, param in zip(self.target_soft_q_net1.parameters(),
                                       self.soft_q_net1.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.tau) + param.data * self.tau
            )
        for target_param, param in zip(self.target_soft_q_net2.parameters(),
                                       self.soft_q_net2.parameters()):
            target_param.data.copy_(
                target_param.data * (1.0 - self.tau) + param.data * self.tau
            )
        #return q1_loss.item(),q2_loss.item(),policy_loss.item(),alpha_loss.item(),self.alpha.item()
        return q1_loss.item(),q2_loss.item(),policy_loss.item()

    def save_model(self,path):
        """Saves a checkpoint of all the models
        (actor + both critics) under path/monitor/checkpoints/iter_<step>.
        """
        save_path =path+'/monitor/checkpoints/iter_{}'.format(self.time_step)
        #if not os.path.exists(path+'/monitor/checkpoints/iter_{self.time_step}'):
        #save_path = os.makedirs(path+'/monitor/checkpoints/iter_{self.time_step}')
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        #print(save_path)
        actor_path = save_path+"/sac_actor"
        critic1_path = save_path+"/sac_critic1"
        critic2_path = save_path+"/sac_critic2"
        #print('Saving models to {} and {}'.format(actor_path, critic_path))
        torch.save(self.policy_net.state_dict(), actor_path)
        torch.save(self.soft_q_net1.state_dict(), critic1_path)
        torch.save(self.soft_q_net2.state_dict(), critic2_path)

    # Load model parameters
    def load_model(self, actor_path, critic1_path,critic2_path):
        """Restore actor/critic weights from the given checkpoint paths;
        any path passed as None is skipped."""
        print('Loading models from {} and {}'.format(actor_path, critic1_path,critic2_path))
        if actor_path is not None:
            self.policy_net.load_state_dict(torch.load(actor_path))
        if critic1_path is not None:
            self.soft_q_net1.load_state_dict(torch.load(critic1_path))
        if critic2_path is not None:
            self.soft_q_net2.load_state_dict(torch.load(critic2_path))
| [
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"random.sample",
"torch.set_default_tensor_type",
"torch.cat",
"numpy.mean",
"torch.no_grad",
"numpy.prod",
"numpy.std",
"torch.load",
"torch.FloatTensor",
"os.path.exists",
"torch.cuda.FloatTensor",
"torch.nn.LayerNorm",
"torch.nn.... | [((404, 429), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (427, 429), False, 'import torch\n'), ((469, 494), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (492, 494), False, 'import torch\n'), ((435, 459), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(1)'], {}), '(1)\n', (456, 459), False, 'import torch\n'), ((500, 553), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (529, 553), False, 'import torch\n'), ((1593, 1604), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1601, 1604), True, 'import numpy as np\n'), ((2066, 2102), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', 'hidden_dim[0]'], {}), '(num_inputs, hidden_dim[0])\n', (2075, 2102), True, 'import torch.nn as nn\n'), ((2126, 2165), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim[0]', 'hidden_dim[1]'], {}), '(hidden_dim[0], hidden_dim[1])\n', (2135, 2165), True, 'import torch.nn as nn\n'), ((2194, 2231), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim[1]', 'num_actions'], {}), '(hidden_dim[1], num_actions)\n', (2203, 2231), True, 'import torch.nn as nn\n'), ((2262, 2299), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim[1]', 'num_actions'], {}), '(hidden_dim[1], num_actions)\n', (2271, 2299), True, 'import torch.nn as nn\n'), ((2585, 2675), 'torch.FloatTensor', 'torch.FloatTensor', (['(action_scaling_coef * (action_space.high - action_space.low) / 2.0)'], {}), '(action_scaling_coef * (action_space.high - action_space.\n low) / 2.0)\n', (2602, 2675), False, 'import torch\n'), ((2710, 2800), 'torch.FloatTensor', 'torch.FloatTensor', (['(action_scaling_coef * (action_space.high + action_space.low) / 2.0)'], {}), '(action_scaling_coef * (action_space.high + action_space.\n low) / 2.0)\n', (2727, 2800), False, 'import torch\n'), ((3080, 3144), 'torch.clamp', 'torch.clamp', (['log_std'], {'min': 'self.log_std_min', 'max': 'self.log_std_max'}), '(log_std, 
min=self.log_std_min, max=self.log_std_max)\n', (3091, 3144), False, 'import torch\n'), ((3293, 3310), 'torch.distributions.Normal', 'Normal', (['mean', 'std'], {}), '(mean, std)\n', (3299, 3310), False, 'from torch.distributions import Normal\n'), ((3410, 3425), 'torch.tanh', 'torch.tanh', (['x_t'], {}), '(x_t)\n', (3420, 3425), False, 'import torch\n'), ((4180, 4231), 'torch.nn.Linear', 'nn.Linear', (['(num_inputs + num_actions)', 'hidden_size[0]'], {}), '(num_inputs + num_actions, hidden_size[0])\n', (4189, 4231), True, 'import torch.nn as nn\n'), ((4255, 4296), 'torch.nn.Linear', 'nn.Linear', (['hidden_size[0]', 'hidden_size[1]'], {}), '(hidden_size[0], hidden_size[1])\n', (4264, 4296), True, 'import torch.nn as nn\n'), ((4320, 4348), 'torch.nn.Linear', 'nn.Linear', (['hidden_size[1]', '(1)'], {}), '(hidden_size[1], 1)\n', (4329, 4348), True, 'import torch.nn as nn\n'), ((4368, 4396), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size[0]'], {}), '(hidden_size[0])\n', (4380, 4396), True, 'import torch.nn as nn\n'), ((4416, 4444), 'torch.nn.LayerNorm', 'nn.LayerNorm', (['hidden_size[1]'], {}), '(hidden_size[1])\n', (4428, 4444), True, 'import torch.nn as nn\n'), ((4613, 4642), 'torch.cat', 'torch.cat', (['[state, action]', '(1)'], {}), '([state, action], 1)\n', (4622, 4642), False, 'import torch\n'), ((5255, 5293), 'random.sample', 'random.sample', (['self.buffer', 'batch_size'], {}), '(self.buffer, batch_size)\n', (5268, 5293), False, 'import random\n'), ((5569, 5608), 'numpy.zeros', 'np.zeros', (['(size, dim)'], {'dtype': 'np.float32'}), '((size, dim), dtype=np.float32)\n', (5577, 5608), True, 'import numpy as np\n'), ((7156, 7179), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (7173, 7179), False, 'import torch\n'), ((7188, 7208), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (7202, 7208), True, 'import numpy as np\n'), ((7865, 7882), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (7880, 7882), 
True, 'import torch.nn as nn\n'), ((8979, 9006), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'state_dim'}), '(n_components=state_dim)\n', (8982, 9006), False, 'from sklearn.decomposition import PCA\n'), ((10190, 10245), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['self.soft_q_optimizer1'], {'step_size': '(1)', 'gamma': '(0.98)'}), '(self.soft_q_optimizer1, step_size=1, gamma=0.98)\n', (10196, 10245), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((10359, 10414), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['self.soft_q_optimizer2'], {'step_size': '(1)', 'gamma': '(0.98)'}), '(self.soft_q_optimizer2, step_size=1, gamma=0.98)\n', (10365, 10414), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((10526, 10580), 'torch.optim.lr_scheduler.StepLR', 'StepLR', (['self.policy_optimizer'], {'step_size': '(1)', 'gamma': '(0.98)'}), '(self.policy_optimizer, step_size=1, gamma=0.98)\n', (10532, 10580), False, 'from torch.optim.lr_scheduler import StepLR\n'), ((10677, 10731), 'torch.zeros', 'torch.zeros', (['(1)'], {'requires_grad': '(True)', 'device': 'self.device'}), '(1, requires_grad=True, device=self.device)\n', (10688, 10731), False, 'import torch\n'), ((10763, 10798), 'torch.optim.Adam', 'optim.Adam', (['[self.log_alpha]'], {'lr': 'lr'}), '([self.log_alpha], lr=lr)\n', (10773, 10798), True, 'import torch.optim as optim\n'), ((6846, 6866), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (6855, 6866), False, 'import json\n'), ((15547, 15595), 'numpy.hstack', 'np.hstack', (['(states, self.state_memory.buffer[0])'], {}), '((states, self.state_memory.buffer[0]))\n', (15556, 15595), True, 'import numpy as np\n'), ((15622, 15676), 'numpy.hstack', 'np.hstack', (['(next_states, self.state_memory.buffer[-1])'], {}), '((next_states, self.state_memory.buffer[-1]))\n', (15631, 15676), True, 'import numpy as np\n'), ((17426, 17441), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17439, 17441), False, 'import torch\n'), 
((20951, 20976), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (20965, 20976), False, 'import os\n'), ((20990, 21012), 'os.makedirs', 'os.makedirs', (['save_path'], {}), '(save_path)\n', (21001, 21012), False, 'import os\n'), ((3706, 3722), 'torch.tanh', 'torch.tanh', (['mean'], {}), '(mean)\n', (3716, 3722), False, 'import torch\n'), ((7946, 7971), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7969, 7971), False, 'import torch\n'), ((11861, 11910), 'numpy.hstack', 'np.hstack', (['(states, self.state_memory.buffer[-1])'], {}), '((states, self.state_memory.buffer[-1]))\n', (11870, 11910), True, 'import numpy as np\n'), ((13571, 13622), 'numpy.array', 'np.array', (['[j[0] for j in self.replay_buffer.buffer]'], {}), '([j[0] for j in self.replay_buffer.buffer])\n', (13579, 13622), True, 'import numpy as np\n'), ((13656, 13674), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (13663, 13674), True, 'import numpy as np\n'), ((13810, 13861), 'numpy.array', 'np.array', (['[j[2] for j in self.replay_buffer.buffer]'], {}), '([j[2] for j in self.replay_buffer.buffer])\n', (13818, 13861), True, 'import numpy as np\n'), ((13897, 13907), 'numpy.mean', 'np.mean', (['R'], {}), '(R)\n', (13904, 13907), True, 'import numpy as np\n'), ((21731, 21753), 'torch.load', 'torch.load', (['actor_path'], {}), '(actor_path)\n', (21741, 21753), False, 'import torch\n'), ((21837, 21861), 'torch.load', 'torch.load', (['critic1_path'], {}), '(critic1_path)\n', (21847, 21861), False, 'import torch\n'), ((21945, 21969), 'torch.load', 'torch.load', (['critic2_path'], {}), '(critic2_path)\n', (21955, 21969), False, 'import torch\n'), ((10611, 10644), 'numpy.prod', 'np.prod', (['self.action_spaces.shape'], {}), '(self.action_spaces.shape)\n', (10618, 10644), True, 'import numpy as np\n'), ((13707, 13724), 'numpy.std', 'np.std', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (13713, 13724), True, 'import numpy as np\n'), ((16707, 
16736), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['state'], {}), '(state)\n', (16729, 16736), False, 'import torch\n'), ((16778, 16812), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['next_state'], {}), '(next_state)\n', (16800, 16812), False, 'import torch\n'), ((16850, 16880), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['action'], {}), '(action)\n', (16872, 16880), False, 'import torch\n'), ((17089, 17113), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (17106, 17113), False, 'import torch\n'), ((17155, 17184), 'torch.FloatTensor', 'torch.FloatTensor', (['next_state'], {}), '(next_state)\n', (17172, 17184), False, 'import torch\n'), ((17222, 17247), 'torch.FloatTensor', 'torch.FloatTensor', (['action'], {}), '(action)\n', (17239, 17247), False, 'import torch\n'), ((13942, 13951), 'numpy.std', 'np.std', (['R'], {}), '(R)\n', (13948, 13951), True, 'import numpy as np\n'), ((12157, 12182), 'torch.FloatTensor', 'torch.FloatTensor', (['state_'], {}), '(state_)\n', (12174, 12182), False, 'import torch\n'), ((16918, 16948), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['reward'], {}), '(reward)\n', (16940, 16948), False, 'import torch\n'), ((16997, 17025), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['done'], {}), '(done)\n', (17019, 17025), False, 'import torch\n'), ((17285, 17310), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (17302, 17310), False, 'import torch\n'), ((17359, 17382), 'torch.FloatTensor', 'torch.FloatTensor', (['done'], {}), '(done)\n', (17376, 17382), False, 'import torch\n')] |
'''
File: landsat_dataset.py
Author: <NAME>
Version: 0.1
Create: 2016-03-24 11:45:03
Description:
'''
import logging
class qa:
    """Unified QA codes plus conversion from the two CFmask code sets."""

    # unified QA codes used throughout this module
    qa_land = 1
    qa_shadow = 2
    qa_cloud = 3
    qa_water = 4
    qa_snow = 5
    qa_nodata = 255

    # CFmask code set 0
    cf0_land = 1
    cf0_water = 5
    cf0_shadow = 3
    cf0_snow = 4
    cf0_cloud = 2
    cf0_nodata = 255

    # CFmask code set 1
    cf1_land = 0
    cf1_water = 1
    cf1_shadow = 2
    cf1_snow = 3
    cf1_cloud = 4
    cf1_nodata = 255

    def __init__(self):
        pass

    @staticmethod
    def from_fmask(bnd, code_set):
        """Translate a CFmask band into the unified QA codes.

        Args:
            bnd: raster band with ``height``, ``width``, ``data`` and a
                ``from_grid(grid, nodata=...)`` factory.
            code_set: 1 selects the ``cf1_*`` codes, anything else ``cf0_*``.

        Returns:
            A new band (via ``bnd.from_grid``) holding the remapped codes.
        """
        import numpy as np

        if code_set == 1:
            _pairs = (
                (qa.cf1_land, qa.qa_land),
                (qa.cf1_shadow, qa.qa_shadow),
                (qa.cf1_cloud, qa.qa_cloud),
                (qa.cf1_water, qa.qa_water),
                (qa.cf1_snow, qa.qa_snow),
                (qa.cf1_nodata, qa.qa_nodata),
            )
        else:
            _pairs = (
                (qa.cf0_land, qa.qa_land),
                (qa.cf0_shadow, qa.qa_shadow),
                (qa.cf0_cloud, qa.qa_cloud),
                (qa.cf0_water, qa.qa_water),
                (qa.cf0_snow, qa.qa_snow),
                (qa.cf0_nodata, qa.qa_nodata),
            )

        # start from a grid full of nodata, then stamp each mapped code in
        _dat = np.empty((bnd.height, bnd.width), dtype=np.uint8)
        _dat.fill(qa.qa_nodata)
        for _src, _dst in _pairs:
            _dat[bnd.data == _src] = _dst
        return bnd.from_grid(_dat, nodata=qa.qa_nodata)
class sr:
    """Base class for surface-reflectance scene readers.

    Subclasses provide ``_load_band``, ``_bs`` (list of available band
    ids) and ``_inf`` (parsed scene info carrying a ``sensor`` attribute).
    """

    def band(self, b):
        """Load band *b* by its raw id; raise if the scene lacks it."""
        if b not in self._bs:
            raise Exception('failed to find band %s (%s)' % (b, str(self._bs)))
        logging.info('loading band %s' % (b))
        return self._load_band(b)

    def get_band(self, b):
        """Load a band addressed by TM band number; None if unavailable."""
        _b = self._band_no(self._inf, b)
        if _b in self._bs:
            logging.info('loading TM band %s (%s)' % (b, _b))
            return self._load_band(_b)
        logging.error('failed to find band %s (%s) (%s)' % (b, _b, str(self._bs)))
        return None

    def get_cloud(self, b):
        """No cloud mask at the base-class level."""
        return None

    def metadata(self):
        raise Exception('unsupported function')

    def tag(self):
        raise Exception('unsupported function')

    def _band_no_tm_lc(self, b):
        """Map a TM band number onto the matching Landsat-8 (LC) band."""
        if b <= 5:
            return b + 1
        _special = {6: 10, 7: 7}
        if b in _special:
            return _special[b]
        raise Exception('unsupported TM band num %s' % b)

    def _band_no_tm_etm(self, b):
        """Map a TM band number for ETM+ (LE); thermal may be stored as 61."""
        if b != 6:
            return b
        return b if b in self._bs else 61

    def _band_no(self, inf, b):
        """Dispatch band-number translation on the sensor code."""
        _sensor = inf.sensor.upper()
        if _sensor == 'LC':
            return self._band_no_tm_lc(b)
        if _sensor == 'LE':
            return self._band_no_tm_etm(b)
        return b
class sr_dir(sr):
    """Surface-reflectance scene stored as a directory of per-band files.

    Scans the directory once for sr/toa/bt band images, the cfmask layer
    and the MTL metadata file; bands are opened lazily and cached in
    ``self._bnds``.
    """
    def __init__(self, p, fzip):
        # p: scene directory path (must end with '/'); fzip: unzip helper
        self._p = p
        from . import landsat
        self._inf = landsat.parse(p)
        if not self._inf:
            raise Exception('failed to parse %s' % p)
        import os
        if p.endswith('/'):
            self._list_dir(p)
        else:
            raise Exception('not support the file %s' % p)
        self._fzip = fzip
        # cache of opened bands, keyed by band number or 'cloud'
        self._bnds = {}
    def _is_img(self, f):
        """Return True when *f* looks like a raster file (.img/.tif, gz ok)."""
        f = str(f).lower()
        return f.endswith('.img.gz') or f.endswith('.img') or f.endswith('.tif.gz') or f.endswith('.tif')
    def _list_dir(self, p):
        """Index the directory: map band keys to file paths in ``self._fs``
        and collect the numeric band ids in ``self._bs``."""
        import os
        import re
        from gio import file_mag
        _fs = {}
        _bs = []
        for _f in file_mag.get(p).list():
            _p = str(_f)
            # surface-reflectance band, e.g. *_sr_band3.tif
            _m = self._is_img(_p) and re.search('sr_band(\d+)\.', _p)
            if _m:
                _fs['sr_b%s' % _m.group(1)] = _p
                _b = int(_m.group(1))
                if _b not in _bs:
                    _bs.append(_b)
                continue
            # top-of-atmosphere band, two naming variants
            _m = self._is_img(_p) and (re.search('toa_band(\d+)\.', _p) or \
                    re.search('toa_b(\d+)\.', str(_f)))
            if _m:
                _fs['toa_b%s' % _m.group(1)] = _p
                _b = int(_m.group(1))
                if _b not in _bs:
                    _bs.append(_b)
                continue
            # brightness-temperature band
            # NOTE(review): bt bands are filed under the 'toa_b' key —
            # presumably intentional (thermal handled like TOA); confirm
            _m = self._is_img(_p) and re.search('bt_band(\d+)\.', _p)
            if _m:
                _fs['toa_b%s' % _m.group(1)] = _p
                _b = int(_m.group(1))
                if _b not in _bs:
                    _bs.append(_b)
                continue
            # Fmask cloud layer
            _m = self._is_img(_p) and re.search('_cfmask\.', _p)
            if _m:
                _fs['cfmask'] = _p
                continue
            # MTL metadata file (skip lnd* products)
            _m = (not _p.startswith('lnd')) and re.search('_mtl.txt', _p.lower())
            if _m:
                _fs['mtl'] = _p
                continue
        self._fs = _fs
        self._bs = _bs
        logging.info('found bands: %s' % str(self._fs.keys()))
        assert 'mtl' in _fs
    def _load_band(self, b):
        """Open band *b*, preferring the SR file over the TOA file; cached."""
        from . import geo_raster as ge
        from . import file_mag
        _b = b
        logging.info('loading band %s (%s)' % (b, _b))
        if _b not in list(self._bnds.keys()):
            _bn = ('sr_b%s' if 'sr_b%s' % _b in self._fs else 'toa_b%s') % _b
            logging.info('caching band %s' % _bn)
            self._bnds[_b] = ge.open(self._fzip.unzip(file_mag.get(self._fs[_bn]).get())).get_band()
        return self._bnds[_b]
    def get_cloud(self, code_set=0):
        """Return the cached/derived cloud band, converting cfmask codes
        via ``qa.from_fmask``; None when no cloud source exists."""
        from . import file_mag
        _b = 'cloud'
        if _b in list(self._bnds.keys()):
            return self._bnds[_b]
        # if 'cloud' not in self._fs.keys():
        #     return None
        # NOTE(review): _bs holds int band ids, so 'cloud' can never be in
        # _bs — this branch looks unreachable; confirm intent
        if _b in self._bs:
            from . import geo_raster as ge
            _bnd = ge.open(self._fzip.unzip(file_mag.get(self._fs[_b]).get())).get_band().cache()
            self._bnds['cloud'] = _bnd
            return _bnd
        _b = 'cfmask'
        if _b in self._fs:
            from . import geo_raster as ge
            _bnd = qa.from_fmask(ge.open(self._fzip.unzip(file_mag.get(self._fs[_b]).get())).get_band().cache(), code_set)
            self._bnds['cloud'] = _bnd
            return _bnd
        logging.warning('failed to find cfmask in (%s) (%s)' % \
                (str(self._fs.keys()), str(self._bs)))
        return None
    def tda_cloud(self, b):
        """Remap a 0..4-coded cloud band to the unified QA codes (see qa)."""
        _bnd = b
        # NOTE(review): '== None' would conventionally be 'is None'
        if _bnd == None:
            return None
        _dat = _bnd.data
        _idx_land = _dat == 0
        _idx_water = _dat == 1
        _idx_cloud_shadow = _dat == 2
        _idx_snow = _dat == 3
        _idx_cloud = _dat == 4
        import numpy as np
        _ddd = np.empty(_dat.shape, dtype=np.uint8)
        _ddd.fill(0)
        _ddd[_idx_land] = 1
        _ddd[_idx_water] = 5
        _ddd[_idx_cloud_shadow] = 2
        _ddd[_idx_cloud] = 3
        _ddd[_idx_snow] = 4
        return _bnd.from_grid(_ddd, nodata=255)
    def metadata(self):
        """Parse the MTL file into a dict of 'KEY = value' pairs, adding
        SolarAzimuth/SolarZenith/SolarElevation aliases; None if no MTL."""
        from . import file_mag
        if 'mtl' not in list(self._fs.keys()):
            return None
        _ms = {}
        with open(self._fzip.unzip(file_mag.get(self._fs['mtl']).get())) as _fi:
            for _l in _fi:
                _rs = [x.strip() for x in _l.strip().split('=')]
                if len(_rs) == 2:
                    _ms[_rs[0]] = _rs[1]
        if 'SUN_AZIMUTH' in _ms:
            _ms['SolarAzimuth'] = _ms['SUN_AZIMUTH']
        if 'SUN_ELEVATION' in _ms:
            _ms['SolarZenith'] = 90 - float(_ms['SUN_ELEVATION'])
            assert(_ms['SolarZenith'] >= 0)
            _ms['SolarElevation'] = 90 - float(_ms['SolarZenith'])
        # print('sun params: %s, %s, %s' % (_ms['SolarAzimuth'], _ms['SolarZenith'], _ms['SUN_ELEVATION']))
        # if 'SolarAzimuth' not in _ms:
        #     print _ms.keys()
        assert('SolarAzimuth' in _ms)
        return _ms
    def tag(self):
        return 'dir'
class sr_hdf(sr):
    """Surface-reflectance scene stored as one HDF file, one sub-dataset
    per band."""

    def __init__(self, f, fzip):
        """
        Args:
            f: path to the uncompressed ``.hdf`` file.
            fzip: unzip helper (kept for interface parity with sr_dir).

        Raises:
            Exception: if *f* is not an HDF file or cannot be parsed.
        """
        if not f.endswith('.hdf'):
            raise Exception('only support HDF file')
        from . import landsat
        self._inf = landsat.parse(f)
        if not self._inf:
            raise Exception('failed to parse %s' % f)
        from . import geo_raster as ge
        import re
        _img = ge.open(f)
        # collect the numeric band ids advertised by the sub-datasets
        _bs = []
        for _s, _d in _img.sub_datasets():
            _d = re.search(r'(\d+)$', _s)
            if _d:
                # BUG FIX: the original assigned the int to _bs, clobbering
                # the list; sr.band()/sr.get_band() test membership with
                # 'in', which needs a list of band ids
                _bs.append(int(_d.group(1)))
        self._img = _img
        self._bs = _bs
        self._f = f
        self._fzip = fzip

    def _load_band(self, b):
        """Open sub-dataset *b* and return its first band."""
        return self._img.get_subdataset(b).get_band()

    def metadata(self):
        """Return the GDAL metadata dictionary of the HDF container."""
        return self._img.raster.GetMetadata()

    def tag(self):
        return 'hdf'
def load(f, fzip):
    """Open a surface-reflectance scene from a directory or an HDF file.

    Local directory paths are normalized to the trailing-slash form so
    they route to ``sr_dir``; ``.hdf``/``.hdf.gz`` files route to
    ``sr_hdf``. Anything else raises.
    """
    import os

    _is_local = not f.startswith('s3://')
    if _is_local and os.path.isdir(f):
        f += '/'

    if f.endswith('/'):
        return sr_dir(f, fzip)
    if f.endswith(('.hdf.gz', '.hdf')):
        return sr_hdf(fzip.unzip(f), fzip)

    raise Exception('not support file %s' % f)
| [
"os.path.isdir",
"numpy.empty",
"logging.info",
"re.search",
"gio.file_mag.get"
] | [((591, 640), 'numpy.empty', 'np.empty', (['(bnd.height, bnd.width)'], {'dtype': 'np.uint8'}), '((bnd.height, bnd.width), dtype=np.uint8)\n', (599, 640), True, 'import numpy as np\n'), ((1608, 1643), 'logging.info', 'logging.info', (["('loading band %s' % b)"], {}), "('loading band %s' % b)\n", (1620, 1643), False, 'import logging\n'), ((1900, 1949), 'logging.info', 'logging.info', (["('loading TM band %s (%s)' % (b, _b))"], {}), "('loading TM band %s (%s)' % (b, _b))\n", (1912, 1949), False, 'import logging\n'), ((5014, 5060), 'logging.info', 'logging.info', (["('loading band %s (%s)' % (b, _b))"], {}), "('loading band %s (%s)' % (b, _b))\n", (5026, 5060), False, 'import logging\n'), ((6632, 6668), 'numpy.empty', 'np.empty', (['_dat.shape'], {'dtype': 'np.uint8'}), '(_dat.shape, dtype=np.uint8)\n', (6640, 6668), True, 'import numpy as np\n'), ((8775, 8791), 'os.path.isdir', 'os.path.isdir', (['f'], {}), '(f)\n', (8788, 8791), False, 'import os\n'), ((5198, 5235), 'logging.info', 'logging.info', (["('caching band %s' % _bn)"], {}), "('caching band %s' % _bn)\n", (5210, 5235), False, 'import logging\n'), ((8326, 8350), 're.search', 're.search', (['"""(\\\\d+)$"""', '_s'], {}), "('(\\\\d+)$', _s)\n", (8335, 8350), False, 'import re\n'), ((3500, 3515), 'gio.file_mag.get', 'file_mag.get', (['p'], {}), '(p)\n', (3512, 3515), False, 'from gio import file_mag\n'), ((3600, 3633), 're.search', 're.search', (['"""sr_band(\\\\d+)\\\\."""', '_p'], {}), "('sr_band(\\\\d+)\\\\.', _p)\n", (3609, 3633), False, 'import re\n'), ((4206, 4239), 're.search', 're.search', (['"""bt_band(\\\\d+)\\\\."""', '_p'], {}), "('bt_band(\\\\d+)\\\\.', _p)\n", (4215, 4239), False, 'import re\n'), ((4478, 4505), 're.search', 're.search', (['"""_cfmask\\\\."""', '_p'], {}), "('_cfmask\\\\.', _p)\n", (4487, 4505), False, 'import re\n'), ((3872, 3906), 're.search', 're.search', (['"""toa_band(\\\\d+)\\\\."""', '_p'], {}), "('toa_band(\\\\d+)\\\\.', _p)\n", (3881, 3906), False, 'import re\n'), 
((7070, 7099), 'gio.file_mag.get', 'file_mag.get', (["self._fs['mtl']"], {}), "(self._fs['mtl'])\n", (7082, 7099), False, 'from gio import file_mag\n'), ((5291, 5318), 'gio.file_mag.get', 'file_mag.get', (['self._fs[_bn]'], {}), '(self._fs[_bn])\n', (5303, 5318), False, 'from gio import file_mag\n'), ((5731, 5757), 'gio.file_mag.get', 'file_mag.get', (['self._fs[_b]'], {}), '(self._fs[_b])\n', (5743, 5757), False, 'from gio import file_mag\n'), ((6000, 6026), 'gio.file_mag.get', 'file_mag.get', (['self._fs[_b]'], {}), '(self._fs[_b])\n', (6012, 6026), False, 'from gio import file_mag\n')] |
#encoding=utf8
'''
Detection with SSD
In this example, we will load a SSD model and use it to detect objects.
'''
import os
import sys
import argparse
import numpy as np
import cv2
import random
import caffe
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def get_labelname(labelmap, labels):
    """Translate numeric detection labels into their display names.

    Args:
        labelmap: LabelMap message whose ``item`` entries carry ``label``
            (int) and ``display_name`` (str).
        labels: a single label or a list of labels.

    Returns:
        list of display names, one per requested label (asserts that
        every label exists in the map).
    """
    if type(labels) is not list:
        labels = [labels]
    # index the map once; first occurrence wins, matching a linear scan
    by_code = {}
    for item in labelmap.item:
        if item.label not in by_code:
            by_code[item.label] = item.display_name
    names = []
    for code in labels:
        assert code in by_code
        names.append(by_code[code])
    return names
class CaffeDetection:
    """Thin wrapper around a Caffe SSD network for single-image detection."""

    def __init__(self, gpu_id, model_def, model_weights, labelmap_file, data_shape=None):
        """Load the net on *gpu_id* and configure input preprocessing.

        Args:
            gpu_id: CUDA device index.
            model_def: prototxt describing the network structure.
            model_weights: caffemodel with the trained weights.
            labelmap_file: prototxt LabelMap mapping class ids to names.
            data_shape: optional (h, w) or single int; defaults to the
                net's own 'data' blob shape.
        """
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
        # Load the net in the test phase for inference.
        self.net = caffe.Net(model_def,      # defines the structure of the model
                             model_weights,  # contains the trained weights
                             caffe.TEST)     # use test mode (e.g., don't perform dropout)
        if data_shape is None:
            data_shape = self.net.blobs['data'].data.shape
        else:
            if isinstance(data_shape, int):
                data_shape = (data_shape, data_shape)
        self.data_shape = data_shape  # (h, w)
        self.transformer = caffe.io.Transformer({'data': (1, 3, data_shape[0], data_shape[1])})
        # change into (C,H,W) instead of (H,W,C)
        self.transformer.set_transpose('data', (2, 0, 1))
        self.transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
        # the reference model operates on images in [0,255] range instead of [0,1]
        self.transformer.set_raw_scale('data', 255)
        # the reference model has channels in BGR order instead of RGB
        self.transformer.set_channel_swap('data', (2, 1, 0))
        # load PASCAL VOC labels
        # BUG FIX: the file handle was opened without ever being closed and
        # the name 'file' shadowed the builtin; use a context manager.
        self.labelmap = caffe_pb2.LabelMap()
        with open(labelmap_file, 'r') as fh:
            text_format.Merge(str(fh.read()), self.labelmap)

    def detect(self, image_file, conf_thresh=0.5, topn=100):
        """Run SSD detection on one image file.

        Args:
            image_file: path of the image to process.
            conf_thresh: minimum confidence for a detection to be kept.
            topn: maximum number of detections returned.

        Returns:
            list of [xmin, ymin, xmax, ymax, label, score, label_name]
            with coordinates normalized to [0, 1].
        """
        image = caffe.io.load_image(image_file)
        transformed_image = self.transformer.preprocess('data', image)
        self.net.blobs['data'].data[...] = transformed_image
        # Forward pass.
        self.net.forward()
        detections = self.net.blobs['detection_out'].data[...]
        print(detections.shape)
        # Parse the outputs; each row is
        # [image_id, label, score, xmin, ymin, xmax, ymax].
        det_label = detections[0, 0, :, 1]
        det_conf = detections[0, 0, :, 2]
        det_xmin = detections[0, 0, :, 3]
        det_ymin = detections[0, 0, :, 4]
        det_xmax = detections[0, 0, :, 5]
        det_ymax = detections[0, 0, :, 6]
        # Get detections with confidence higher than conf_thresh.
        top_indices = [i for i, conf in enumerate(det_conf) if conf >= conf_thresh]
        top_conf = det_conf[top_indices]
        print("top conf:", top_conf)
        top_label_indices = det_label[top_indices].tolist()
        print('lablelmap:', self.labelmap)
        print('top_label_indices:', top_label_indices)
        top_labels = get_labelname(self.labelmap, top_label_indices)
        print(top_labels)
        top_xmin = det_xmin[top_indices]
        top_ymin = det_ymin[top_indices]
        top_xmax = det_xmax[top_indices]
        top_ymax = det_ymax[top_indices]
        result = []
        for i in range(min(topn, top_conf.shape[0])):
            xmin = top_xmin[i]
            ymin = top_ymin[i]
            xmax = top_xmax[i]
            ymax = top_ymax[i]
            score = top_conf[i]
            label = int(top_label_indices[i])
            label_name = top_labels[i]
            result.append([xmin, ymin, xmax, ymax, label, score, label_name])
        return result
def main(args):
    """Run SSD helmet/face detection over every image in ``args.image_dir``.

    Each detected box is drawn on the image (blue for 'helmet', red for
    'face', green for any other class) and the annotated copy is written
    to ``args.save_path`` as '<name>_detection.jpg'.
    """
    detection = CaffeDetection(args.gpu_id,
                               args.model_def, args.model_weights,
                               args.labelmap_file, args.data_shape)
    img_dir = args.image_dir
    save_path = args.save_path
    list_imgs = os.listdir(img_dir)
    for ind in list_imgs:
        imgPath = img_dir + ind
        if not os.path.isfile(imgPath):
            continue
        print(imgPath)
        result = detection.detect(imgPath)
        img = cv2.imread(imgPath)
        if img is None:
            continue
        imgshape = img.shape  # (h, w, c)
        if imgshape[2] != 3:
            continue
        red = (0, 0, 255)
        blue = (255, 0, 0)
        green = (0, 255, 0)
        for item in result:
            # normalized coords -> pixel coords
            xmin = int(round(item[0] * imgshape[1]))
            ymin = int(round(item[1] * imgshape[0]))
            xmax = int(round(item[2] * imgshape[1]))
            ymax = int(round(item[3] * imgshape[0]))
            print('bbox[%f, %f, %f, %f]\n' % (xmin, ymin, xmax, ymax))
            if xmin <= 0 or ymin <= 0 or xmax <= 0 or ymax <= 0:
                print('Out of boundary.\n')
                continue
            # BUG FIX: 'colors' was previously unbound for any class other
            # than 'helmet'/'face' (NameError on the first such box, stale
            # color on later ones); default to green for other classes.
            if item[-1] == 'helmet':
                colors = blue
            elif item[-1] == 'face':
                colors = red
            else:
                colors = green
            cv2.rectangle(img, (round(xmin), round(ymin)),
                          (round(xmax), round(ymax)), colors, 2)
            cv2.putText(img, '{:s} {:.3f}'.format(item[-1], item[-2]),
                        (round(xmin), round(ymin - 2)),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.8,
                        colors,
                        2
                        )
        path_ = save_path + ind + '_detection.jpg'
        cv2.imwrite(path_, img)
def parse_args():
    """Build and evaluate the command-line parser for the helmet detector.

    Model files default to the safehat model directory two levels above
    the current working directory; output and input image directories
    default alongside it.
    """
    root = os.path.join(os.getcwd(), '../../')
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', type=int, default=1, help='gpu id')
    parser.add_argument('--data_shape', default=(540, 960), type=int)
    parser.add_argument('--labelmap_file',
                        default=os.path.join(root, 'models/safehat/helmet_labelmap_voc.prototxt'))
    parser.add_argument('--model_weights',
                        default=os.path.join(root, 'models/safehat/pelee_helmet.caffemodel'))
    parser.add_argument('--model_def',
                        default=os.path.join(root, 'models/safehat/pelee_helmet.prototxt'))
    parser.add_argument('--save_path', default=os.path.join(root, 'output/helmet/'))
    parser.add_argument('--image_dir', default=os.path.join(root, 'images/helmet/'))
    return parser.parse_args()
if __name__ == '__main__':
main(parse_args())
| [
"caffe.set_mode_gpu",
"argparse.ArgumentParser",
"caffe.io.load_image",
"os.getcwd",
"cv2.imwrite",
"caffe.io.Transformer",
"cv2.imread",
"caffe.set_device",
"caffe.proto.caffe_pb2.LabelMap",
"numpy.array",
"os.path.isfile",
"caffe.Net",
"os.listdir"
] | [((4950, 4969), 'os.listdir', 'os.listdir', (['img_dir'], {}), '(img_dir)\n', (4960, 4969), False, 'import os\n'), ((6880, 6905), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6903, 6905), False, 'import argparse\n'), ((861, 885), 'caffe.set_device', 'caffe.set_device', (['gpu_id'], {}), '(gpu_id)\n', (877, 885), False, 'import caffe\n'), ((894, 914), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (912, 914), False, 'import caffe\n'), ((1026, 1073), 'caffe.Net', 'caffe.Net', (['model_def', 'model_weights', 'caffe.TEST'], {}), '(model_def, model_weights, caffe.TEST)\n', (1035, 1073), False, 'import caffe\n'), ((1620, 1688), 'caffe.io.Transformer', 'caffe.io.Transformer', (["{'data': (1, 3, data_shape[0], data_shape[1])}"], {}), "({'data': (1, 3, data_shape[0], data_shape[1])})\n", (1640, 1688), False, 'import caffe\n'), ((2401, 2421), 'caffe.proto.caffe_pb2.LabelMap', 'caffe_pb2.LabelMap', ([], {}), '()\n', (2419, 2421), False, 'from caffe.proto import caffe_pb2\n'), ((2763, 2794), 'caffe.io.load_image', 'caffe.io.load_image', (['image_file'], {}), '(image_file)\n', (2782, 2794), False, 'import caffe\n'), ((5173, 5192), 'cv2.imread', 'cv2.imread', (['imgPath'], {}), '(imgPath)\n', (5183, 5192), False, 'import cv2\n'), ((6802, 6825), 'cv2.imwrite', 'cv2.imwrite', (['path_', 'img'], {}), '(path_, img)\n', (6813, 6825), False, 'import cv2\n'), ((1928, 1953), 'numpy.array', 'np.array', (['[104, 117, 123]'], {}), '([104, 117, 123])\n', (1936, 1953), True, 'import numpy as np\n'), ((5039, 5062), 'os.path.isfile', 'os.path.isfile', (['imgPath'], {}), '(imgPath)\n', (5053, 5062), False, 'import os\n'), ((7644, 7655), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7653, 7655), False, 'import os\n'), ((7803, 7814), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7812, 7814), False, 'import os\n'), ((7952, 7963), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7961, 7963), False, 'import os\n'), ((8077, 8088), 'os.getcwd', 'os.getcwd', 
([], {}), '()\n', (8086, 8088), False, 'import os\n'), ((8179, 8190), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8188, 8190), False, 'import os\n')] |
# Copyright (C) 2012 <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import numpy as np
from .cython.bond import calculate_bonds_set
class HistBuilder(object):
    """Incrementally builds a fixed-range histogram with uniform bins.

    Values outside ``[low, high)`` are silently discarded.
    """

    def __init__(self, low, high, N_bins):
        """
        Arguments
        ---------
        low, high: histogram range; ``high`` must exceed ``low``.
        N_bins: number of uniform bins (>= 1).
        """
        assert high > low
        assert N_bins >= 1
        self.low = low
        self.high = high
        self.N_bins = N_bins
        self.count = np.zeros(N_bins, dtype=int)
        self.dx = (self.high - self.low) / self.N_bins
        assert self.dx > 0

    def record(self, x):
        """Add the values of array-like *x* to the histogram.

        BUG FIX: the original copied *x* and shifted/scaled it in place;
        for integer input dtypes the in-place float ufunc raises a
        casting error. Converting to float up front accepts any numeric
        sequence and yields identical bin indices for float input.
        """
        x = np.asarray(x, dtype=float)
        index = np.floor((x - self.low) / self.dx).astype(int)
        N = self.N_bins
        count = self.count
        for i in index:
            if (0 <= i < N):
                count[i] += 1

    def get_count(self):
        """Return a copy of the per-bin counts."""
        return self.count.copy()

    def get_low_bounds(self):
        """Return the lower edge of each bin."""
        return self.low + self.dx * np.arange(self.N_bins)

    def get_mid_bounds(self):
        """Return the midpoint of each bin."""
        return self.low + self.dx * (0.5 + np.arange(self.N_bins))
class LogHistBuilder(HistBuilder):
    """Histogram builder whose bins are uniform in log-space."""

    def __init__(self, low, high, N_bins):
        """Bin the interval [low, high] logarithmically into N_bins bins."""
        super(LogHistBuilder, self).__init__(np.log(low), np.log(high), N_bins)

    def record(self, x):
        """Record values *x*; they are log-transformed before binning."""
        transformed = np.log(x)
        super(LogHistBuilder, self).record(transformed)

    def get_low_bounds(self):
        """Lower bin edges mapped back to linear space."""
        edges = super(LogHistBuilder, self).get_low_bounds()
        return np.exp(edges)

    def get_mid_bounds(self):
        """Bin midpoints mapped back to linear space."""
        mids = super(LogHistBuilder, self).get_mid_bounds()
        return np.exp(mids)
class BondDurationAnalyzer(object):
    """Tracks how long pairwise bonds persist across analysis frames.

    Bonds are detected per frame by ``calculate_bonds_set``; when a bond
    disappears, its lifetime (in time steps) is recorded into the
    supplied histogram builder.
    """

    def __init__(self, r_bond, dt, hist_builder):
        """
        Arguments
        ---------
        r_bond: bonding distance cutoff.
        dt: physical time per simulation step.
        hist_builder: histogram builder receiving bond lifetimes (steps).
        """
        self.r_bond = r_bond
        self.dt = dt
        self.hist_builder = hist_builder
        self.N_analyze = 0
        self.start_steps = {}
        self.seen_first = False
        self.last_step = None
        self.analyze_rate = None

    def analyze(self, time_step, positions, box_size):
        """Process one frame: open new bonds, close vanished ones."""
        current = calculate_bonds_set(positions, box_size, self.r_bond)
        tracked = self.start_steps
        # newly formed bonds start their clock at this step
        for pair in current:
            tracked.setdefault(pair, time_step)
        # bonds that vanished contribute a finished lifetime
        ended = [pair for pair in tracked if pair not in current]
        lifetimes = []
        for pair in ended:
            span = time_step - tracked.pop(pair)
            assert span > 0
            lifetimes.append(span)
        self.hist_builder.record(lifetimes)
        # verify analyze() is called at a constant step interval
        if self.seen_first:
            self.N_analyze += 1
            step_delta = time_step - self.last_step
            if self.N_analyze == 1:
                self.analyze_rate = step_delta
            else:
                assert self.analyze_rate == step_delta
        else:
            self.seen_first = True
        self.last_step = time_step

    def get_frequencies(self):
        """Return per-bin bond-termination frequency, or None if not
        enough frames have been analyzed yet."""
        if self.analyze_rate is None or self.N_analyze <= 0:
            return None
        total_time = self.analyze_rate * self.N_analyze * self.dt
        return self.hist_builder.get_count() / total_time

    def get_low_bounds(self):
        """Bin lower edges in physical time units."""
        return self.dt * self.hist_builder.get_low_bounds()

    def get_mid_bounds(self):
        """Bin midpoints in physical time units."""
        return self.dt * self.hist_builder.get_mid_bounds()
| [
"numpy.log",
"numpy.floor",
"numpy.zeros",
"numpy.array",
"numpy.arange"
] | [((919, 946), 'numpy.zeros', 'np.zeros', (['N_bins'], {'dtype': 'int'}), '(N_bins, dtype=int)\n', (927, 946), True, 'import numpy as np\n'), ((1067, 1089), 'numpy.array', 'np.array', (['x'], {'copy': '(True)'}), '(x, copy=True)\n', (1075, 1089), True, 'import numpy as np\n'), ((1680, 1691), 'numpy.log', 'np.log', (['low'], {}), '(low)\n', (1686, 1691), True, 'import numpy as np\n'), ((1693, 1705), 'numpy.log', 'np.log', (['high'], {}), '(high)\n', (1699, 1705), True, 'import numpy as np\n'), ((1784, 1793), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (1790, 1793), True, 'import numpy as np\n'), ((1149, 1160), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (1157, 1160), True, 'import numpy as np\n'), ((1433, 1455), 'numpy.arange', 'np.arange', (['self.N_bins'], {}), '(self.N_bins)\n', (1442, 1455), True, 'import numpy as np\n'), ((1530, 1552), 'numpy.arange', 'np.arange', (['self.N_bins'], {}), '(self.N_bins)\n', (1539, 1552), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from scipy.stats import itemfreq
def get_dominant_color(image, n_colors):
    """Return the color of the largest k-means cluster in *image*.

    Args:
        image: pixel array whose last axis has 3 channels.
        n_colors: number of k-means clusters to fit.

    Returns:
        uint8 array of 3 channel values for the most frequent cluster.
    """
    pixels = np.float32(image).reshape((-1, 3))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)
    flags = cv2.KMEANS_RANDOM_CENTERS
    flags, labels, centroids = cv2.kmeans(
        pixels, n_colors, None, criteria, 10, flags)
    palette = np.uint8(centroids)
    # BUG FIX: scipy.stats.itemfreq was removed in SciPy 1.3; np.unique
    # provides the same label/count table, and indexing the palette via
    # the unique values stays correct even when a cluster label is absent.
    values, counts = np.unique(labels, return_counts=True)
    return palette[values[np.argmax(counts)]]
return palette[np.argmax(itemfreq(labels)[:, -1])]
clicked = False  # set by the mouse callback once the user left-clicks


def onMouse(event, x, y, flags, param):
    """OpenCV mouse callback: latch the module-level ``clicked`` flag on
    left-button-up; all other events are ignored."""
    global clicked
    if event != cv2.EVENT_LBUTTONUP:
        return
    clicked = True
# Main capture loop: detect a circular traffic sign in each webcam frame,
# classify it by dominant color / arrow zones, and print the command.
# Terminates when the user left-clicks the window (sets 'clicked').
cameraCapture = cv2.VideoCapture(0)
cv2.namedWindow('camera')
cv2.setMouseCallback('camera', onMouse)
# Read and process frames in loop
success, frame = cameraCapture.read()
while success and not clicked:
    cv2.waitKey(1)
    success, frame = cameraCapture.read()
    # grayscale + strong median blur before circle detection
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(gray, 37)
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT,
                               1, 50, param1=120, param2=40)
    if not circles is None:
        circles = np.uint16(np.around(circles))
        # keep only the largest detected circle with radius > 50 px
        max_r, max_i = 0, 0
        for i in range(len(circles[:, :, 2][0])):
            if circles[:, :, 2][0][i] > 50 and circles[:, :, 2][0][i] > max_r:
                max_i = i
                max_r = circles[:, :, 2][0][i]
        x, y, r = circles[:, :, :][0][max_i]
        # only classify circles fully inside the frame's top-left bounds
        if y > r and x > r:
            square = frame[y-r:y+r, x-r:x+r]
            dominant_color = get_dominant_color(square, 2)
            # channel 2 is red in OpenCV's BGR order: strong red -> STOP
            if dominant_color[2] > 100:
                print("STOP")
            # channel 0 is blue: blue sign -> direction arrow; sample
            # three zones (left, top, right) to infer the arrow direction
            elif dominant_color[0] > 80:
                zone_0 = square[square.shape[0]*3//8:square.shape[0]
                                * 5//8, square.shape[1]*1//8:square.shape[1]*3//8]
                cv2.imshow('Zone0', zone_0)
                zone_0_color = get_dominant_color(zone_0, 1)
                zone_1 = square[square.shape[0]*1//8:square.shape[0]
                                * 3//8, square.shape[1]*3//8:square.shape[1]*5//8]
                cv2.imshow('Zone1', zone_1)
                zone_1_color = get_dominant_color(zone_1, 1)
                zone_2 = square[square.shape[0]*3//8:square.shape[0]
                                * 5//8, square.shape[1]*5//8:square.shape[1]*7//8]
                cv2.imshow('Zone2', zone_2)
                zone_2_color = get_dominant_color(zone_2, 1)
                # NOTE(review): thresholds (100/80/60) look hand-tuned for
                # a specific camera/lighting setup — confirm before reuse
                if zone_1_color[2] < 60:
                    if sum(zone_0_color) > sum(zone_2_color):
                        print("LEFT")
                    else:
                        print("RIGHT")
                else:
                    if sum(zone_1_color) > sum(zone_0_color) and sum(zone_1_color) > sum(zone_2_color):
                        print("FORWARD")
                    elif sum(zone_0_color) > sum(zone_2_color):
                        print("FORWARD AND LEFT")
                    else:
                        print("FORWARD AND RIGHT")
            else:
                print("N/A")
        # draw every detected circle (outline + center dot) on the frame
        for i in circles[0, :]:
            cv2.circle(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)
            cv2.circle(frame, (i[0], i[1]), 2, (0, 0, 255), 3)
    cv2.imshow('camera', frame)
cv2.destroyAllWindows()
cameraCapture.release()
cameraCapture.release() | [
"numpy.uint8",
"cv2.HoughCircles",
"cv2.circle",
"scipy.stats.itemfreq",
"cv2.medianBlur",
"cv2.waitKey",
"cv2.cvtColor",
"numpy.float32",
"cv2.imshow",
"cv2.VideoCapture",
"numpy.around",
"cv2.setMouseCallback",
"cv2.kmeans",
"cv2.destroyAllWindows",
"cv2.namedWindow"
] | [((609, 628), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (625, 628), False, 'import cv2\n'), ((630, 655), 'cv2.namedWindow', 'cv2.namedWindow', (['"""camera"""'], {}), "('camera')\n", (645, 655), False, 'import cv2\n'), ((656, 695), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['"""camera"""', 'onMouse'], {}), "('camera', onMouse)\n", (676, 695), False, 'import cv2\n'), ((3245, 3268), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3266, 3268), False, 'import cv2\n'), ((300, 355), 'cv2.kmeans', 'cv2.kmeans', (['pixels', 'n_colors', 'None', 'criteria', '(10)', 'flags'], {}), '(pixels, n_colors, None, criteria, 10, flags)\n', (310, 355), False, 'import cv2\n'), ((379, 398), 'numpy.uint8', 'np.uint8', (['centroids'], {}), '(centroids)\n', (387, 398), True, 'import numpy as np\n'), ((808, 822), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (819, 822), False, 'import cv2\n'), ((877, 916), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (889, 916), False, 'import cv2\n'), ((927, 951), 'cv2.medianBlur', 'cv2.medianBlur', (['gray', '(37)'], {}), '(gray, 37)\n', (941, 951), False, 'import cv2\n'), ((966, 1037), 'cv2.HoughCircles', 'cv2.HoughCircles', (['img', 'cv2.HOUGH_GRADIENT', '(1)', '(50)'], {'param1': '(120)', 'param2': '(40)'}), '(img, cv2.HOUGH_GRADIENT, 1, 50, param1=120, param2=40)\n', (982, 1037), False, 'import cv2\n'), ((3215, 3242), 'cv2.imshow', 'cv2.imshow', (['"""camera"""', 'frame'], {}), "('camera', frame)\n", (3225, 3242), False, 'import cv2\n'), ((119, 136), 'numpy.float32', 'np.float32', (['image'], {}), '(image)\n', (129, 136), True, 'import numpy as np\n'), ((1125, 1143), 'numpy.around', 'np.around', (['circles'], {}), '(circles)\n', (1134, 1143), True, 'import numpy as np\n'), ((3094, 3147), 'cv2.circle', 'cv2.circle', (['frame', '(i[0], i[1])', 'i[2]', '(0, 255, 0)', '(2)'], {}), '(frame, (i[0], i[1]), i[2], (0, 255, 0), 2)\n', (3104, 
3147), False, 'import cv2\n'), ((3160, 3210), 'cv2.circle', 'cv2.circle', (['frame', '(i[0], i[1])', '(2)', '(0, 0, 255)', '(3)'], {}), '(frame, (i[0], i[1]), 2, (0, 0, 255), 3)\n', (3170, 3210), False, 'import cv2\n'), ((428, 444), 'scipy.stats.itemfreq', 'itemfreq', (['labels'], {}), '(labels)\n', (436, 444), False, 'from scipy.stats import itemfreq\n'), ((1832, 1859), 'cv2.imshow', 'cv2.imshow', (['"""Zone0"""', 'zone_0'], {}), "('Zone0', zone_0)\n", (1842, 1859), False, 'import cv2\n'), ((2090, 2117), 'cv2.imshow', 'cv2.imshow', (['"""Zone1"""', 'zone_1'], {}), "('Zone1', zone_1)\n", (2100, 2117), False, 'import cv2\n'), ((2348, 2375), 'cv2.imshow', 'cv2.imshow', (['"""Zone2"""', 'zone_2'], {}), "('Zone2', zone_2)\n", (2358, 2375), False, 'import cv2\n')] |
''' Purpose for this file is to verify functions associated with Groups._params dictionary.
'''
import unittest
import c3d
from c3d.group import Group
import numpy as np
import test.verify as verify
from test.zipload import Zipload
from test.base import Base
rnd = np.random.default_rng()


def add_dummy_param(group, name='TEST_NAME', shape=(10, 2), flt_range=(-1e6, 1e6)):
    """Attach a random float32 parameter to *group*.

    Values are drawn uniformly from *flt_range*, shaped to *shape*, and
    stored transposed (column-major) as the raw parameter bytes, which is
    how c3d parameters are laid out on disk.
    """
    low, high = flt_range
    values = rnd.uniform(low, high, size=shape).astype(np.float32)
    group.add_param(name,
                    bytes_per_element=4,
                    dimensions=values.shape,
                    bytes=values.T.tobytes())
class ParamSample():
    ''' Helper object to verify parameter entries persist or terminate properly. '''
    def __init__(self, group):
        assert isinstance(group, Group), \
            'Must pass Group to ParamSample instance, was %s' % type(group)
        self.group = group
        self.sample()

    @property
    def items(self):
        '''Helper to access group items as (name, param) pairs. '''
        return [(k, g) for (k, g) in self.group.items()]

    @property
    def keys(self):
        '''Helper to access group parameter names. '''
        return [k for (k, g) in self.group.items()]

    def sample(self):
        '''Snapshot the current entries; call before applying changes. '''
        self.s_items = self.items
        self.s_keys = self.keys

    def assert_entry_count(self, delta=0):
        '''Assert entry count changed by `delta` since the last sample.

        Arguments
        ---------
        delta: Number of entries added (+) or removed (-) since last sample.
        '''
        items = self.items
        # Fix: report the actual expected count (snapshot + delta) rather
        # than the raw snapshot size, and use wording that is correct for
        # both add and remove callers (not just rename).
        assert len(self.s_items) + delta == len(items),\
            'Entry count mismatch. Expected %i entries, now has %i.' %\
            (len(self.s_items) + delta, len(items))

    def assert_group_items(self, ignore=None):
        '''Assert all named (str, param) pairs persisted after change.'''
        for i, (n, g) in enumerate(self.s_items):
            if n == ignore:
                continue
            g2 = self.group.get(n)
            assert g == g2, 'Group listed order changed for entry %i.' % i

    def verify_add_parameter(self, N):
        '''Add N parameters and verify count at each iteration.'''
        self.sample()
        for i in range(1, N):
            test_name = 'TEST_ADD_PARAM_%i' % i
            add_dummy_param(self.group, test_name)
            assert self.group.get(test_name) is not None, 'Added group does not exist.'
        self.assert_group_items()

    def verify_remove_all(self):
        '''Remove all params using name key and verify count at each iteration.'''
        self.sample()
        keys = [k for (k, g) in self.items]
        for i, key in enumerate(keys):
            grp = self.group.get(key)
            assert grp is not None, 'Expected group to exist.'
            self.group.remove_param(key)
            assert self.group.get(key) is None, 'Removed param persisted.'
            self.assert_entry_count(delta=-1 - i)
class TestParameterAccessors(Base):
    ''' Tests functionality associated with accessing and editing Parameter entries in Group objects.
    '''
    ZIP = 'sample01.zip'
    INTEL_INT = 'Eb015pi.c3d'
    INTEL_REAL = 'Eb015pr.c3d'

    def _load_reader(self):
        '''Load the Intel "real" sample file and return a c3d.Reader.'''
        return c3d.Reader(Zipload._get(self.ZIP, self.INTEL_REAL))

    def test_Group_values(self):
        '''Test Group.values()'''
        reader = self._load_reader()
        for g in reader.values():
            N = len([v for v in g.values()])
            assert N > 0, 'No group values in file or GroupReadonly.values() failed'

    def test_Group_items(self):
        '''Test Group.items()'''
        reader = self._load_reader()
        for g in reader.values():
            N = len([kv for kv in g.items()])
            assert N > 0, 'No group items in file or GroupReadonly.items() failed'

    def test_Group_readonly_add_param(self):
        '''Test that adding a parameter to a readonly group fails.'''
        reader = self._load_reader()
        for g in reader.values():
            try:
                add_dummy_param(g)
                raise RuntimeError('Adding to readonly should not be possible.')
            except AttributeError:
                pass

    def test_Group_add_param(self):
        '''Test that adding parameters acts as intended.'''
        writer = self._load_reader().to_writer()
        for g in writer.values():
            ref = ParamSample(g)
            ref.verify_add_parameter(100)
            ref.verify_remove_all()

    def test_Group_remove_param(self):
        '''Test that removing parameters acts as intended.'''
        writer = self._load_reader().to_writer()
        for g in writer.values():
            ref = ParamSample(g)
            ref.verify_remove_all()
            ref.verify_add_parameter(100)

    def test_Group_rename_param(self):
        '''Test that renaming parameters acts as intended.'''
        writer = self._load_reader().to_writer()
        for g in writer.values():
            ref = ParamSample(g)
            prm_keys = ref.keys
            new_names = ['TEST_NAME' + str(i) for i in range(len(prm_keys))]
            for key, nname in zip(prm_keys, new_names):
                prm = g.get(key)
                g.rename_param(key, nname)
                prm2 = g.get(nname)
                assert prm2 is not None, "Rename failed, renamed param does not exist."
                assert prm == prm2, 'Rename failed, param acquired from new name is not identical.'
            ref.assert_entry_count()
            try:
                g.rename_param(new_names[0], new_names[1])
                raise RuntimeError('Renaming onto an existing parameter name should raise a ValueError.')
            except ValueError:
                pass  # Expected: cannot overwrite an existing parameter name.
if __name__ == '__main__':
    # Allow running this module directly: discovers and runs the tests above.
    unittest.main()
| [
"numpy.random.default_rng",
"unittest.main",
"test.zipload.Zipload._get"
] | [((266, 289), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (287, 289), True, 'import numpy as np\n'), ((5880, 5895), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5893, 5895), False, 'import unittest\n'), ((3233, 3272), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (3245, 3272), False, 'from test.zipload import Zipload\n'), ((3532, 3571), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (3544, 3571), False, 'from test.zipload import Zipload\n'), ((3876, 3915), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (3888, 3915), False, 'from test.zipload import Zipload\n'), ((4263, 4302), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (4275, 4302), False, 'from test.zipload import Zipload\n'), ((4609, 4648), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (4621, 4648), False, 'from test.zipload import Zipload\n'), ((4956, 4995), 'test.zipload.Zipload._get', 'Zipload._get', (['self.ZIP', 'self.INTEL_REAL'], {}), '(self.ZIP, self.INTEL_REAL)\n', (4968, 4995), False, 'from test.zipload import Zipload\n')] |
#!/usr/bin/env python
# stdlib imports
import os
import re
# third party imports
import numpy as np
from obspy.core.utcdatetime import UTCDateTime
# local
from gmprocess.utils.constants import UNIT_CONVERSIONS
from gmprocess.core.stationstream import StationStream
from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS
from gmprocess.io.seedname import get_channel_name, get_units_type
# Format constants for the Iran BHRC strong-motion file layout.
INTIMEFMT = '%Y/%m/%d %H:%M:%S'
FLOATRE = r"[-+]?[0-9]*\.?[0-9]+"
INTRE = "[-+]?[0-9]*"
TEXT_HDR_ROWS = 13
INT_HDR_ROWS = 7
FLOAT_HDR_ROWS = 7
COLS_PER_ROW = 10
COLWIDTH = 13
SOURCE = 'Road, Housing & Urban Development Research Center (BHRC)'
SOURCE_FORMAT = 'BHRC'
NETWORK = 'I1'
LEVELS = {'VOL1DS': 'V1'}


def is_bhrc(filename):
    """Check whether a file matches the Iran BHRC strong-motion format.

    Args:
        filename (str): Path to a candidate data file.

    Returns:
        bool: True if the text header looks like a BHRC file.
    """
    try:
        with open(filename, 'rt', encoding='utf-8') as f:
            # A file shorter than the header raises StopIteration here;
            # treat that as "not a BHRC file" instead of crashing.
            lines = [next(f) for _ in range(TEXT_HDR_ROWS)]
    except (UnicodeDecodeError, StopIteration):
        return False
    # BHRC headers start with '* VOL...' and carry 'COMP...' on row 7.
    return lines[0].startswith('* VOL') and lines[6].startswith('COMP')
def read_bhrc(filename, **kwargs):
    """Read the Iran BHRC strong motion data format.

    Args:
        filename (str): path to BHRC data file.
        kwargs (ref):
            Other arguments will be ignored.

    Returns:
        list: Sequence of one StationStream object containing 3
            StationTrace objects.
    """
    # The file stores three channels back to back; walk through it once,
    # alternating header and data sections and tracking the row offset.
    offset = 0
    traces = []
    for _ in range(3):
        hdr, offset = _read_header_lines(filename, offset)
        data, offset = _read_data(filename, offset, hdr)
        traces.append(StationTrace(data, hdr))

    stream = StationStream(traces)
    for tr in stream:
        # Anything above raw (V0) level has already been instrument-corrected.
        if tr.stats.standard.process_level != PROCESS_LEVELS['V0']:
            response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
            tr.setProvenance('remove_response', response)

    return [stream]
def _read_header_lines(filename, offset):
    """Read the header lines for each channel.

    Args:
        filename (str):
            Input BHRC file name.
        offset (int):
            Number of lines to skip from the beginning of the file.

    Returns:
        tuple: (header dictionary containing Stats dictionary with
        extra sub-dicts, updated offset rows)
    """
    with open(filename, 'rt', encoding='utf-8') as f:
        # Skip rows consumed by earlier channels, then read this
        # channel's 13-row text header.
        for _ in range(offset):
            next(f)
        lines = [next(f) for x in range(TEXT_HDR_ROWS)]

    offset += TEXT_HDR_ROWS
    header = {}
    standard = {}
    coords = {}
    format_specific = {}

    # get the sensor azimuth with respect to the earthquake
    # this data has been rotated so that the longitudinal channel (L)
    # is oriented at the sensor azimuth, and the transverse (T) is
    # 90 degrees off from that.
    # Row 8 holds "<station name> Station <lat> <lon> <alt> <L> <T> ..."
    station_info = lines[7][lines[7].index('Station'):]
    float_strings = re.findall(FLOATRE, station_info)
    (lat_str, lon_str, alt_str, lstr, tstr) = float_strings[0:5]
    component = lines[4].strip()
    # Vertical channels have no horizontal azimuth (NaN).
    if component == 'V':
        angle = np.nan
    elif component == 'L':
        angle = float(lstr)
    else:
        angle = float(tstr)
    coords = {'latitude': float(lat_str),
              'longitude': float(lon_str),
              'elevation': float(alt_str)}

    # fill out the standard dictionary
    standard['source'] = SOURCE
    standard['source_format'] = SOURCE_FORMAT
    standard['instrument'] = lines[1].split('=')[1].strip()
    standard['sensor_serial_number'] = ''
    # Row 1 looks like "* VOL1DS ..."; the volume token selects the
    # supported processing level (only VOL1DS -> V1 is known).
    volstr = lines[0].split()[1].strip()
    if volstr not in LEVELS:
        raise KeyError('Volume %s files are not supported.' % volstr)
    standard['process_level'] = PROCESS_LEVELS[LEVELS[volstr]]
    standard['process_time'] = ''
    # Station name is whatever precedes the word 'Station' on row 8.
    station_name = lines[7][0:lines[7].index('Station')].strip()
    standard['station_name'] = station_name
    standard['structure_type'] = ''
    standard['corner_frequency'] = np.nan
    standard['units'] = 'acc'
    # Row 10 carries instrument period and damping as the first two floats.
    period_str, damping_str = re.findall(FLOATRE, lines[9])
    standard['instrument_period'] = float(period_str)
    if standard['instrument_period'] == 0:
        standard['instrument_period'] = np.nan
    standard['instrument_damping'] = float(damping_str)
    standard['horizontal_orientation'] = angle
    standard['vertical_orientation'] = np.nan
    standard['comments'] = ''
    # Fall back to the directory name if the path ends with a separator.
    head, tail = os.path.split(filename)
    standard['source_file'] = tail or os.path.basename(head)

    # this field can be used for instrument correction
    # when data is in counts
    standard['instrument_sensitivity'] = np.nan

    # fill out the stats stuff
    # we don't know the start of the trace
    header['starttime'] = UTCDateTime(1970, 1, 1)
    # Row 11 carries the number of points and record duration.
    npts_str, dur_str = re.findall(FLOATRE, lines[10])
    header['npts'] = int(npts_str)
    header['duration'] = float(dur_str)
    header['delta'] = header['duration'] / (header['npts'] - 1)
    header['sampling_rate'] = 1 / header['delta']
    # Derive the SEED channel code from the orientation: NaN -> vertical,
    # within 45 degrees of N/S -> north-ish, otherwise east-ish.
    if np.isnan(angle):
        header['channel'] = get_channel_name(
            header['sampling_rate'],
            is_acceleration=True,
            is_vertical=True,
            is_north=False)
    elif (angle > 315 or angle < 45) or (angle > 135 and angle < 225):
        header['channel'] = get_channel_name(
            header['sampling_rate'],
            is_acceleration=True,
            is_vertical=False,
            is_north=True)
    else:
        header['channel'] = get_channel_name(
            header['sampling_rate'],
            is_acceleration=True,
            is_vertical=False,
            is_north=False)
    standard['units_type'] = get_units_type(header['channel'])

    # Station code is the token between ':' and '/' on row 1.
    part1 = lines[0].split(':')[1]
    stationcode = part1.split('/')[0].strip()
    header['station'] = stationcode
    header['location'] = '--'
    header['network'] = NETWORK
    header['coordinates'] = coords
    header['standard'] = standard
    header['format_specific'] = format_specific

    # Account for the integer and float header blocks that follow the
    # text header (their contents are not parsed here).
    offset += INT_HDR_ROWS
    offset += FLOAT_HDR_ROWS

    return (header, offset)
def _read_data(filename, offset, header):
    """Read acceleration data from BHRC file.

    Args:
        filename (str):
            BHRC strong motion filename.
        offset (int):
            Number of rows from the beginning of the file to skip.
        header (dict):
            Dictionary for given channel with number of points.

    Returns:
        tuple: (Acceleration data (in gals), updated offset)
    """
    npoints = header['npts']
    nrows = int(np.ceil(npoints / COLS_PER_ROW))
    # Each data row holds COLS_PER_ROW fixed-width columns.
    column_widths = [COLWIDTH] * COLS_PER_ROW
    table = np.genfromtxt(filename, skip_header=offset,
                          max_rows=nrows, filling_values=np.nan,
                          delimiter=column_widths)
    # Flatten row-wise and drop the padding cells on the last row.
    data = table.flatten()[0:npoints]
    # convert data to cm/s^2
    data *= UNIT_CONVERSIONS['g/10']
    # Skip past the rows just read plus the end-of-record marker line.
    offset += nrows + 1
    return (data, offset)
| [
"gmprocess.core.stationtrace.StationTrace",
"numpy.ceil",
"os.path.basename",
"gmprocess.core.stationstream.StationStream",
"gmprocess.io.seedname.get_channel_name",
"numpy.genfromtxt",
"numpy.isnan",
"re.findall",
"obspy.core.utcdatetime.UTCDateTime",
"gmprocess.io.seedname.get_units_type",
"os... | [((1786, 1814), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', (['data1', 'header1'], {}), '(data1, header1)\n', (1798, 1814), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((1828, 1856), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', (['data2', 'header2'], {}), '(data2, header2)\n', (1840, 1856), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((1870, 1898), 'gmprocess.core.stationtrace.StationTrace', 'StationTrace', (['data3', 'header3'], {}), '(data3, header3)\n', (1882, 1898), False, 'from gmprocess.core.stationtrace import StationTrace, PROCESS_LEVELS\n'), ((1912, 1951), 'gmprocess.core.stationstream.StationStream', 'StationStream', (['[trace1, trace2, trace3]'], {}), '([trace1, trace2, trace3])\n', (1925, 1951), False, 'from gmprocess.core.stationstream import StationStream\n'), ((3155, 3188), 're.findall', 're.findall', (['FLOATRE', 'station_info'], {}), '(FLOATRE, station_info)\n', (3165, 3188), False, 'import re\n'), ((4260, 4289), 're.findall', 're.findall', (['FLOATRE', 'lines[9]'], {}), '(FLOATRE, lines[9])\n', (4270, 4289), False, 'import re\n'), ((4630, 4653), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (4643, 4653), False, 'import os\n'), ((4949, 4972), 'obspy.core.utcdatetime.UTCDateTime', 'UTCDateTime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (4960, 4972), False, 'from obspy.core.utcdatetime import UTCDateTime\n'), ((4997, 5027), 're.findall', 're.findall', (['FLOATRE', 'lines[10]'], {}), '(FLOATRE, lines[10])\n', (5007, 5027), False, 'import re\n'), ((5224, 5239), 'numpy.isnan', 'np.isnan', (['angle'], {}), '(angle)\n', (5232, 5239), True, 'import numpy as np\n'), ((5878, 5911), 'gmprocess.io.seedname.get_units_type', 'get_units_type', (["header['channel']"], {}), "(header['channel'])\n", (5892, 5911), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((6849, 6954), 
'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'skip_header': 'offset', 'max_rows': 'nrows', 'filling_values': 'np.nan', 'delimiter': 'widths'}), '(filename, skip_header=offset, max_rows=nrows, filling_values=\n np.nan, delimiter=widths)\n', (6862, 6954), True, 'import numpy as np\n'), ((4692, 4714), 'os.path.basename', 'os.path.basename', (['head'], {}), '(head)\n', (4708, 4714), False, 'import os\n'), ((5269, 5371), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["header['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(True)', 'is_north': '(False)'}), "(header['sampling_rate'], is_acceleration=True, is_vertical\n =True, is_north=False)\n", (5285, 5371), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((6805, 6836), 'numpy.ceil', 'np.ceil', (['(npoints / COLS_PER_ROW)'], {}), '(npoints / COLS_PER_ROW)\n', (6812, 6836), True, 'import numpy as np\n'), ((5515, 5617), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["header['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(True)'}), "(header['sampling_rate'], is_acceleration=True, is_vertical\n =False, is_north=True)\n", (5531, 5617), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n'), ((5700, 5803), 'gmprocess.io.seedname.get_channel_name', 'get_channel_name', (["header['sampling_rate']"], {'is_acceleration': '(True)', 'is_vertical': '(False)', 'is_north': '(False)'}), "(header['sampling_rate'], is_acceleration=True, is_vertical\n =False, is_north=False)\n", (5716, 5803), False, 'from gmprocess.io.seedname import get_channel_name, get_units_type\n')] |
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import axes3d
from matplotlib import style
import numpy as np
import math
import time
#plt.style.use('dark_background')
class ParticleInABox():
    """Visualise infinite-square-well ('particle in a box') eigenstates.

    Nested helpers cover 1D, 2D and 3D boxes; each exposes a
    wavefunction() and a PDF() plotting method.
    """

    class OneDimensional():
        """Eigenstate of a 1D box of width ``length_x``.

        Arguments
        ---------
        length_x: width of the box (integer, used to size the x-grid).
        quantum_number: principal quantum number n of the eigenstate.
        time_dependence: if True, animate the time evolution frame by frame.
        """

        def __init__(self, length_x, quantum_number, time_dependence):
            self.length_x = length_x
            self.quantum_number = quantum_number
            self.time_dependence = time_dependence

        def wavefunction(self):
            """Plot psi(x) (static) or animate Re/Im of psi(x, t)."""
            if self.time_dependence:
                figure = plt.figure()
                figure.show()
                # Generate the time values used as animation frames.
                t = []
                for i in range(0, 1000):
                    u = i*100
                    t.append(u)
                for time_interval in t:
                    plt.clf()
                    axis = figure.add_subplot(111)
                    axis.autoscale(False)
                    plt.xlim(0, self.length_x)
                    plt.ylim(-1.5, 1.5)
                    # Sample x positions across the box (100 points per unit).
                    x = []
                    for i in range((self.length_x)*100):
                        u = i/100
                        x.append(u)
                    real_y = []
                    imaginary_y = []
                    for i in x:
                        # Stationary state times the complex phase factor;
                        # 6.626e-34 and 9.109e-31 are Planck's constant and
                        # the electron mass in SI units.
                        real_output = math.cos((pow(self.quantum_number, 2)*6.626*pow(10, -34)*math.pi*time_interval)/(4*9.109*pow(10, -31)*self.length_x))*math.sqrt((2)/(self.length_x))*math.sin((math.pi*self.quantum_number*i)/(self.length_x))
                        imaginary_output = -1*math.sin((pow(self.quantum_number, 2)*6.626*pow(10, -34)*math.pi*time_interval)/(4*9.109*pow(10, -31)*self.length_x))*math.sqrt((2)/(self.length_x))*math.sin((math.pi*self.quantum_number*i)/(self.length_x))
                        real_y.append(real_output)
                        imaginary_y.append(imaginary_output)
                    axis.plot(x, real_y, color = "blue", label = r'$Re[\psi(x)$]')
                    axis.plot(x, imaginary_y, color = "red", label = r'$Im[\psi(x)]$')
                    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc='lower left', ncol=2, mode="expand", borderaxespad=0.)
                    axis.set_xlabel(r'$x$')
                    figure.canvas.draw()
                    time.sleep(0.01)
            else:
                x = []
                for i in range((self.length_x)*100):
                    u = i/100
                    x.append(u)
                y = []
                for i in x:
                    output = math.sqrt((2)/(self.length_x))*math.sin((math.pi*self.quantum_number*i)/(self.length_x))
                    y.append(output)
                plt.plot(x, y)
                plt.show()

        def PDF(self):
            """Plot |psi(x)|^2 (static) or animate its time evolution."""
            if self.time_dependence:
                figure = plt.figure()
                figure.show()
                # Generate the time values used as animation frames.
                t = []
                for i in range(0, 1000):
                    u = i*100
                    t.append(u)
                for time_interval in t:
                    plt.clf()
                    axis = figure.add_subplot(111)
                    axis.autoscale(False)
                    plt.xlim(0, self.length_x)
                    plt.ylim(-1.5, 1.5)
                    x = []
                    for i in range((self.length_x)*100):
                        u = i/100
                        x.append(u)
                    y = []
                    for i in x:
                        # NOTE(review): a stationary state's |psi|^2 is
                        # time-independent; the decaying exponential appears
                        # to be a deliberate visual effect, so it is kept.
                        output = math.exp(-1*(pow(self.quantum_number, 2)*6.626*pow(10, -34)*math.pi*time_interval)/(4*9.109*pow(10, -31)*self.length_x))*(2/self.length_x)*pow(math.sin((math.pi*self.quantum_number*i)/(self.length_x)), 2)
                        y.append(output)
                    axis.plot(x, y, color = "blue")
                    axis.set_xlabel(r'$x$')
                    axis.set_ylabel(r'$|\psi(x)|^2$')
                    figure.canvas.draw()
                    time.sleep(0.01)
            else:
                x = []
                for i in range((self.length_x)*100):
                    u = i/100
                    x.append(u)
                y = []
                for i in x:
                    output = (2/self.length_x)*pow(math.sin((math.pi*self.quantum_number*i)/(self.length_x)), 2)
                    y.append(output)
                plt.plot(x, y)
                # Fixed: removed a leftover debug print() of each x value and
                # replaced the literal '\alpha'/'\beta' placeholder labels.
                plt.xlabel(r'$x$')
                plt.ylabel(r'$|\psi(x)|^2$')
                plt.show()

    class TwoDimensional():
        """Eigenstate of a 2D rectangular box ``length_x`` by ``length_y``."""

        def __init__(self, length_x, length_y, quantum_number_x, quantum_number_y):
            self.length_x = length_x
            self.length_y = length_y
            self.quantum_number_x = quantum_number_x
            self.quantum_number_y = quantum_number_y

        def wavefunction(self):
            """Surface-plot psi(x, y) over the box."""
            # Sample the box on a 100x100 grid.
            x = np.linspace(0, self.length_x, 100)
            y = np.linspace(0, self.length_y, 100)
            X,Y = np.meshgrid(x,y)
            # Separable product of the 1D eigenstates.
            Z = np.sqrt((2)/(self.length_x*self.length_y))*np.sin((math.pi*self.quantum_number_x*X)/(self.length_x))*np.sin((math.pi*self.quantum_number_y*Y)/(self.length_y))
            figure = plt.figure()
            # add_subplot replaces the removed Figure.gca(projection=...)
            # API and matches the ThreeDimensional plots.
            axis = figure.add_subplot(111, projection='3d')
            contour = axis.plot_surface(X,Y,Z,cmap='hot')
            figure.colorbar(contour, shrink = 0.75)
            axis.set_xlabel(r'$x$')
            axis.set_ylabel(r'$y$')
            axis.set_zlabel(r'$\psi(x, y)$')
            axis.view_init(elev = 30, azim = -135)
            plt.show()

        def PDF(self):
            """Surface-plot |psi(x, y)|^2 over the box."""
            x = np.linspace(0, self.length_x, 1000)
            y = np.linspace(0, self.length_y, 1000)
            X,Y = np.meshgrid(x,y)
            # Fixed normalisation: was 2/length_x*length_y (divides by
            # length_x only); must be 2/(length_x*length_y) to agree with
            # wavefunction() above.
            Z = (2/(self.length_x*self.length_y))*pow(np.sin((math.pi*self.quantum_number_x*X)/(self.length_x))*np.sin((math.pi*self.quantum_number_y*Y)/(self.length_y)),2)
            figure = plt.figure()
            axis = figure.add_subplot(111, projection='3d')
            contour = axis.plot_surface(X,Y,Z,cmap='hot')
            figure.colorbar(contour, shrink = 0.75)
            axis.set_xlabel(r'$x$')
            axis.set_ylabel(r'$y$')
            axis.set_zlabel(r'$|\psi(x, y)|^2$')
            axis.view_init(elev = 30, azim = -135)
            plt.show()

    class ThreeDimensional():
        """Eigenstate of a 3D box, drawn as a scatter where colour encodes
        the plotted quantity at each sample point."""

        def __init__(self, length_x, length_y, length_z, quantum_number_x, quantum_number_y, quantum_number_z, scatter_density):
            self.length_x = length_x
            self.length_y = length_y
            self.length_z = length_z
            self.quantum_number_x = quantum_number_x
            self.quantum_number_y = quantum_number_y
            self.quantum_number_z = quantum_number_z
            self.scatter_density = scatter_density

        def _sample_axis(self, length):
            """Evenly sample [0, length) at scatter_density points per unit."""
            values = []
            for i in range(length*self.scatter_density):
                values.append(i/self.scatter_density)
            return values

        def _space_coordinates(self):
            """Return mutable [x, y, z] sample points covering the box."""
            coordinates = []
            for i in self._sample_axis(self.length_x):
                for j in self._sample_axis(self.length_y):
                    for k in self._sample_axis(self.length_z):
                        coordinates.append([i, j, k])
            return coordinates

        def _scatter(self, coordinates, title):
            """Render [x, y, z, value] samples as a colour-coded scatter."""
            x_plot = []
            y_plot = []
            z_plot = []
            colour_plot = []
            for coordinate in coordinates:
                x_plot.append(coordinate[0])
                y_plot.append(coordinate[1])
                z_plot.append(coordinate[2])
                colour_plot.append(coordinate[3])
            fig = plt.figure()
            ax1 = fig.add_subplot(111, projection='3d')
            img = ax1.scatter(x_plot, y_plot, z_plot, c=colour_plot, cmap=plt.get_cmap('jet'))
            fig.colorbar(img)
            ax1.set_xlabel(r'$x$')
            ax1.set_ylabel(r'$y$')
            ax1.set_zlabel(r'$z$')
            plt.title(title)
            plt.show()

        def wavefunction(self):
            """Scatter-plot psi(x, y, z), colour-coded by value."""
            space_coordinates = self._space_coordinates()
            # Append the wavefunction value as a fourth entry per point.
            for coordinate in space_coordinates:
                output = math.sqrt((2)/(self.length_x*self.length_y*self.length_z))*math.sin((math.pi*self.quantum_number_x*coordinate[0])/(self.length_x))*math.sin((math.pi*self.quantum_number_y*coordinate[1])/(self.length_y))*math.sin((math.pi*self.quantum_number_z*coordinate[2])/(self.length_z))
                coordinate.append(output)
            # Fixed labelling: the z-axis is the spatial coordinate z, not
            # |psi|^2; the plotted quantity is shown in the title instead.
            self._scatter(space_coordinates, r'$\psi(x, y, z)$')

        def PDF(self):
            """Scatter-plot |psi(x, y, z)|^2, colour-coded by value."""
            space_coordinates = self._space_coordinates()
            for coordinate in space_coordinates:
                output = (2/(self.length_x*self.length_y*self.length_z))*pow(math.sin((math.pi*self.quantum_number_x*coordinate[0])/(self.length_x))*math.sin((math.pi*self.quantum_number_y*coordinate[1])/(self.length_y))*math.sin((math.pi*self.quantum_number_z*coordinate[2])/(self.length_z)),2)
                coordinate.append(output)
            self._scatter(space_coordinates, r'$|\psi(x, y, z)|^{2}$')
# Demo invocation: animates |psi|^2 of the n=2 state in a unit-width 1D box.
# NOTE(review): runs at import time; consider guarding with a __main__ check.
ParticleInABox().OneDimensional(length_x = 1, quantum_number = 2, time_dependence = True).PDF()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"numpy.meshgrid",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.ylim",
"math.sqrt",
"matplotlib.pyplot.legend",
"math.sin",
"time.sleep",
"matplotlib.pyplot.fi... | [((5214, 5248), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_x', '(100)'], {}), '(0, self.length_x, 100)\n', (5225, 5248), True, 'import numpy as np\n'), ((5313, 5347), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_y', '(100)'], {}), '(0, self.length_y, 100)\n', (5324, 5347), True, 'import numpy as np\n'), ((5428, 5445), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (5439, 5445), True, 'import numpy as np\n'), ((5748, 5760), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5758, 5760), True, 'import matplotlib.pyplot as plt\n'), ((6106, 6116), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6114, 6116), True, 'import matplotlib.pyplot as plt\n'), ((6207, 6242), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_x', '(1000)'], {}), '(0, self.length_x, 1000)\n', (6218, 6242), True, 'import numpy as np\n'), ((6307, 6342), 'numpy.linspace', 'np.linspace', (['(0)', 'self.length_y', '(1000)'], {}), '(0, self.length_y, 1000)\n', (6318, 6342), True, 'import numpy as np\n'), ((6423, 6440), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (6434, 6440), True, 'import numpy as np\n'), ((6739, 6751), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6749, 6751), True, 'import matplotlib.pyplot as plt\n'), ((7101, 7111), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7109, 7111), True, 'import matplotlib.pyplot as plt\n'), ((9684, 9696), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9694, 9696), True, 'import matplotlib.pyplot as plt\n'), ((10015, 10025), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10023, 10025), True, 'import matplotlib.pyplot as plt\n'), ((12090, 12102), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12100, 12102), True, 'import matplotlib.pyplot as plt\n'), ((12408, 12443), 'matplotlib.pyplot.title', 'plt.title', (['"""$|\\\\psi(x, y, z)|^{2}$"""'], {}), "('$|\\\\psi(x, y, z)|^{2}$')\n", (12417, 
12443), True, 'import matplotlib.pyplot as plt\n'), ((12457, 12467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12465, 12467), True, 'import matplotlib.pyplot as plt\n'), ((648, 660), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (658, 660), True, 'import matplotlib.pyplot as plt\n'), ((2872, 2886), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (2880, 2886), True, 'import matplotlib.pyplot as plt\n'), ((2904, 2914), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2912, 2914), True, 'import matplotlib.pyplot as plt\n'), ((3017, 3029), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3027, 3029), True, 'import matplotlib.pyplot as plt\n'), ((4691, 4705), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (4699, 4705), True, 'import matplotlib.pyplot as plt\n'), ((4723, 4744), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""\\\\alpha"""'], {}), "('\\\\alpha')\n", (4733, 4744), True, 'import matplotlib.pyplot as plt\n'), ((4762, 4782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""\\\\beta"""'], {}), "('\\\\beta')\n", (4772, 4782), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4810), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4808, 4810), True, 'import matplotlib.pyplot as plt\n'), ((5630, 5689), 'numpy.sin', 'np.sin', (['(math.pi * self.quantum_number_y * Y / self.length_y)'], {}), '(math.pi * self.quantum_number_y * Y / self.length_y)\n', (5636, 5689), True, 'import numpy as np\n'), ((936, 945), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (943, 945), True, 'import matplotlib.pyplot as plt\n'), ((1062, 1088), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self.length_x'], {}), '(0, self.length_x)\n', (1070, 1088), True, 'import matplotlib.pyplot as plt\n'), ((1110, 1129), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (1118, 1129), True, 'import matplotlib.pyplot as plt\n'), ((2236, 2350), 
'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(0.0, 1.02, 1.0, 0.102)', 'loc': '"""lower left"""', 'ncol': '(2)', 'mode': '"""expand"""', 'borderaxespad': '(0.0)'}), "(bbox_to_anchor=(0.0, 1.02, 1.0, 0.102), loc='lower left', ncol=2,\n mode='expand', borderaxespad=0.0)\n", (2246, 2350), True, 'import matplotlib.pyplot as plt\n'), ((2451, 2467), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (2461, 2467), False, 'import time\n'), ((3305, 3314), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3312, 3314), True, 'import matplotlib.pyplot as plt\n'), ((3431, 3457), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', 'self.length_x'], {}), '(0, self.length_x)\n', (3439, 3457), True, 'import matplotlib.pyplot as plt\n'), ((3479, 3498), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-1.5)', '(1.5)'], {}), '(-1.5, 1.5)\n', (3487, 3498), True, 'import matplotlib.pyplot as plt\n'), ((4251, 4267), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (4261, 4267), False, 'import time\n'), ((5529, 5573), 'numpy.sqrt', 'np.sqrt', (['(2 / (self.length_x * self.length_y))'], {}), '(2 / (self.length_x * self.length_y))\n', (5536, 5573), True, 'import numpy as np\n'), ((5572, 5631), 'numpy.sin', 'np.sin', (['(math.pi * self.quantum_number_x * X / self.length_x)'], {}), '(math.pi * self.quantum_number_x * X / self.length_x)\n', (5578, 5631), True, 'import numpy as np\n'), ((9113, 9186), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number_z * coordinate[2] / self.length_z)'], {}), '(math.pi * self.quantum_number_z * coordinate[2] / self.length_z)\n', (9121, 9186), False, 'import math\n'), ((9829, 9848), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (9841, 9848), True, 'import matplotlib.pyplot as plt\n'), ((12235, 12254), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""jet"""'], {}), "('jet')\n", (12247, 12254), True, 'import matplotlib.pyplot as plt\n'), ((2726, 2754), 'math.sqrt', 'math.sqrt', (['(2 / 
self.length_x)'], {}), '(2 / self.length_x)\n', (2735, 2754), False, 'import math\n'), ((2757, 2816), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number * i / self.length_x)'], {}), '(math.pi * self.quantum_number * i / self.length_x)\n', (2765, 2816), False, 'import math\n'), ((6560, 6619), 'numpy.sin', 'np.sin', (['(math.pi * self.quantum_number_x * X / self.length_x)'], {}), '(math.pi * self.quantum_number_x * X / self.length_x)\n', (6566, 6619), True, 'import numpy as np\n'), ((6618, 6677), 'numpy.sin', 'np.sin', (['(math.pi * self.quantum_number_y * Y / self.length_y)'], {}), '(math.pi * self.quantum_number_y * Y / self.length_y)\n', (6624, 6677), True, 'import numpy as np\n'), ((9041, 9114), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number_y * coordinate[1] / self.length_y)'], {}), '(math.pi * self.quantum_number_y * coordinate[1] / self.length_y)\n', (9049, 9114), False, 'import math\n'), ((1615, 1674), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number * i / self.length_x)'], {}), '(math.pi * self.quantum_number * i / self.length_x)\n', (1623, 1674), False, 'import math\n'), ((1869, 1928), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number * i / self.length_x)'], {}), '(math.pi * self.quantum_number * i / self.length_x)\n', (1877, 1928), False, 'import math\n'), ((4572, 4631), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number * i / self.length_x)'], {}), '(math.pi * self.quantum_number * i / self.length_x)\n', (4580, 4631), False, 'import math\n'), ((8910, 8972), 'math.sqrt', 'math.sqrt', (['(2 / (self.length_x * self.length_y * self.length_z))'], {}), '(2 / (self.length_x * self.length_y * self.length_z))\n', (8919, 8972), False, 'import math\n'), ((8969, 9042), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number_x * coordinate[0] / self.length_x)'], {}), '(math.pi * self.quantum_number_x * coordinate[0] / self.length_x)\n', (8977, 9042), False, 'import math\n'), ((11516, 11589), 'math.sin', 'math.sin', 
(['(math.pi * self.quantum_number_z * coordinate[2] / self.length_z)'], {}), '(math.pi * self.quantum_number_z * coordinate[2] / self.length_z)\n', (11524, 11589), False, 'import math\n'), ((1584, 1612), 'math.sqrt', 'math.sqrt', (['(2 / self.length_x)'], {}), '(2 / self.length_x)\n', (1593, 1612), False, 'import math\n'), ((1838, 1866), 'math.sqrt', 'math.sqrt', (['(2 / self.length_x)'], {}), '(2 / self.length_x)\n', (1847, 1866), False, 'import math\n'), ((3929, 3988), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number * i / self.length_x)'], {}), '(math.pi * self.quantum_number * i / self.length_x)\n', (3937, 3988), False, 'import math\n'), ((11372, 11445), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number_x * coordinate[0] / self.length_x)'], {}), '(math.pi * self.quantum_number_x * coordinate[0] / self.length_x)\n', (11380, 11445), False, 'import math\n'), ((11444, 11517), 'math.sin', 'math.sin', (['(math.pi * self.quantum_number_y * coordinate[1] / self.length_y)'], {}), '(math.pi * self.quantum_number_y * coordinate[1] / self.length_y)\n', (11452, 11517), False, 'import math\n')] |
import argparse
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from pathlib import Path
from typing import Dict, Union
import numpy as np
import tqdm
from dfa.audio import Audio
from dfa.paths import Paths
from dfa.text import Tokenizer
from dfa.utils import get_files, read_config, pickle_binary, read_metafile
class Preprocessor:
"""Performs mel extraction and tokenization and stores the resulting torch tensors."""
def __init__(self,
audio: Audio,
tokenizer: Tokenizer,
paths: Paths,
text_dict: Dict[str, str],
mel_dim_last=True) -> None:
self.audio = audio
self.paths = paths
self.tokenizer = tokenizer
self.text_dict = text_dict
self.mel_dim_last = mel_dim_last
def __call__(self, file_path: Path) -> Dict[str, Union[str, int]]:
item_id = file_path.stem
if self.paths.precomputed_mels:
mel = np.load(self.paths.precomputed_mels / f'{item_id}.npy')
if not self.mel_dim_last:
mel = mel.T
assert mel.shape[1] == self.audio.n_mels, \
f'Expected mel shape to be of (None, {self.audio.n_mels}), but was: {mel.shape}! ' \
f'Consider setting config/audio/mel_dim_last: {not self.mel_dim_last}'
else:
wav = self.audio.load_wav(file_path)
mel = self.audio.wav_to_mel(wav)
np.save(self.paths.mel_dir / f'{item_id}.npy', mel, allow_pickle=False)
text = self.text_dict[item_id]
tokens = np.array(self.tokenizer(text)).astype(np.int32)
np.save(self.paths.token_dir / f'{item_id}.npy', tokens, allow_pickle=False)
return {'item_id': item_id, 'tokens_len': tokens.shape[0], 'mel_len': mel.shape[0]}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocessing for DeepForcedAligner.')
parser.add_argument('--config', '-c', help='Points to the config file.', default='config.yaml')
parser.add_argument('--num_workers', '-w', metavar='N', type=int, default=cpu_count() - 1,
help='The number of worker threads to use for preprocessing')
args = parser.parse_args()
config = read_config(args.config)
paths = Paths.from_config(config['paths'])
audio = Audio.from_config(config['audio'])
mel_dim_last = config['preprocessing']['mel_dim_last']
print(f'Config: {args.config}\n'
f'Target data directory: {paths.data_dir}')
text_dict = read_metafile(paths.metadata_path)
symbols = set()
for text in text_dict.values():
symbols.update(set(text))
symbols = sorted(list(symbols))
if paths.precomputed_mels:
audio_files = get_files(paths.precomputed_mels, extension='.npy')
else:
audio_files = get_files(paths.dataset_dir, extension='.wav')
audio_files = [x for x in audio_files if x.stem in text_dict]
tokenizer = Tokenizer(symbols)
preprocessor = Preprocessor(audio=audio, tokenizer=tokenizer, paths=paths,
text_dict=text_dict, mel_dim_last=mel_dim_last)
pool = Pool(processes=args.num_workers)
mapper = pool.imap_unordered(preprocessor, audio_files)
dataset = []
for i, item in tqdm.tqdm(enumerate(mapper), total=len(audio_files)):
dataset.append(item)
pickle_binary(dataset, paths.data_dir / 'dataset.pkl')
pickle_binary(symbols, paths.data_dir / 'symbols.pkl')
print('Preprocessing done.')
| [
"dfa.utils.read_metafile",
"numpy.load",
"numpy.save",
"argparse.ArgumentParser",
"dfa.utils.read_config",
"dfa.utils.pickle_binary",
"dfa.audio.Audio.from_config",
"dfa.utils.get_files",
"dfa.text.Tokenizer",
"multiprocessing.pool.Pool",
"dfa.paths.Paths.from_config",
"multiprocessing.cpu_cou... | [((1884, 1959), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocessing for DeepForcedAligner."""'}), "(description='Preprocessing for DeepForcedAligner.')\n", (1907, 1959), False, 'import argparse\n'), ((2287, 2311), 'dfa.utils.read_config', 'read_config', (['args.config'], {}), '(args.config)\n', (2298, 2311), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((2324, 2358), 'dfa.paths.Paths.from_config', 'Paths.from_config', (["config['paths']"], {}), "(config['paths'])\n", (2341, 2358), False, 'from dfa.paths import Paths\n'), ((2371, 2405), 'dfa.audio.Audio.from_config', 'Audio.from_config', (["config['audio']"], {}), "(config['audio'])\n", (2388, 2405), False, 'from dfa.audio import Audio\n'), ((2582, 2616), 'dfa.utils.read_metafile', 'read_metafile', (['paths.metadata_path'], {}), '(paths.metadata_path)\n', (2595, 2616), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((3011, 3029), 'dfa.text.Tokenizer', 'Tokenizer', (['symbols'], {}), '(symbols)\n', (3020, 3029), False, 'from dfa.text import Tokenizer\n'), ((3200, 3232), 'multiprocessing.pool.Pool', 'Pool', ([], {'processes': 'args.num_workers'}), '(processes=args.num_workers)\n', (3204, 3232), False, 'from multiprocessing.pool import Pool\n'), ((3421, 3475), 'dfa.utils.pickle_binary', 'pickle_binary', (['dataset', "(paths.data_dir / 'dataset.pkl')"], {}), "(dataset, paths.data_dir / 'dataset.pkl')\n", (3434, 3475), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((3480, 3534), 'dfa.utils.pickle_binary', 'pickle_binary', (['symbols', "(paths.data_dir / 'symbols.pkl')"], {}), "(symbols, paths.data_dir / 'symbols.pkl')\n", (3493, 3534), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((1489, 1560), 'numpy.save', 'np.save', (["(self.paths.mel_dir / f'{item_id}.npy')", 'mel'], 
{'allow_pickle': '(False)'}), "(self.paths.mel_dir / f'{item_id}.npy', mel, allow_pickle=False)\n", (1496, 1560), True, 'import numpy as np\n'), ((1673, 1749), 'numpy.save', 'np.save', (["(self.paths.token_dir / f'{item_id}.npy')", 'tokens'], {'allow_pickle': '(False)'}), "(self.paths.token_dir / f'{item_id}.npy', tokens, allow_pickle=False)\n", (1680, 1749), True, 'import numpy as np\n'), ((2797, 2848), 'dfa.utils.get_files', 'get_files', (['paths.precomputed_mels'], {'extension': '""".npy"""'}), "(paths.precomputed_mels, extension='.npy')\n", (2806, 2848), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((2881, 2927), 'dfa.utils.get_files', 'get_files', (['paths.dataset_dir'], {'extension': '""".wav"""'}), "(paths.dataset_dir, extension='.wav')\n", (2890, 2927), False, 'from dfa.utils import get_files, read_config, pickle_binary, read_metafile\n'), ((1006, 1061), 'numpy.load', 'np.load', (["(self.paths.precomputed_mels / f'{item_id}.npy')"], {}), "(self.paths.precomputed_mels / f'{item_id}.npy')\n", (1013, 1061), True, 'import numpy as np\n'), ((2138, 2149), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (2147, 2149), False, 'from multiprocessing import cpu_count\n')] |
from sklearn import neighbors, datasets
import numpy as np
from sklearn import tree
from sklearn import linear_model
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
#Training Set
X = np.zeros((22,1))
X[:,0] = np.arange(0,11,.5)
noisesigma = .2
Y = np.ravel(2 + .1 * X + noisesigma * np.random.randn(22, 1))
#Testing Set
Xp = np.zeros((110,1))
Xp[:,0] = np.arange(0,11,.1)
Yp = np.ravel(2 + .1 * Xp)
# Linear Regression
reglr = linear_model.LinearRegression()
reglr.fit(X,Y)
Ylr = reglr.predict(Xp)
regridge = linear_model.RidgeCV(alphas=[10])
regridge.fit(X,Y)
Yridge = regridge.predict(Xp)
reglasso = linear_model.Lasso(alpha = 0.1)
reglasso.fit(X,Y)
Ylasso = reglasso.predict(Xp)
plt.plot(X,Y,'go')
plt.plot(Xp,Yp,'g',label='true')
plt.plot(Xp,Ylasso,'r',label='lasso')
plt.plot(Xp,Yridge,'b',label='ridge')
plt.plot(Xp,Ylr,'m',label='linearregression')
plt.legend( loc = 4 )
| [
"matplotlib.pyplot.plot",
"numpy.ravel",
"numpy.random.randn",
"matplotlib.pyplot.legend",
"numpy.zeros",
"sklearn.linear_model.LinearRegression",
"sklearn.linear_model.RidgeCV",
"numpy.arange",
"sklearn.linear_model.Lasso"
] | [((214, 231), 'numpy.zeros', 'np.zeros', (['(22, 1)'], {}), '((22, 1))\n', (222, 231), True, 'import numpy as np\n'), ((240, 261), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(0.5)'], {}), '(0, 11, 0.5)\n', (249, 261), True, 'import numpy as np\n'), ((357, 375), 'numpy.zeros', 'np.zeros', (['(110, 1)'], {}), '((110, 1))\n', (365, 375), True, 'import numpy as np\n'), ((385, 406), 'numpy.arange', 'np.arange', (['(0)', '(11)', '(0.1)'], {}), '(0, 11, 0.1)\n', (394, 406), True, 'import numpy as np\n'), ((409, 431), 'numpy.ravel', 'np.ravel', (['(2 + 0.1 * Xp)'], {}), '(2 + 0.1 * Xp)\n', (417, 431), True, 'import numpy as np\n'), ((461, 492), 'sklearn.linear_model.LinearRegression', 'linear_model.LinearRegression', ([], {}), '()\n', (490, 492), False, 'from sklearn import linear_model\n'), ((544, 577), 'sklearn.linear_model.RidgeCV', 'linear_model.RidgeCV', ([], {'alphas': '[10]'}), '(alphas=[10])\n', (564, 577), False, 'from sklearn import linear_model\n'), ((638, 667), 'sklearn.linear_model.Lasso', 'linear_model.Lasso', ([], {'alpha': '(0.1)'}), '(alpha=0.1)\n', (656, 667), False, 'from sklearn import linear_model\n'), ((720, 740), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'Y', '"""go"""'], {}), "(X, Y, 'go')\n", (728, 740), True, 'import matplotlib.pyplot as plt\n'), ((739, 774), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'Yp', '"""g"""'], {'label': '"""true"""'}), "(Xp, Yp, 'g', label='true')\n", (747, 774), True, 'import matplotlib.pyplot as plt\n'), ((772, 812), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'Ylasso', '"""r"""'], {'label': '"""lasso"""'}), "(Xp, Ylasso, 'r', label='lasso')\n", (780, 812), True, 'import matplotlib.pyplot as plt\n'), ((810, 850), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'Yridge', '"""b"""'], {'label': '"""ridge"""'}), "(Xp, Yridge, 'b', label='ridge')\n", (818, 850), True, 'import matplotlib.pyplot as plt\n'), ((848, 896), 'matplotlib.pyplot.plot', 'plt.plot', (['Xp', 'Ylr', '"""m"""'], {'label': 
'"""linearregression"""'}), "(Xp, Ylr, 'm', label='linearregression')\n", (856, 896), True, 'import matplotlib.pyplot as plt\n'), ((894, 911), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(4)'}), '(loc=4)\n', (904, 911), True, 'import matplotlib.pyplot as plt\n'), ((314, 336), 'numpy.random.randn', 'np.random.randn', (['(22)', '(1)'], {}), '(22, 1)\n', (329, 336), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# -*- Mode: python -*-
#
# $Id: Simplex.py,v 1.2 2004/05/31 14:01:06 vivake Exp $
#
# Copyright (c) 2002-2004 <NAME> (vivakeATlab49.com). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATlab49.com) and is available at:
# http://shell.lab49.com/~vivake/python/Simplex.py
#
# 1.2 ( 5/2004) - Fixed a bug found by Noboru Yamamoto <noboru.yamamotoATkek.jp>
# which caused minimize() not to converge, and reach the maxiter
# limit under some conditions.
""" Simplex - a regression method for arbitrary nonlinear function minimization
Simplex minimizes an arbitrary nonlinear function of N variables by the
Nedler-Mead Simplex method as described in:
<NAME>. and <NAME>. "A Simplex Method for Function Minimization."
Computer Journal 7 (1965): 308-313.
It makes no assumptions about the smoothness of the function being minimized.
It converges to a local minimum which may or may not be the global minimum
depending on the initial guess used as a starting point.
"""
import math
import copy
import numpy
class Simplex:
def __init__(self, testfunc, guess, xdata=None, ydata=None,
increments=None, hb=None, lb=None,
kR=-1, kE=2, kC=0.5):
"""Initializes the simplex.
INPUTS
------
testfunc the function to minimize
guess[] an list containing initial guesses
xdata[] array with the x time points (passed to func)
ydata[] array with data against which to minimize (passed to func)
hb high bounds on parameters (same length as guess)
lb low bounds on parameters (same length as guess)
increments[] an list containing increments, perturbation size
kR reflection constant
kE expansion constant
kC contraction constant
"""
self.testfunc = testfunc
self.guess = guess
self.Xdata = xdata
self.Ydata = ydata
if increments == None:
self.increments = numpy.ones(len(guess))
else:
self.increments = increments
self.hb = hb
self.lb = lb
self.kR = kR
self.kE = kE
self.kC = kC
self.numvars = len(self.guess)
self.lbfac = 1.0
self.hbfac = 1.0
self.simplex = []
self.lowest = -1
self.highest = -1
self.secondhighest = -1
self.errors = []
self.currenterror = 0
# Initialize vertices
# Two extras to store centroid and reflected point
for vertex in range(0, self.numvars + 3):
self.simplex.append(copy.copy(self.guess))
# Use initial increments
for vertex in range(0, self.numvars + 1):
for x in range(0, self.numvars):
if x == (vertex - 1):
self.simplex[vertex][x] = self.guess[
x] + self.increments[x]
self.errors.append(0)
self.calculate_errors_at_vertices()
def minimize(self, epsilon=1e-6, maxiters=5000, monitor=1):
"""Walks the simplex down to a local minima.
INPUTS
------
epsilon convergence requirement
maxiters maximum number of iterations
monitor if non-zero, progress info is output to stdout
OUTPUTS
-------
an array containing the final values
lowest value of the error function
number of iterations taken to get here
"""
iter = 0
for iter in range(0, maxiters):
# Identify highest, second highest, and lowest vertices
self.highest = 0
self.lowest = 0
for vertex in range(0, self.numvars + 1):
if self.errors[vertex] > self.errors[self.highest]:
self.highest = vertex
if self.errors[vertex] < self.errors[self.lowest]:
self.lowest = vertex
self.secondhighest = 0
for vertex in range(0, self.numvars + 1):
if vertex == self.highest:
continue
if self.errors[vertex] > self.errors[self.secondhighest]:
self.secondhighest = vertex
# Test for convergence
S = 0.0
S1 = 0.0
for vertex in range(0, self.numvars + 1):
S = S + self.errors[vertex]
F2 = S / (self.numvars + 1)
for vertex in range(0, self.numvars + 1):
S1 = S1 + (self.errors[vertex] - F2) ** 2
T = math.sqrt(S1 / self.numvars)
# Optionally, print progress information
if monitor:
print('Iteration = %d Best = %f Worst = %f\r' % (iter, self.errors[self.lowest], self.errors[self.highest]), end=' ')
if T <= epsilon: # We converged! Break out of loop!
print("CONVERGED!")
break
else: # Didn't converge. Keep crunching.
# Calculate centroid of simplex, excluding highest vertex
for x in range(0, self.numvars):
S = 0.0
for vertex in range(0, self.numvars + 1):
if vertex == self.highest:
continue
S = S + self.simplex[vertex][x]
self.simplex[self.numvars + 1][x] = S / self.numvars
self.reflect_simplex()
self.currenterror = self.testfunc(
self.guess, self.Xdata, self.Ydata)
if self.currenterror < self.errors[self.lowest]:
tmp = self.currenterror
self.expand_simplex()
self.currenterror = self.testfunc(
self.guess, self.Xdata, self.Ydata)
if self.currenterror < tmp:
self.accept_expanded_point()
else:
self.currenterror = tmp
self.accept_reflected_point()
elif self.currenterror <= self.errors[self.secondhighest]:
self.accept_reflected_point()
elif self.currenterror <= self.errors[self.highest]:
self.accept_reflected_point()
self.contract_simplex()
self.currenterror = self.testfunc(
self.guess, self.Xdata, self.Ydata)
if self.currenterror < self.errors[self.highest]:
self.accept_contracted_point()
else:
self.multiple_contract_simplex()
elif self.currenterror >= self.errors[self.highest]:
self.contract_simplex()
self.currenterror = self.testfunc(
self.guess, self.Xdata, self.Ydata)
if self.currenterror < self.errors[self.highest]:
self.accept_contracted_point()
else:
self.multiple_contract_simplex()
# Either converged or reached the maximum number of iterations.
# Return the lowest vertex and the currenterror.
for x in range(0, self.numvars):
self.guess[x] = self.simplex[self.lowest][x]
self.currenterror = self.errors[self.lowest]
return self.guess, self.currenterror, iter
def contract_simplex(self):
for x in range(0, self.numvars):
self.guess[x] = self.kC * self.simplex[self.highest][x] + \
(1 - self.kC) * self.simplex[self.numvars + 1][x]
self.check_bounds()
return
def expand_simplex(self):
for x in range(0, self.numvars):
self.guess[x] = self.kE * self.guess[x] + \
(1 - self.kE) * self.simplex[self.numvars + 1][x]
self.check_bounds()
return
def reflect_simplex(self):
for x in range(0, self.numvars):
self.guess[x] = self.kR * self.simplex[self.highest][x] + \
(1 - self.kR) * self.simplex[self.numvars + 1][x]
# REMEMBER THE REFLECTED POINT
self.simplex[self.numvars + 2][x] = self.guess[x]
self.check_bounds()
return
def multiple_contract_simplex(self):
for vertex in range(0, self.numvars + 1):
if vertex == self.lowest:
continue
for x in range(0, self.numvars):
self.simplex[vertex][
x] = 0.5 * (self.simplex[vertex][x] + self.simplex[self.lowest][x])
self.calculate_errors_at_vertices()
return
def accept_contracted_point(self):
self.errors[self.highest] = self.currenterror
for x in range(0, self.numvars):
self.simplex[self.highest][x] = self.guess[x]
return
def accept_expanded_point(self):
self.errors[self.highest] = self.currenterror
for x in range(0, self.numvars):
self.simplex[self.highest][x] = self.guess[x]
return
def accept_reflected_point(self):
self.errors[self.highest] = self.currenterror
for x in range(0, self.numvars):
self.simplex[self.highest][x] = self.simplex[self.numvars + 2][x]
return
def calculate_errors_at_vertices(self):
for vertex in range(0, self.numvars + 1):
if vertex == self.lowest:
continue
for x in range(0, self.numvars):
self.guess[x] = self.simplex[vertex][x]
self.currenterror = self.testfunc(
self.guess, self.Xdata, self.Ydata)
self.errors[vertex] = self.currenterror
return
def check_bounds(self):
for vertex in range(0, self.numvars + 1):
for x in range(0, self.numvars):
if self.hb is not None and self.hb[x] < self.guess[x]:
self.guess[x] = self.hb[x] * self.hbfac
if self.lb is not None and self.lb[x] < self.guess[x]:
self.guess[x] = self.lb[x] * self.lbfac
return
def objective_function(args):
return abs(args[0] * args[0] * args[0] * 5 - args[1] * args[1] * 7 + math.sqrt(abs(args[0])) - 118)
def testf(p, x, y0=None, noise=0.):
x = numpy.array(x)
y = p[0] * (1.0 - numpy.exp(-x / p[1])) * numpy.exp(-x / p[2]) + p[3]
if noise > 0.:
y = y + numpy.random.normal(loc=0., scale=noise, size=y.shape[0])
if y0 is None:
return numpy.array(y)
else:
return numpy.sqrt(numpy.sum((y - y0) ** 2))
def test():
x = numpy.arange(0, 50, 0.01)
p = [2, 0.5, 3.0, -60]
y = testf(p, x, noise=0.00)
p0 = [0.5, 0.5, 0.5, 0.5]
s = Simplex(testf, p0, xdata=x, ydata=y, increments=[2, 4, 6, 8])
values, err, iter = s.minimize()
print('args = ', values)
print('error = ', err)
print('iterations = ', iter)
import matplotlib.pylab as MP
MP.figure()
MP.plot(x, y, 'k') # original functoin
#(ys, dt) = testf(p0, x)
#MP.plot(x, ys, 'g', linewidth=2) # initial guess
yf = testf(values, x)
MP.plot(x, yf, 'r--', linewidth=2.0) # converged solution
MP.show()
if __name__ == '__main__':
test()
| [
"numpy.sum",
"math.sqrt",
"matplotlib.pylab.figure",
"copy.copy",
"matplotlib.pylab.plot",
"numpy.array",
"numpy.arange",
"numpy.exp",
"numpy.random.normal",
"matplotlib.pylab.show"
] | [((11157, 11171), 'numpy.array', 'numpy.array', (['x'], {}), '(x)\n', (11168, 11171), False, 'import numpy\n'), ((11472, 11497), 'numpy.arange', 'numpy.arange', (['(0)', '(50)', '(0.01)'], {}), '(0, 50, 0.01)\n', (11484, 11497), False, 'import numpy\n'), ((11827, 11838), 'matplotlib.pylab.figure', 'MP.figure', ([], {}), '()\n', (11836, 11838), True, 'import matplotlib.pylab as MP\n'), ((11843, 11861), 'matplotlib.pylab.plot', 'MP.plot', (['x', 'y', '"""k"""'], {}), "(x, y, 'k')\n", (11850, 11861), True, 'import matplotlib.pylab as MP\n'), ((11997, 12033), 'matplotlib.pylab.plot', 'MP.plot', (['x', 'yf', '"""r--"""'], {'linewidth': '(2.0)'}), "(x, yf, 'r--', linewidth=2.0)\n", (12004, 12033), True, 'import matplotlib.pylab as MP\n'), ((12060, 12069), 'matplotlib.pylab.show', 'MP.show', ([], {}), '()\n', (12067, 12069), True, 'import matplotlib.pylab as MP\n'), ((11373, 11387), 'numpy.array', 'numpy.array', (['y'], {}), '(y)\n', (11384, 11387), False, 'import numpy\n'), ((5372, 5400), 'math.sqrt', 'math.sqrt', (['(S1 / self.numvars)'], {}), '(S1 / self.numvars)\n', (5381, 5400), False, 'import math\n'), ((11218, 11238), 'numpy.exp', 'numpy.exp', (['(-x / p[2])'], {}), '(-x / p[2])\n', (11227, 11238), False, 'import numpy\n'), ((11281, 11339), 'numpy.random.normal', 'numpy.random.normal', ([], {'loc': '(0.0)', 'scale': 'noise', 'size': 'y.shape[0]'}), '(loc=0.0, scale=noise, size=y.shape[0])\n', (11300, 11339), False, 'import numpy\n'), ((11424, 11448), 'numpy.sum', 'numpy.sum', (['((y - y0) ** 2)'], {}), '((y - y0) ** 2)\n', (11433, 11448), False, 'import numpy\n'), ((3426, 3447), 'copy.copy', 'copy.copy', (['self.guess'], {}), '(self.guess)\n', (3435, 3447), False, 'import copy\n'), ((11194, 11214), 'numpy.exp', 'numpy.exp', (['(-x / p[1])'], {}), '(-x / p[1])\n', (11203, 11214), False, 'import numpy\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test finding a new design point, thetaT
@author: <NAME> [University of Washington, Seattle], 2018
@email: dflemin3 (at) uw (dot) edu
"""
from approxposterior import approx, likelihood as lh, gpUtils
import numpy as np
import george
def testFindAmp():
"""
Test the findNextPoint function.
"""
# Define algorithm parameters
m0 = 50 # Initial size of training set
bounds = ((-5,5), (-5,5)) # Prior bounds
algorithm = "bape"
# For reproducibility
seed = 57
np.random.seed(seed)
# Randomly sample initial conditions from the prior
# Note: adding corner cases because approxposterior loves corners
theta = np.array(list(lh.rosenbrockSample(m0)) + [[-5, 5], [5, 5]])
# Evaluate forward model log likelihood + lnprior for each theta
y = np.zeros(len(theta))
for ii in range(len(theta)):
y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(theta[ii])
# Set up a gp
gp = gpUtils.defaultGP(theta, y, fitAmp=True)
# Initialize object using the Wang & Li (2017) Rosenbrock function example
# using default ExpSquaredKernel GP
ap = approx.ApproxPosterior(theta=theta,
y=y,
gp=gp,
lnprior=lh.rosenbrockLnprior,
lnlike=lh.rosenbrockLnlike,
priorSample=lh.rosenbrockSample,
bounds=bounds,
algorithm=algorithm)
# Find new point!
thetaT = ap.findNextPoint(computeLnLike=False,
bounds=bounds,
seed=seed)
err_msg = "findNextPoint selected incorrect thetaT."
assert(np.allclose(thetaT, [-2.03449242, -3.07172107], rtol=1.0e-3)), err_msg
# end function
def testFindNoAmp():
"""
Test the findNextPoint function.
"""
# Define algorithm parameters
m0 = 50 # Initial size of training set
bounds = ((-5,5), (-5,5)) # Prior bounds
algorithm = "bape"
# For reproducibility
seed = 57
np.random.seed(seed)
# Randomly sample initial conditions from the prior
# Note: adding corner cases because approxposterior loves corners
theta = np.array(list(lh.rosenbrockSample(m0)) + [[-5, 5], [5, 5]])
# Evaluate forward model log likelihood + lnprior for each theta
y = np.zeros(len(theta))
for ii in range(len(theta)):
y[ii] = lh.rosenbrockLnlike(theta[ii]) + lh.rosenbrockLnprior(theta[ii])
# Set up a gp
gp = gpUtils.defaultGP(theta, y, fitAmp=False)
# Initialize object using the Wang & Li (2017) Rosenbrock function example
# using default ExpSquaredKernel GP
ap = approx.ApproxPosterior(theta=theta,
y=y,
gp=gp,
lnprior=lh.rosenbrockLnprior,
lnlike=lh.rosenbrockLnlike,
priorSample=lh.rosenbrockSample,
bounds=bounds,
algorithm=algorithm)
# Find new point!
thetaT = ap.findNextPoint(computeLnLike=False,
bounds=bounds,
seed=seed)
err_msg = "findNextPoint selected incorrect thetaT."
assert(np.allclose(thetaT, [0.79813416, 0.85542199], rtol=1.0e-3)), err_msg
# end function
if __name__ == "__main__":
testFindAmp()
testFindNoAmp()
| [
"numpy.random.seed",
"approxposterior.gpUtils.defaultGP",
"numpy.allclose",
"approxposterior.likelihood.rosenbrockSample",
"approxposterior.likelihood.rosenbrockLnprior",
"approxposterior.likelihood.rosenbrockLnlike",
"approxposterior.approx.ApproxPosterior"
] | [((585, 605), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (599, 605), True, 'import numpy as np\n'), ((1046, 1086), 'approxposterior.gpUtils.defaultGP', 'gpUtils.defaultGP', (['theta', 'y'], {'fitAmp': '(True)'}), '(theta, y, fitAmp=True)\n', (1063, 1086), False, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((1216, 1400), 'approxposterior.approx.ApproxPosterior', 'approx.ApproxPosterior', ([], {'theta': 'theta', 'y': 'y', 'gp': 'gp', 'lnprior': 'lh.rosenbrockLnprior', 'lnlike': 'lh.rosenbrockLnlike', 'priorSample': 'lh.rosenbrockSample', 'bounds': 'bounds', 'algorithm': 'algorithm'}), '(theta=theta, y=y, gp=gp, lnprior=lh.\n rosenbrockLnprior, lnlike=lh.rosenbrockLnlike, priorSample=lh.\n rosenbrockSample, bounds=bounds, algorithm=algorithm)\n', (1238, 1400), False, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((1844, 1903), 'numpy.allclose', 'np.allclose', (['thetaT', '[-2.03449242, -3.07172107]'], {'rtol': '(0.001)'}), '(thetaT, [-2.03449242, -3.07172107], rtol=0.001)\n', (1855, 1903), True, 'import numpy as np\n'), ((2231, 2251), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2245, 2251), True, 'import numpy as np\n'), ((2692, 2733), 'approxposterior.gpUtils.defaultGP', 'gpUtils.defaultGP', (['theta', 'y'], {'fitAmp': '(False)'}), '(theta, y, fitAmp=False)\n', (2709, 2733), False, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((2863, 3047), 'approxposterior.approx.ApproxPosterior', 'approx.ApproxPosterior', ([], {'theta': 'theta', 'y': 'y', 'gp': 'gp', 'lnprior': 'lh.rosenbrockLnprior', 'lnlike': 'lh.rosenbrockLnlike', 'priorSample': 'lh.rosenbrockSample', 'bounds': 'bounds', 'algorithm': 'algorithm'}), '(theta=theta, y=y, gp=gp, lnprior=lh.\n rosenbrockLnprior, lnlike=lh.rosenbrockLnlike, priorSample=lh.\n rosenbrockSample, bounds=bounds, algorithm=algorithm)\n', (2885, 3047), False, 'from approxposterior import approx, likelihood as lh, 
gpUtils\n'), ((3491, 3548), 'numpy.allclose', 'np.allclose', (['thetaT', '[0.79813416, 0.85542199]'], {'rtol': '(0.001)'}), '(thetaT, [0.79813416, 0.85542199], rtol=0.001)\n', (3502, 3548), True, 'import numpy as np\n'), ((953, 983), 'approxposterior.likelihood.rosenbrockLnlike', 'lh.rosenbrockLnlike', (['theta[ii]'], {}), '(theta[ii])\n', (972, 983), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((986, 1017), 'approxposterior.likelihood.rosenbrockLnprior', 'lh.rosenbrockLnprior', (['theta[ii]'], {}), '(theta[ii])\n', (1006, 1017), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((2599, 2629), 'approxposterior.likelihood.rosenbrockLnlike', 'lh.rosenbrockLnlike', (['theta[ii]'], {}), '(theta[ii])\n', (2618, 2629), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((2632, 2663), 'approxposterior.likelihood.rosenbrockLnprior', 'lh.rosenbrockLnprior', (['theta[ii]'], {}), '(theta[ii])\n', (2652, 2663), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((759, 782), 'approxposterior.likelihood.rosenbrockSample', 'lh.rosenbrockSample', (['m0'], {}), '(m0)\n', (778, 782), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n'), ((2405, 2428), 'approxposterior.likelihood.rosenbrockSample', 'lh.rosenbrockSample', (['m0'], {}), '(m0)\n', (2424, 2428), True, 'from approxposterior import approx, likelihood as lh, gpUtils\n')] |
#!/usr/bin/env python3
from os import path
import numpy as np
import pandas as pd
import sys
parent_path = path.abspath('..')
sys.path.insert(0, parent_path)
import unittest
import dfFunctions
import tf_models
import recommender as re
from utils import rmse
def run_test(testClass, header):
"""
Function to run all the tests from a class of tests.
:type testClass: unittest.TesCase
:type header: str
"""
print(header)
suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestBasic(unittest.TestCase):
"""
Class that test all the basic functions
"""
def test_rmse(self):
"""
Test to check if the rmse function deals
with arrays of different sizes and if it behaves
normally.
"""
array1 = np.array([1, 1, 1, 1])
array2 = np.array([1, 1, 1, 1, 2])
array3 = np.array([2, 2, 2, 2])
self.assertRaises(AssertionError, rmse, array1, array2)
self.assertTrue(rmse(array3, array1) == 1)
class TestdfManipulation(unittest.TestCase):
"""
Class with tests for dataframe manipulation.
"""
def test_load_dataframe(self):
"""
Test to check if the function load_dataframe is working
with all the datasets from movielens.
"""
path1 = parent_path + '/movielens/ml-1m/ratings.dat'
path10 = parent_path + '/movielens/ml-10m/ratings.dat'
path20 = parent_path + '/movielens/ml-20m/ratings.csv'
df1 = dfFunctions.load_dataframe(path1)
df10 = dfFunctions.load_dataframe(path10)
df20 = dfFunctions.load_dataframe(path20)
self.assertTrue(type(df1) == pd.core.frame.DataFrame)
self.assertTrue(type(df10) == pd.core.frame.DataFrame)
self.assertTrue(type(df20) == pd.core.frame.DataFrame)
def test_batch(self):
"""
Test to check if the batchgenerator class is creating
different batches of the same size.
"""
path = parent_path + '/movielens/ml-1m/ratings.dat'
df = dfFunctions.load_dataframe(path)
batch_size = 100
generator = dfFunctions.BatchGenerator(df,
batch_size,
'user',
'item',
'rating')
old_observation = None
count = 0
num_of_tests = 200
for i in range(num_of_tests):
batch = generator.get_batch()
current_observation = (batch[0][0], batch[1][0], batch[2][0])
if current_observation == old_observation:
count += 1
old_observation = current_observation
self.assertTrue(len(batch[0]) == batch_size)
self.assertTrue(len(batch[1]) == batch_size)
self.assertTrue(len(batch[2]) == batch_size)
self.assertTrue(count < num_of_tests)
def test_dataframe_separation(self):
"""
Test to check if the class SVDmodel is separating
the dataframe in train, test and valid dataframes
with the right proportions.
"""
path = parent_path + '/movielens/ml-1m/ratings.dat'
df = dfFunctions.load_dataframe(path)
model = re.SVDmodel(df, 'user', 'item', 'rating')
sum_of_sizes = len(model.train) + len(model.test) + len(model.valid)
proportion_train = len(model.train)/len(df)
proportion_test = len(model.test)/len(df)
proportion_valid = len(model.valid)/len(df)
right_proportions = np.array([0.8, 0.1, 0.1])
proportions = np.array([proportion_train,
proportion_test,
proportion_valid])
error = rmse(proportions, right_proportions)
self.assertTrue(len(df) == sum_of_sizes)
self.assertTrue(error < 0.1,
"""\n The right proportions are (train,test,valid) =
{0}, but the model is separating the dataframe with the
proportions {1}"""
.format(right_proportions, proportions))
def test_dataframe_intersection(self):
"""
Test to check if the train, test and valid dataframes
have no intersection between them.
"""
path = parent_path + '/movielens/ml-1m/ratings.dat'
df = dfFunctions.load_dataframe(path)
model = re.SVDmodel(df, 'user', 'item', 'rating')
dic_intersection = dfFunctions.count_intersection(model.train,
model.test,
model.valid)
self.assertTrue(dic_intersection['1-2'] == 0,
"""\n The intersection between
the train and test dataframe
is {0}""".format(dic_intersection['1-2']))
self.assertTrue(dic_intersection['1-3'] == 0,
"""\n The intersection between
the train and valid dataframe
is {0}""".format(dic_intersection['1-3']))
self.assertTrue(dic_intersection['2-3'] == 0,
"""\n The intersection between
the test and valid dataframe
is {0}""".format(dic_intersection['2-3']))
class TestSVD(unittest.TestCase):
    """
    Class with tests for the SVD model.

    Both tests share the same expensive setup (load the ml-1m ratings,
    build the model, run 5000 training steps), which is factored into
    the private helper _train_model.
    """
    def _train_model(self):
        """Load the ratings, build the SVD model and train it.

        Returns
        -------
        (model, num_steps): the trained model and the number of
        training steps that were run.
        """
        path = parent_path + '/movielens/ml-1m/ratings.dat'
        df = dfFunctions.load_dataframe(path)
        model = re.SVDmodel(df, 'user', 'item', 'rating')
        dimension = 12
        regularizer_constant = 0.05
        learning_rate = 0.001
        momentum_factor = 0.9
        batch_size = 1000
        num_steps = 5000
        print("\n")
        model.training(dimension,
                       regularizer_constant,
                       learning_rate,
                       momentum_factor,
                       batch_size,
                       num_steps)
        return model, num_steps

    def test_upperbound(self):
        """
        We run 5000 steps of training and check if the root mean square error
        from the valid dataset is less than 1.0 in the SVD model
        """
        model, num_steps = self._train_model()
        prediction = model.valid_prediction()
        self.assertTrue(prediction <= 1.0,
                        """\n with num_steps = {0} \n, the mean square error
            of the valid dataset should be
            less than 1 and not {1}"""
                        .format(num_steps, prediction))

    def test_prediction(self):
        """
        We run 5000 steps of training and check if the difference
        between the prediction mean and the actual mean is less than
        0.9 in the SVD model
        """
        model, num_steps = self._train_model()
        # compare the mean predicted rating against the mean observed
        # rating on the first ten rows of the valid set
        user_example = np.array(model.valid['user'])[0:10]
        movies_example = np.array(model.valid['item'])[0:10]
        actual_ratings = np.mean(np.array(model.valid['rating'])[0:10])
        predicted_ratings = np.mean(model.prediction(user_example,
                                                  movies_example))
        difference = np.absolute(actual_ratings - predicted_ratings)
        self.assertTrue(difference <= 0.9,
                        """\n with num_steps = {0} \n, the difference should be
            less than 0.9 and not {1}"""
                        .format(num_steps, difference))
class TestNSVD(unittest.TestCase):
    """
    Class with tests for the NSVD model.

    The two training tests share the same expensive setup (load the
    ml-1m ratings, build the model, run 5000 training steps), which is
    factored into the private helper _train_model.
    """
    def _train_model(self):
        """Load the ratings, build the NSVD model and train it.

        Returns
        -------
        (model, num_steps): the trained model and the number of
        training steps that were run.
        """
        path = parent_path + '/movielens/ml-1m/ratings.dat'
        df = dfFunctions.load_dataframe(path)
        model = re.SVDmodel(df, 'user', 'item', 'rating', 'nsvd', 'mean')
        dimension = 12
        regularizer_constant = 0.05
        learning_rate = 0.001
        momentum_factor = 0.9
        batch_size = 1000
        num_steps = 5000
        print("\n")
        model.training(dimension,
                       regularizer_constant,
                       learning_rate,
                       momentum_factor,
                       batch_size,
                       num_steps)
        return model, num_steps

    def test_rated_items(self):
        """
        Test to check if the method _set_item_dic creates
        a dic with user:rated_items such that
        len(rated_items) == max_size.
        """
        path = parent_path + '/movielens/ml-1m/ratings.dat'
        df = dfFunctions.load_dataframe(path)
        finder = dfFunctions.ItemFinder(df, 'user', 'item', 'rating', 'mean')
        all_users = df['user'].unique()
        count = 0
        problem_users = []
        for user in all_users:
            r_items = finder.dic[user]
            # every per-user array must have the uniform size and dtype
            if len(r_items) == finder.size and r_items.dtype == 'int32':
                count += 1
            else:
                problem_users.append(user)
        self.assertTrue(count == len(all_users),
                        """\n There are {0} arrays in dic such that
            len(dic[user]) != finder.size or with wrong types. And these
            users are {1}""".format(count, problem_users))

    def test_upperbound(self):
        """
        We run 5000 steps of training and check if the root mean square error
        from the valid dataset is less than 1.0 in the NSVD model
        """
        model, num_steps = self._train_model()
        prediction = model.valid_prediction()
        self.assertTrue(prediction <= 1.0,
                        """\n with num_steps = {0} \n, the mean square
            error of the valid dataset should be less
            than 1 and not {1}"""
                        .format(num_steps, prediction))

    def test_prediction(self):
        """
        We run 5000 steps of training and check if the difference
        between the prediction mean and the actual mean is less than
        0.9 in the SVD model
        """
        model, num_steps = self._train_model()
        # compare the mean predicted rating against the mean observed
        # rating on the first ten rows of the valid set
        user_example = np.array(model.valid['user'])[0:10]
        movies_example = np.array(model.valid['item'])[0:10]
        actual_ratings = np.mean(np.array(model.valid['rating'])[0:10])
        predicted_ratings = np.mean(model.prediction(user_example,
                                                  movies_example))
        difference = np.absolute(actual_ratings - predicted_ratings)
        self.assertTrue(difference <= 0.9,
                        """\n with num_steps = {0} \n, the difference should be
            less than 0.9 and not {1}"""
                        .format(num_steps, difference))
| [
"numpy.absolute",
"os.path.abspath",
"dfFunctions.load_dataframe",
"unittest.TextTestRunner",
"dfFunctions.count_intersection",
"dfFunctions.ItemFinder",
"sys.path.insert",
"numpy.array",
"dfFunctions.BatchGenerator",
"recommender.SVDmodel",
"unittest.TestLoader",
"utils.rmse"
] | [((108, 126), 'os.path.abspath', 'path.abspath', (['""".."""'], {}), "('..')\n", (120, 126), False, 'from os import path\n'), ((127, 158), 'sys.path.insert', 'sys.path.insert', (['(0)', 'parent_path'], {}), '(0, parent_path)\n', (142, 158), False, 'import sys\n'), ((853, 875), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (861, 875), True, 'import numpy as np\n'), ((893, 918), 'numpy.array', 'np.array', (['[1, 1, 1, 1, 2]'], {}), '([1, 1, 1, 1, 2])\n', (901, 918), True, 'import numpy as np\n'), ((936, 958), 'numpy.array', 'np.array', (['[2, 2, 2, 2]'], {}), '([2, 2, 2, 2])\n', (944, 958), True, 'import numpy as np\n'), ((1556, 1589), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path1'], {}), '(path1)\n', (1582, 1589), False, 'import dfFunctions\n'), ((1605, 1639), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path10'], {}), '(path10)\n', (1631, 1639), False, 'import dfFunctions\n'), ((1655, 1689), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path20'], {}), '(path20)\n', (1681, 1689), False, 'import dfFunctions\n'), ((2108, 2140), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (2134, 2140), False, 'import dfFunctions\n'), ((2186, 2254), 'dfFunctions.BatchGenerator', 'dfFunctions.BatchGenerator', (['df', 'batch_size', '"""user"""', '"""item"""', '"""rating"""'], {}), "(df, batch_size, 'user', 'item', 'rating')\n", (2212, 2254), False, 'import dfFunctions\n'), ((3313, 3345), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (3339, 3345), False, 'import dfFunctions\n'), ((3362, 3403), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""'], {}), "(df, 'user', 'item', 'rating')\n", (3373, 3403), True, 'import recommender as re\n'), ((3663, 3688), 'numpy.array', 'np.array', (['[0.8, 0.1, 0.1]'], {}), '([0.8, 0.1, 0.1])\n', (3671, 3688), True, 'import numpy as np\n'), ((3711, 
3774), 'numpy.array', 'np.array', (['[proportion_train, proportion_test, proportion_valid]'], {}), '([proportion_train, proportion_test, proportion_valid])\n', (3719, 3774), True, 'import numpy as np\n'), ((3856, 3892), 'utils.rmse', 'rmse', (['proportions', 'right_proportions'], {}), '(proportions, right_proportions)\n', (3860, 3892), False, 'from utils import rmse\n'), ((4490, 4522), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (4516, 4522), False, 'import dfFunctions\n'), ((4539, 4580), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""'], {}), "(df, 'user', 'item', 'rating')\n", (4550, 4580), True, 'import recommender as re\n'), ((4608, 4676), 'dfFunctions.count_intersection', 'dfFunctions.count_intersection', (['model.train', 'model.test', 'model.valid'], {}), '(model.train, model.test, model.valid)\n', (4638, 4676), False, 'import dfFunctions\n'), ((5772, 5804), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (5798, 5804), False, 'import dfFunctions\n'), ((5821, 5862), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""'], {}), "(df, 'user', 'item', 'rating')\n", (5832, 5862), True, 'import recommender as re\n'), ((6903, 6935), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (6929, 6935), False, 'import dfFunctions\n'), ((6952, 6993), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""'], {}), "(df, 'user', 'item', 'rating')\n", (6963, 6993), True, 'import recommender as re\n'), ((7763, 7810), 'numpy.absolute', 'np.absolute', (['(actual_ratings - predicted_ratings)'], {}), '(actual_ratings - predicted_ratings)\n', (7774, 7810), True, 'import numpy as np\n'), ((8409, 8441), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (8435, 8441), False, 'import dfFunctions\n'), ((8459, 8519), 
'dfFunctions.ItemFinder', 'dfFunctions.ItemFinder', (['df', '"""user"""', '"""item"""', '"""rating"""', '"""mean"""'], {}), "(df, 'user', 'item', 'rating', 'mean')\n", (8481, 8519), False, 'import dfFunctions\n'), ((9358, 9390), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (9384, 9390), False, 'import dfFunctions\n'), ((9407, 9464), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""', '"""nsvd"""', '"""mean"""'], {}), "(df, 'user', 'item', 'rating', 'nsvd', 'mean')\n", (9418, 9464), True, 'import recommender as re\n'), ((10505, 10537), 'dfFunctions.load_dataframe', 'dfFunctions.load_dataframe', (['path'], {}), '(path)\n', (10531, 10537), False, 'import dfFunctions\n'), ((10554, 10611), 'recommender.SVDmodel', 're.SVDmodel', (['df', '"""user"""', '"""item"""', '"""rating"""', '"""nsvd"""', '"""mean"""'], {}), "(df, 'user', 'item', 'rating', 'nsvd', 'mean')\n", (10565, 10611), True, 'import recommender as re\n'), ((11381, 11428), 'numpy.absolute', 'np.absolute', (['(actual_ratings - predicted_ratings)'], {}), '(actual_ratings - predicted_ratings)\n', (11392, 11428), True, 'import numpy as np\n'), ((458, 479), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (477, 479), False, 'import unittest\n'), ((517, 553), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (540, 553), False, 'import unittest\n'), ((7436, 7465), 'numpy.array', 'np.array', (["model.valid['user']"], {}), "(model.valid['user'])\n", (7444, 7465), True, 'import numpy as np\n'), ((7497, 7526), 'numpy.array', 'np.array', (["model.valid['item']"], {}), "(model.valid['item'])\n", (7505, 7526), True, 'import numpy as np\n'), ((11054, 11083), 'numpy.array', 'np.array', (["model.valid['user']"], {}), "(model.valid['user'])\n", (11062, 11083), True, 'import numpy as np\n'), ((11115, 11144), 'numpy.array', 'np.array', (["model.valid['item']"], {}), 
"(model.valid['item'])\n", (11123, 11144), True, 'import numpy as np\n'), ((1047, 1067), 'utils.rmse', 'rmse', (['array3', 'array1'], {}), '(array3, array1)\n', (1051, 1067), False, 'from utils import rmse\n'), ((7566, 7597), 'numpy.array', 'np.array', (["model.valid['rating']"], {}), "(model.valid['rating'])\n", (7574, 7597), True, 'import numpy as np\n'), ((11184, 11215), 'numpy.array', 'np.array', (["model.valid['rating']"], {}), "(model.valid['rating'])\n", (11192, 11215), True, 'import numpy as np\n')] |
import numpy as np
from evobench import Benchmark, Solution
from evosolve.linkage import BaseEmpiricalLinkage, LinkageScrap
class EmpiricalLinkage(BaseEmpiricalLinkage):
    """Empirical linkage detection based on fitness perturbations.

    For a target gene, flips it, lets every other gene try a local
    single-flip improvement on both the base and the perturbed genome,
    and reports the genes whose preferred value differs between the two
    converged genomes as interacting with the target gene.
    """

    def __init__(self, benchmark: Benchmark):
        super(EmpiricalLinkage, self).__init__(benchmark)

    def get_scrap(self, base: Solution, target_index: int) -> LinkageScrap:
        """Return a LinkageScrap for ``target_index``.

        Parameters
        ----------
        base: solution the perturbation starts from; its fitness is
            evaluated lazily if not yet known.
        target_index: gene to flip.
        """
        # FIX: use an explicit ``is None`` check — ``not base.fitness``
        # also triggered on a legitimate fitness of 0.0 and caused a
        # needless re-evaluation.
        if base.fitness is None:
            base.fitness = self.benchmark.evaluate_solution(base)

        # perturbed genome: base with the target gene flipped
        perturbed = base.genome.copy()
        perturbed[target_index] = not perturbed[target_index]
        perturbed = Solution(perturbed)
        perturbed.fitness = self.benchmark.evaluate_solution(perturbed)

        base_converged = base.genome.copy()
        perturbed_converged = perturbed.genome.copy()

        for i in range(self.benchmark.genome_size):
            if i == target_index:
                continue

            # flip gene i on both genomes and keep the flip only where
            # it improves fitness (one greedy local step per gene)
            base_c = base.genome.copy()
            perturbed_c = perturbed.genome.copy()

            base_c[i] = not base_c[i]
            perturbed_c[i] = not perturbed_c[i]

            base_c = Solution(base_c)
            perturbed_c = Solution(perturbed_c)

            base_c.fitness = self.benchmark.evaluate_solution(base_c)
            perturbed_c.fitness = self.benchmark.evaluate_solution(perturbed_c)

            if base_c.fitness > base.fitness:
                base_converged[i] = base_c.genome[i]

            if perturbed_c.fitness > perturbed.fitness:
                perturbed_converged[i] = perturbed_c.genome[i]

        # genes whose converged value differs interact with the target
        interactions = np.abs(base_converged - perturbed_converged)
        return LinkageScrap(target_index, interactions)
| [
"numpy.abs",
"evobench.Solution",
"evosolve.linkage.LinkageScrap"
] | [((572, 591), 'evobench.Solution', 'Solution', (['perturbed'], {}), '(perturbed)\n', (580, 591), False, 'from evobench import Benchmark, Solution\n'), ((1535, 1579), 'numpy.abs', 'np.abs', (['(base_converged - perturbed_converged)'], {}), '(base_converged - perturbed_converged)\n', (1541, 1579), True, 'import numpy as np\n'), ((1595, 1635), 'evosolve.linkage.LinkageScrap', 'LinkageScrap', (['target_index', 'interactions'], {}), '(target_index, interactions)\n', (1607, 1635), False, 'from evosolve.linkage import BaseEmpiricalLinkage, LinkageScrap\n'), ((1075, 1091), 'evobench.Solution', 'Solution', (['base_c'], {}), '(base_c)\n', (1083, 1091), False, 'from evobench import Benchmark, Solution\n'), ((1118, 1139), 'evobench.Solution', 'Solution', (['perturbed_c'], {}), '(perturbed_c)\n', (1126, 1139), False, 'from evobench import Benchmark, Solution\n')] |
import Gymwrappers as wrappers
import dqn_model
from PER import SumTree, Memory
import argparse
import time
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter
import pdb
DEFAULT_ENV_NAME = 'PongNoFrameskip-v4'
MEAN_REWARD_BOUND = 17#19.0
GAMMA = 0.99
BATCH_SIZE = 32
REPLAY_SIZE = 10_000 # maximum size
REPLAY_START_SIZE = 10_000 # size we wait before starting training
LEARNING_RATE = 1e-4
SYNC_TARGET_FRAMES = 1000 # elapsed frames after which target network updated
EPSILON_START = 1.0
EPSILON_FINAL = 0.01
EPSILON_DECAY_LAST_FRAME = 150_000
Experience = collections.namedtuple('Experience', \
field_names=['state', 'action', 'reward', 'done', \
'new_state'])
class PrioReplayBuffer:
    """Proportional prioritized experience replay (flat-array variant).

    Transitions are stored in a circular list; each slot carries a
    priority, and sampling draws transitions with probability
    proportional to priority ** prob_alpha, returning importance
    weights to correct the induced bias.
    """

    def __init__(self, buf_size, prob_alpha=0.6):
        self.prob_alpha = prob_alpha
        self.capacity = buf_size
        self.pos = 0  # next write position (wraps around)
        self.buffer = []
        self.priorities = np.zeros((buf_size, ), dtype=np.float32)

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        # new transitions get the current maximum priority so they are
        # guaranteed to be sampled at least once
        max_prio = self.priorities.max() if self.buffer else 1.0
        if len(self.buffer) < self.capacity:
            self.buffer.append(experience)
        else:
            self.buffer[self.pos] = experience
        self.priorities[self.pos] = max_prio
        self.pos = (self.pos + 1) % self.capacity

    def sample(self, batch_size, beta=0.4):
        """Draw a prioritized batch; returns (samples, indices, weights)."""
        filled = len(self.buffer)
        if filled == self.capacity:
            prios = self.priorities
        else:
            prios = self.priorities[:self.pos]

        probs = prios ** self.prob_alpha
        probs /= probs.sum()
        idxs = np.random.choice(filled, batch_size, p=probs)

        cols = zip(*[self.buffer[i] for i in idxs])
        states, actions, rewards, dones, next_states = cols
        samples = (np.array(states),
                   np.array(actions),
                   np.array(rewards, dtype=np.float32),
                   np.array(dones, dtype=np.uint8),
                   np.array(next_states))

        # importance-sampling weights, normalized by the maximum
        weights = (filled * probs[idxs]) ** (-beta)
        weights /= weights.max()
        return samples, idxs, np.array(weights, dtype=np.float32)

    def update_priorities(self, batch_indices, batch_priorities):
        for i, p in zip(batch_indices, batch_priorities):
            self.priorities[i] = p
class ExperienceBuffer():
    """Uniform-sampling FIFO replay buffer backed by a bounded deque."""

    def __init__(self, capacity):
        # the deque silently drops the oldest transition once full
        self.buffer = collections.deque(maxlen=capacity)

    def __len__(self):
        return len(self.buffer)

    def append(self, experience):
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Draw batch_size distinct transitions uniformly at random."""
        picks = np.random.choice(len(self.buffer), batch_size, replace=False)
        chosen = [self.buffer[i] for i in picks]
        states, actions, rewards, dones, next_states = zip(*chosen)
        return (np.array(states),
                np.array(actions),
                np.array(rewards, dtype=np.float32),
                np.array(dones, dtype=np.uint8),
                np.array(next_states))
class Agent():
    """Plays the environment with an epsilon-greedy (or noisy-net)
    policy and stores the observed transitions in the experience
    buffer."""

    def __init__(self, env, exp_buffer):
        self.env = env
        self.exp_buffer = exp_buffer
        self._reset()

    def _reset(self):
        """Start a fresh episode and clear the reward accumulator."""
        self.state = self.env.reset()
        self.total_reward = 0.0

    @torch.no_grad()  # agent is not learning while playing, so disable grads
    def play_step(self, net, epsilon=0.0, noisy_dqn=False, device="cpu"):
        """Take one environment step.

        Returns the total undiscounted episode reward when the episode
        finished on this step, otherwise None.
        """
        done_reward = None
        # choose action based on epsilon-greedy; noisy nets explore on
        # their own, so epsilon is ignored when noisy_dqn is True
        if not noisy_dqn and (np.random.random() < epsilon):
            action = self.env.action_space.sample()
        else:
            state_a = np.array([self.state], copy=False)  # add batch dimension
            state_v = torch.tensor(state_a).to(device)
            q_vals_v = net(state_v)
            _, act_v = torch.max(q_vals_v, dim=1)
            action = int(act_v.item())

        # do step in the environment
        new_state, reward, is_done, _ = self.env.step(action)
        self.total_reward += reward

        exp = Experience(self.state, action, reward,
                         is_done, new_state)
        self.exp_buffer.append(exp)
        self.state = new_state
        # check if episode ended, then clear reward accumulator and reset env
        if is_done:
            done_reward = self.total_reward
            self._reset()
        return done_reward

    @torch.no_grad()  # agent is not learning while playing, so disable grads
    def play_n_step(self, net, epsilon=0.0, n_step=1, gamma=0.99, noisy_dqn=False, device="cpu"):
        """Take up to n_step environment steps and store a single
        transition whose reward is the discounted n-step return.

        Returns the total undiscounted episode reward when the episode
        finished during these steps, otherwise None.
        """
        done_reward = None
        n_step_reward = 0
        for i in range(n_step):
            # choose action based on epsilon-greedy
            if not noisy_dqn and (np.random.random() < epsilon):
                action = self.env.action_space.sample()
            else:
                state_a = np.array([self.state], copy=False)  # add batch dimension
                state_v = torch.tensor(state_a).to(device)
                q_vals_v = net(state_v)
                _, act_v = torch.max(q_vals_v, dim=1)
                action = int(act_v.item())

            # cache first state, action in order to add to experience exp_buffer
            if i == 0:
                init_state = self.state
                init_action = action

            # do step in the environment
            new_state, reward, is_done, _ = self.env.step(action)
            # FIX: the discounted n-step return is sum_i gamma**i * r_i;
            # the previous gamma**(i-1) scaled the first reward by
            # 1/gamma and mis-discounted all subsequent ones.
            n_step_reward += gamma**i * reward
            self.total_reward += reward  # global accumulator for undiscounted reward
            if is_done:
                break
            self.state = new_state

        exp = Experience(init_state, init_action, n_step_reward,
                         is_done, new_state)
        self.exp_buffer.append(exp)
        # check if episode ended, then clear reward accumulator and reset env
        if is_done:
            done_reward = self.total_reward
            self._reset()
        return done_reward
def calc_loss(batch, net, tgt_net, gamma=0.99, ddqn=False, batch_weights=None, \
              device="cpu"):
    """Compute the one-step TD loss for a (D)DQN batch.

    batch is (states, actions, rewards, dones, next_states) as numpy
    arrays.  When batch_weights is given (prioritized replay), the
    per-sample squared errors are importance-weighted and returned as
    new priorities; otherwise a plain MSE loss is used and the second
    return value is None.
    """
    obs, acts, rews, terminals, next_obs = batch

    obs_t = torch.tensor(np.array(obs, copy=False)).to(device)
    next_obs_t = torch.tensor(np.array(next_obs, copy=False)).to(device)
    acts_t = torch.tensor(acts).to(device)
    rews_t = torch.tensor(rews).to(device)
    done_t = torch.BoolTensor(terminals).to(device)
    weights_t = None
    if batch_weights is not None:
        weights_t = torch.tensor(batch_weights).to(device)

    row_idx = torch.arange(0, obs.shape[0]).type(torch.long).to(device)
    # Q-value of the action actually taken in each state
    q_taken = net(obs_t)[row_idx, acts_t.type(torch.long)]

    with torch.no_grad():
        if ddqn:
            # Double DQN: online net selects, target net evaluates
            best_acts = net(next_obs_t).max(1)[1]
            q_next = tgt_net(next_obs_t)[row_idx, best_acts.type(torch.long)]
        else:
            q_next = tgt_net(next_obs_t).max(1)[0]
        q_next[done_t] = 0.0  # no bootstrap past the end of an episode
        q_next = q_next.detach()

    td_target = q_next * gamma + rews_t

    if weights_t is None:  # priority replay is disabled
        priority = None
        loss = nn.MSELoss()(q_taken, td_target)
    else:
        loss = weights_t * (q_taken - td_target) ** 2
        priority = loss + 1e-5
        loss = loss.mean()
    return loss, priority
def calc_loss_n_steps(batch, net, tgt_net, gamma =0.99, n_steps=1, \
                      ddqn=False, batch_weights=None, device="cpu"):
    """Compute the n-step TD loss for a (D)DQN batch.

    The buffer already stores the discounted n-step reward sum, so the
    n-step target differs from the one-step target only in the discount
    applied to the bootstrap term: gamma ** n_steps instead of gamma.
    Delegate to calc_loss with the adjusted discount rather than
    duplicating the whole computation (the two bodies were previously
    identical except for that single factor).
    """
    return calc_loss(batch, net, tgt_net, gamma=gamma ** n_steps,
                     ddqn=ddqn, batch_weights=batch_weights, device=device)
if __name__ == '__main__':
    # Command-line configuration.
    parser = argparse.ArgumentParser()
    parser.add_argument("--env", default=DEFAULT_ENV_NAME,
                        help="Name of the environment, default=" +
                        DEFAULT_ENV_NAME)
    parser.add_argument("--n_step", default=1, type=int, help="unrolling step in Bellman optimality eqn")
    # FIX: these boolean-like flags previously had no type=, so any
    # value passed on the command line (including "0") arrived as a
    # non-empty string and was treated as True; type=int restores the
    # documented "set to 1 to enable" behavior.
    parser.add_argument("--ddqn", default=0, type=int, help="set =1 to enable DDQN")
    parser.add_argument("--noisydqn", default=0, type=int, help='set to 1 to enable Noisy DQN/DDQN')
    parser.add_argument("--prioreplay", default=0, type=int, help='set to 1 to enable vanilla implementation of priority replay')
    parser.add_argument("--duelingdqn", default=0, type=int, help='set to 1 to enable priority replay')
    parser.add_argument("--BST_PER", default=0, type=int, help='set to 1 to enable Binary sum tree implementation of Experience Replay Memory')
    args = parser.parse_args()
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    env = wrappers.make_env(args.env)
    # Pick the architecture; tgt_net is the periodically-synced target
    # copy used for the Bellman bootstrap.
    if args.noisydqn:
        print("Choosing Noisy DQN architecture")
        net = dqn_model.NoisyDQN(env.observation_space.shape,
                                 env.action_space.n).to(device)
        tgt_net = dqn_model.NoisyDQN(env.observation_space.shape,
                                     env.action_space.n).to(device)
    elif args.duelingdqn:
        print("Choosing Dueling DQN architecture")
        net = dqn_model.DuelingDQN(env.observation_space.shape,
                                   env.action_space.n).to(device)
        tgt_net = dqn_model.DuelingDQN(env.observation_space.shape,
                                       env.action_space.n).to(device)
    else:
        print("Choosing Vanilla DQN architecture")
        net = dqn_model.DQN(env.observation_space.shape,
                            env.action_space.n).to(device)
        tgt_net = dqn_model.DQN(env.observation_space.shape,
                                env.action_space.n).to(device)
    if args.ddqn:
        print("Double DQN enabled")
    writer = SummaryWriter(comment="-" + args.env)
    print(net)

    # Pick the replay buffer implementation.
    if args.prioreplay:
        print("Vanilla implementation of Priority Replay")
        buffer = PrioReplayBuffer(REPLAY_SIZE)
        agent = Agent(env, buffer)
    elif args.BST_PER:
        print("Binary Sum Tree implementation of Priority Replay")
        buffer = Memory(REPLAY_SIZE)
        agent = Agent(env, buffer)
    else:
        print("No Priority Replay")
        buffer = ExperienceBuffer(REPLAY_SIZE)
        agent = Agent(env, buffer)

    epsilon = EPSILON_START
    optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
    total_rewards = []
    frame_idx = 0
    ts_frame = 0
    ts = time.time()
    best_m_reward = None

    # Main training loop: play, log, and learn.
    while True:
        frame_idx += 1
        # linearly decay epsilon down to EPSILON_FINAL
        epsilon = max(EPSILON_FINAL, EPSILON_START -
                      frame_idx / EPSILON_DECAY_LAST_FRAME)

        # play one (or n) step(s)
        if args.n_step == 1:
            reward = agent.play_step(net, epsilon, noisy_dqn=args.noisydqn, \
                                     device=device)
        else:
            reward = agent.play_n_step(net, epsilon=epsilon, \
                                       n_step=args.n_step, \
                                       gamma=GAMMA, noisy_dqn=args.noisydqn, \
                                       device=device)

        # reward is not None only when an episode just finished
        if reward is not None:
            total_rewards.append(reward)
            speed = (frame_idx - ts_frame) / (time.time() - ts)
            ts_frame = frame_idx
            ts = time.time()
            m_reward = np.mean(total_rewards[-100:])
            writer.add_scalar("epsilon", epsilon, frame_idx)
            writer.add_scalar("speed", speed, frame_idx)
            writer.add_scalar("reward_100", m_reward, frame_idx)
            writer.add_scalar("reward", reward, frame_idx)
            if args.noisydqn:
                # log the signal-to-noise ratio of each noisy layer
                for layer, snr in enumerate(net.noisylayer_snr()):
                    writer.add_scalar(f"layer:{layer}", snr, frame_idx)
                print(f"frame:{frame_idx}, games:{len(total_rewards)}, \
                        reward:{m_reward:.4f}, fps:{speed:.4f}")
            else:
                print(f"frame:{frame_idx}, games:{len(total_rewards)}, \
                        reward:{m_reward:.4f}, eps:{epsilon:.4f}, fps:{speed:.4f}")
            # checkpoint whenever the 100-episode mean improves
            if best_m_reward is None or best_m_reward < m_reward:
                torch.save(net.state_dict(), args.env +
                           "-best_%.0f.dat" % m_reward)
                if best_m_reward is not None:
                    print("Best reward updated %.3f -> %.3f" % (
                        best_m_reward, m_reward))
                best_m_reward = m_reward
            if m_reward > MEAN_REWARD_BOUND:
                print("Solved in %d frames!" % frame_idx)
                break

        # if Experience buffer less than threshold, then skip training
        if len(buffer) < REPLAY_START_SIZE:
            continue

        # Sync target network
        if frame_idx % SYNC_TARGET_FRAMES == 0:
            tgt_net.load_state_dict(net.state_dict())

        optimizer.zero_grad()
        if args.prioreplay or args.BST_PER:
            batch, batch_indices, batch_weights = buffer.sample(BATCH_SIZE)
        else:
            batch = buffer.sample(BATCH_SIZE)
            batch_weights = None

        if args.n_step == 1:
            loss_t, priority = calc_loss(batch, net, tgt_net, gamma =GAMMA, \
                                         ddqn=args.ddqn, batch_weights=batch_weights, \
                                         device=device)
        else:
            loss_t, priority = calc_loss_n_steps(batch, net, tgt_net, gamma =GAMMA, \
                                                 n_steps=args.n_step, ddqn=args.ddqn, \
                                                 batch_weights=batch_weights, \
                                                 device=device)
        loss_t.backward()
        optimizer.step()
        if args.prioreplay or args.BST_PER:  # update priorities in buffer
            buffer.update_priorities(batch_indices, priority.data.cpu().numpy())
    writer.close()
| [
"argparse.ArgumentParser",
"numpy.mean",
"torch.arange",
"PER.Memory",
"torch.no_grad",
"collections.deque",
"torch.nn.MSELoss",
"torch.BoolTensor",
"torch.max",
"torch.cuda.is_available",
"Gymwrappers.make_env",
"dqn_model.DuelingDQN",
"tensorboardX.SummaryWriter",
"numpy.zeros",
"time.... | [((653, 757), 'collections.namedtuple', 'collections.namedtuple', (['"""Experience"""'], {'field_names': "['state', 'action', 'reward', 'done', 'new_state']"}), "('Experience', field_names=['state', 'action',\n 'reward', 'done', 'new_state'])\n", (675, 757), False, 'import collections\n'), ((3368, 3383), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3381, 3383), False, 'import torch\n'), ((4499, 4514), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4512, 4514), False, 'import torch\n'), ((9606, 9631), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (9629, 9631), False, 'import argparse\n'), ((10543, 10570), 'Gymwrappers.make_env', 'wrappers.make_env', (['args.env'], {}), '(args.env)\n', (10560, 10570), True, 'import Gymwrappers as wrappers\n'), ((11573, 11610), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {'comment': "('-' + args.env)"}), "(comment='-' + args.env)\n", (11586, 11610), False, 'from tensorboardX import SummaryWriter\n'), ((12243, 12254), 'time.time', 'time.time', ([], {}), '()\n', (12252, 12254), False, 'import time\n'), ((1039, 1078), 'numpy.zeros', 'np.zeros', (['(buf_size,)'], {'dtype': 'np.float32'}), '((buf_size,), dtype=np.float32)\n', (1047, 1078), True, 'import numpy as np\n'), ((2574, 2608), 'collections.deque', 'collections.deque', ([], {'maxlen': 'capacity'}), '(maxlen=capacity)\n', (2591, 2608), False, 'import collections\n'), ((6874, 6889), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6887, 6889), False, 'import torch\n'), ((8613, 8628), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8626, 8628), False, 'import torch\n'), ((2284, 2319), 'numpy.array', 'np.array', (['weights'], {'dtype': 'np.float32'}), '(weights, dtype=np.float32)\n', (2292, 2319), True, 'import numpy as np\n'), ((2986, 3002), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (2994, 3002), True, 'import numpy as np\n'), ((3004, 3020), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', 
(3012, 3020), True, 'import numpy as np\n'), ((3022, 3056), 'numpy.array', 'np.array', (['reward'], {'dtype': 'np.float32'}), '(reward, dtype=np.float32)\n', (3030, 3056), True, 'import numpy as np\n'), ((3075, 3106), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.uint8'}), '(dones, dtype=np.uint8)\n', (3083, 3106), True, 'import numpy as np\n'), ((3108, 3129), 'numpy.array', 'np.array', (['next_states'], {}), '(next_states)\n', (3116, 3129), True, 'import numpy as np\n'), ((3742, 3776), 'numpy.array', 'np.array', (['[self.state]'], {'copy': '(False)'}), '([self.state], copy=False)\n', (3750, 3776), True, 'import numpy as np\n'), ((3928, 3954), 'torch.max', 'torch.max', (['q_vals_v'], {'dim': '(1)'}), '(q_vals_v, dim=1)\n', (3937, 3954), False, 'import torch\n'), ((6461, 6482), 'torch.tensor', 'torch.tensor', (['actions'], {}), '(actions)\n', (6473, 6482), False, 'import torch\n'), ((6510, 6531), 'torch.tensor', 'torch.tensor', (['rewards'], {}), '(rewards)\n', (6522, 6531), False, 'import torch\n'), ((6559, 6582), 'torch.BoolTensor', 'torch.BoolTensor', (['dones'], {}), '(dones)\n', (6575, 6582), False, 'import torch\n'), ((7504, 7516), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (7514, 7516), True, 'import torch.nn as nn\n'), ((8199, 8220), 'torch.tensor', 'torch.tensor', (['actions'], {}), '(actions)\n', (8211, 8220), False, 'import torch\n'), ((8248, 8269), 'torch.tensor', 'torch.tensor', (['rewards'], {}), '(rewards)\n', (8260, 8269), False, 'import torch\n'), ((8297, 8320), 'torch.BoolTensor', 'torch.BoolTensor', (['dones'], {}), '(dones)\n', (8313, 8320), False, 'import torch\n'), ((9255, 9267), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (9265, 9267), True, 'import torch.nn as nn\n'), ((10494, 10519), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10517, 10519), False, 'import torch\n'), ((11900, 11919), 'PER.Memory', 'Memory', (['REPLAY_SIZE'], {}), '(REPLAY_SIZE)\n', (11906, 11919), False, 'from PER import 
SumTree, Memory\n'), ((13102, 13113), 'time.time', 'time.time', ([], {}), '()\n', (13111, 13113), False, 'import time\n'), ((13137, 13166), 'numpy.mean', 'np.mean', (['total_rewards[-100:]'], {}), '(total_rewards[-100:])\n', (13144, 13166), True, 'import numpy as np\n'), ((1958, 1974), 'numpy.array', 'np.array', (['states'], {}), '(states)\n', (1966, 1974), True, 'import numpy as np\n'), ((1976, 1992), 'numpy.array', 'np.array', (['action'], {}), '(action)\n', (1984, 1992), True, 'import numpy as np\n'), ((2021, 2055), 'numpy.array', 'np.array', (['reward'], {'dtype': 'np.float32'}), '(reward, dtype=np.float32)\n', (2029, 2055), True, 'import numpy as np\n'), ((2074, 2105), 'numpy.array', 'np.array', (['dones'], {'dtype': 'np.uint8'}), '(dones, dtype=np.uint8)\n', (2082, 2105), True, 'import numpy as np\n'), ((2107, 2128), 'numpy.array', 'np.array', (['next_states'], {}), '(next_states)\n', (2115, 2128), True, 'import numpy as np\n'), ((3623, 3641), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3639, 3641), True, 'import numpy as np\n'), ((4974, 5008), 'numpy.array', 'np.array', (['[self.state]'], {'copy': '(False)'}), '([self.state], copy=False)\n', (4982, 5008), True, 'import numpy as np\n'), ((5172, 5198), 'torch.max', 'torch.max', (['q_vals_v'], {'dim': '(1)'}), '(q_vals_v, dim=1)\n', (5181, 5198), False, 'import torch\n'), ((6325, 6353), 'numpy.array', 'np.array', (['states'], {'copy': '(False)'}), '(states, copy=False)\n', (6333, 6353), True, 'import numpy as np\n'), ((6399, 6432), 'numpy.array', 'np.array', (['next_states'], {'copy': '(False)'}), '(next_states, copy=False)\n', (6407, 6432), True, 'import numpy as np\n'), ((6654, 6681), 'torch.tensor', 'torch.tensor', (['batch_weights'], {}), '(batch_weights)\n', (6666, 6681), False, 'import torch\n'), ((8063, 8091), 'numpy.array', 'np.array', (['states'], {'copy': '(False)'}), '(states, copy=False)\n', (8071, 8091), True, 'import numpy as np\n'), ((8137, 8170), 'numpy.array', 'np.array', 
(['next_states'], {'copy': '(False)'}), '(next_states, copy=False)\n', (8145, 8170), True, 'import numpy as np\n'), ((8392, 8419), 'torch.tensor', 'torch.tensor', (['batch_weights'], {}), '(batch_weights)\n', (8404, 8419), False, 'import torch\n'), ((10656, 10723), 'dqn_model.NoisyDQN', 'dqn_model.NoisyDQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (10674, 10723), False, 'import dqn_model\n'), ((10777, 10844), 'dqn_model.NoisyDQN', 'dqn_model.NoisyDQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (10795, 10844), False, 'import dqn_model\n'), ((3836, 3857), 'torch.tensor', 'torch.tensor', (['state_a'], {}), '(state_a)\n', (3848, 3857), False, 'import torch\n'), ((4843, 4861), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (4859, 4861), True, 'import numpy as np\n'), ((6728, 6760), 'torch.arange', 'torch.arange', (['(0)', 'states.shape[0]'], {}), '(0, states.shape[0])\n', (6740, 6760), False, 'import torch\n'), ((8466, 8498), 'torch.arange', 'torch.arange', (['(0)', 'states.shape[0]'], {}), '(0, states.shape[0])\n', (8478, 8498), False, 'import torch\n'), ((10979, 11048), 'dqn_model.DuelingDQN', 'dqn_model.DuelingDQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (10999, 11048), False, 'import dqn_model\n'), ((11102, 11171), 'dqn_model.DuelingDQN', 'dqn_model.DuelingDQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (11122, 11171), False, 'import dqn_model\n'), ((11282, 11344), 'dqn_model.DQN', 'dqn_model.DQN', (['env.observation_space.shape', 'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (11295, 11344), False, 'import dqn_model\n'), ((11398, 11460), 'dqn_model.DQN', 'dqn_model.DQN', (['env.observation_space.shape', 
'env.action_space.n'], {}), '(env.observation_space.shape, env.action_space.n)\n', (11411, 11460), False, 'import dqn_model\n'), ((13034, 13045), 'time.time', 'time.time', ([], {}), '()\n', (13043, 13045), False, 'import time\n'), ((5072, 5093), 'torch.tensor', 'torch.tensor', (['state_a'], {}), '(state_a)\n', (5084, 5093), False, 'import torch\n')] |
import numpy as np
from unittest import TestCase
import VZcomp.utils as utils
import VZcomp
class Utils(TestCase):
    """Unit tests for the helper routines in ``VZcomp.utils``."""

    @classmethod
    def setUpClass(cls):
        # No shared fixtures are required; kept for parity with unittest's API.
        pass

    @staticmethod
    def _bell_lines():
        """Load the bundled Bell-state QASM file and return its lines.

        Backslashes in the package path are normalised to forward slashes
        so the path is accepted regardless of platform.
        """
        bell_file = VZcomp.__path__[0] + '/tests/files/bell_state.qasm'
        return utils.list_from_file(bell_file.replace('\\', '/'))

    def test_list_from_file(self):
        # The QASM fixture should be read back line by line, in order.
        lines = self._bell_lines()
        self.assertEqual(lines[0], 'Y90 q0')
        self.assertEqual(lines[1], 'Y90 q1')
        self.assertEqual(lines[2], 'CZ q0,q1')
        self.assertEqual(lines[3], 'Y90 q0')

    def test_split_entangling(self):
        # Single-qubit runs are grouped into sublists, separated by the
        # entangling (two-qubit) operations that delimit them.
        lines = self._bell_lines()
        list_1Q, list_2Q = utils.split_entangling(lines)
        self.assertEqual(list_1Q[0], ['Y90 q0', 'Y90 q1'])
        self.assertEqual(list_1Q[1], ['Y90 q0'])
        self.assertEqual(list_2Q[0], 'CZ q0,q1')

    def test_op2matrix(self):
        # Each named gate should map to its (rounded) 2x2 matrix.
        # assertAlmostEqual is kept for the numeric entries so small
        # floating-point deviations do not produce spurious failures.
        X90 = utils.op2matrix('X90')
        X90_r = np.round(X90, 3)
        self.assertAlmostEqual(X90_r[0, 0], 0.707)
        self.assertAlmostEqual(X90_r[0, 1], -0.707j)
        self.assertAlmostEqual(X90_r[1, 0], -0.707j)
        self.assertAlmostEqual(X90_r[1, 1], 0.707)
        X = utils.op2matrix('X')
        X_r = np.round(X, 3)
        self.assertAlmostEqual(X_r[0, 0], 0)
        self.assertAlmostEqual(X_r[0, 1], -1j)
        self.assertAlmostEqual(X_r[1, 0], -1j)
        self.assertAlmostEqual(X_r[1, 1], 0)
        Y = utils.op2matrix('Y')
        Y_r = np.round(Y, 3)
        self.assertAlmostEqual(Y_r[0, 0], 0)
        self.assertAlmostEqual(Y_r[0, 1], -1)
        self.assertAlmostEqual(Y_r[1, 0], 1)
        self.assertAlmostEqual(Y_r[1, 1], 0)
        Z = utils.op2matrix('Z')
        Z_r = np.round(Z, 3)
        self.assertAlmostEqual(Z_r[0, 0], -1j)
        self.assertAlmostEqual(Z_r[0, 1], 0)
        self.assertAlmostEqual(Z_r[1, 0], 0)
        self.assertAlmostEqual(Z_r[1, 1], 1j)
        H = utils.op2matrix('H')
        H_r = np.round(H, 3)
        self.assertAlmostEqual(H_r[0, 0], -0.707j)
        self.assertAlmostEqual(H_r[0, 1], -0.707j)
        self.assertAlmostEqual(H_r[1, 0], -0.707j)
        self.assertAlmostEqual(H_r[1, 1], 0.707j)
        T = utils.op2matrix('T')
        T_r = np.round(T, 3)
        self.assertAlmostEqual(T_r[0, 0], 0.924 - 0.383j)
        self.assertAlmostEqual(T_r[0, 1], 0)
        self.assertAlmostEqual(T_r[1, 0], 0)
        self.assertAlmostEqual(T_r[1, 1], 0.924 + 0.383j)
        I = utils.op2matrix('I')
        I_r = np.round(I, 3)
        self.assertAlmostEqual(I_r[0, 0], 1)
        self.assertAlmostEqual(I_r[0, 1], 0)
        self.assertAlmostEqual(I_r[1, 0], 0)
        self.assertAlmostEqual(I_r[1, 1], 1)

    def test_mat2szxz(self):
        # An X90 rotation decomposed as Z(s)-X(x)-Z(z) Euler angles should
        # be a pure pi/2 X rotation (no Z components).
        X90 = utils.op2matrix('X90')
        X90_szxz = utils.mat2szxz(X90)
        rounded_szxz = np.round(X90_szxz, 3)
        self.assertAlmostEqual(rounded_szxz[0], 0)
        self.assertAlmostEqual(rounded_szxz[1], 1.571)
        self.assertAlmostEqual(rounded_szxz[2], 0)
| [
"VZcomp.utils.mat2szxz",
"VZcomp.utils.op2matrix",
"numpy.round",
"VZcomp.utils.split_entangling"
] | [((769, 798), 'VZcomp.utils.split_entangling', 'utils.split_entangling', (['lines'], {}), '(lines)\n', (791, 798), True, 'import VZcomp.utils as utils\n'), ((1052, 1074), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""X90"""'], {}), "('X90')\n", (1067, 1074), True, 'import VZcomp.utils as utils\n'), ((1091, 1107), 'numpy.round', 'np.round', (['X90', '(3)'], {}), '(X90, 3)\n', (1099, 1107), True, 'import numpy as np\n'), ((1329, 1349), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""X"""'], {}), "('X')\n", (1344, 1349), True, 'import VZcomp.utils as utils\n'), ((1364, 1378), 'numpy.round', 'np.round', (['X', '(3)'], {}), '(X, 3)\n', (1372, 1378), True, 'import numpy as np\n'), ((1576, 1596), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""Y"""'], {}), "('Y')\n", (1591, 1596), True, 'import VZcomp.utils as utils\n'), ((1611, 1625), 'numpy.round', 'np.round', (['Y', '(3)'], {}), '(Y, 3)\n', (1619, 1625), True, 'import numpy as np\n'), ((1820, 1840), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""Z"""'], {}), "('Z')\n", (1835, 1840), True, 'import VZcomp.utils as utils\n'), ((1855, 1869), 'numpy.round', 'np.round', (['Z', '(3)'], {}), '(Z, 3)\n', (1863, 1869), True, 'import numpy as np\n'), ((2066, 2086), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""H"""'], {}), "('H')\n", (2081, 2086), True, 'import VZcomp.utils as utils\n'), ((2101, 2115), 'numpy.round', 'np.round', (['H', '(3)'], {}), '(H, 3)\n', (2109, 2115), True, 'import numpy as np\n'), ((2332, 2352), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""T"""'], {}), "('T')\n", (2347, 2352), True, 'import VZcomp.utils as utils\n'), ((2367, 2381), 'numpy.round', 'np.round', (['T', '(3)'], {}), '(T, 3)\n', (2375, 2381), True, 'import numpy as np\n'), ((2597, 2617), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""I"""'], {}), "('I')\n", (2612, 2617), True, 'import VZcomp.utils as utils\n'), ((2632, 2646), 'numpy.round', 'np.round', (['I', '(3)'], {}), '(I, 3)\n', (2640, 2646), True, 'import 
numpy as np\n'), ((2871, 2893), 'VZcomp.utils.op2matrix', 'utils.op2matrix', (['"""X90"""'], {}), "('X90')\n", (2886, 2893), True, 'import VZcomp.utils as utils\n'), ((2913, 2932), 'VZcomp.utils.mat2szxz', 'utils.mat2szxz', (['X90'], {}), '(X90)\n', (2927, 2932), True, 'import VZcomp.utils as utils\n'), ((2957, 2978), 'numpy.round', 'np.round', (['X90_szxz', '(3)'], {}), '(X90_szxz, 3)\n', (2965, 2978), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.