code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
from os.path import exists, join
from os import makedirs
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import numpy as np
import time
import random
from tqdm import tqdm
from sklearn.neighbors import KDTree
# use relative import for being compatible with Open3d main repo
from .base_model import BaseModel
from ..utils import helper_tf
from ...utils import MODEL
from ...datasets.utils import (DataProcessing, trans_normalize, trans_augment,
trans_crop_pc)
class RandLANet(BaseModel):
    """RandLA-Net for large-scale point-cloud semantic segmentation (TensorFlow).

    Encoder/decoder architecture: the encoder stacks dilated residual blocks
    with random down-sampling, the decoder up-samples via nearest-neighbour
    interpolation with transposed convolutions and skip connections, followed
    by a small per-point classification head.

    Sub-layers are created in ``__init__`` and attached with ``setattr`` under
    string names (e.g. ``'Encoder_layer_0mlp1'``) so the ``forward_*`` methods
    can retrieve them with ``getattr`` by the same naming convention.
    """

    def __init__(
            self,
            name='RandLANet',
            k_n=16,  # KNN,
            num_layers=4,  # Number of layers
            num_points=4096 * 11,  # Number of input points
            num_classes=19,  # Number of valid classes
            ignored_label_inds=[0],
            sub_sampling_ratio=[4, 4, 4, 4],
            dim_input=3,
            dim_feature=8,
            dim_output=[16, 64, 128, 256],
            grid_size=0.06,
            batcher='DefaultBatcher',
            ckpt_path=None,
            **kwargs):
        """Build all layers of the network.

        Args:
            name: Model name used for registration/logging.
            k_n: Number of nearest neighbours used in local aggregation.
            num_layers: Number of encoder (and decoder) levels.
            num_points: Number of points cropped per training sample.
            num_classes: Number of valid semantic classes.
            ignored_label_inds: Label indices excluded from the loss.
            sub_sampling_ratio: Per-layer point down-sampling factors.
            dim_input: Input feature dimension (3 coords + extra features).
            dim_feature: Width of the first fully-connected embedding.
            dim_output: Per-encoder-layer output channel counts.
            grid_size: Grid size for voxel sub-sampling in ``preprocess``.
            batcher: Name of the batcher class used by the pipeline.
            ckpt_path: Optional checkpoint path.
            **kwargs: Forwarded to ``BaseModel`` and stored in ``self.cfg``.
        """
        super().__init__(name=name,
                         k_n=k_n,
                         num_layers=num_layers,
                         num_points=num_points,
                         num_classes=num_classes,
                         ignored_label_inds=ignored_label_inds,
                         sub_sampling_ratio=sub_sampling_ratio,
                         dim_input=dim_input,
                         dim_feature=dim_feature,
                         dim_output=dim_output,
                         grid_size=grid_size,
                         batcher=batcher,
                         ckpt_path=ckpt_path,
                         **kwargs)
        cfg = self.cfg
        dim_feature = cfg.dim_feature
        # Initial per-point embedding: Dense -> BatchNorm -> LeakyReLU.
        self.fc0 = tf.keras.layers.Dense(dim_feature, activation=None)
        self.batch_normalization = tf.keras.layers.BatchNormalization(
            -1, momentum=0.99, epsilon=1e-6)
        self.leaky_relu0 = tf.keras.layers.LeakyReLU()
        # ###########################Encoder############################
        d_encoder_list = []
        # Encoder
        for i in range(cfg.num_layers):
            name = 'Encoder_layer_' + str(i)
            self.init_dilated_res_block(dim_feature, cfg.dim_output[i], name)
            dim_feature = cfg.dim_output[i] * 2
            if i == 0:
                # Record the first layer's width twice so the decoder's
                # skip-connection indexing (-j - 2) lines up.
                d_encoder_list.append(dim_feature)
            d_encoder_list.append(dim_feature)
        feature = helper_tf.conv2d(True, dim_feature)
        setattr(self, 'decoder_0', feature)
        # Decoder
        for j in range(cfg.num_layers):
            name = 'Decoder_layer_' + str(j)
            dim_input = d_encoder_list[-j - 2] + dim_feature
            dim_output = d_encoder_list[-j - 2]
            f_decoder_i = helper_tf.conv2d_transpose(True, dim_output)
            setattr(self, name, f_decoder_i)
            dim_feature = d_encoder_list[-j - 2]
        # Classification head: conv(64) -> conv(32) -> dropout -> conv(C).
        f_layer_fc1 = helper_tf.conv2d(True, 64)
        setattr(self, 'fc1', f_layer_fc1)
        f_layer_fc2 = helper_tf.conv2d(True, 32)
        setattr(self, 'fc2', f_layer_fc2)
        f_dropout = tf.keras.layers.Dropout(0.5)
        setattr(self, 'dropout1', f_dropout)
        f_layer_fc3 = helper_tf.conv2d(False, cfg.num_classes, activation=False)
        setattr(self, 'fc', f_layer_fc3)

    def init_att_pooling(self, d, dim_output, name):
        """Create the layers of one attentive-pooling unit (Dense + MLP)."""
        att_activation = tf.keras.layers.Dense(d, activation=None)
        setattr(self, name + 'fc', att_activation)
        f_agg = helper_tf.conv2d(True, dim_output)
        setattr(self, name + 'mlp', f_agg)

    def init_building_block(self, dim_input, dim_output, name):
        """Create the layers of one local-feature-aggregation (LFA) block."""
        f_pc = helper_tf.conv2d(True, dim_input)
        setattr(self, name + 'mlp1', f_pc)
        self.init_att_pooling(dim_input * 2, dim_output // 2,
                              name + 'att_pooling_1')
        f_xyz = helper_tf.conv2d(True, dim_output // 2)
        setattr(self, name + 'mlp2', f_xyz)
        self.init_att_pooling(dim_input * 2, dim_output, name + 'att_pooling_2')

    def init_dilated_res_block(self, dim_input, dim_output, name):
        """Create the layers of one dilated residual block (LFA + shortcut)."""
        f_pc = helper_tf.conv2d(True, dim_output // 2)
        setattr(self, name + 'mlp1', f_pc)
        self.init_building_block(dim_output // 2, dim_output, name + 'LFA')
        f_pc = helper_tf.conv2d(True, dim_output * 2, activation=False)
        setattr(self, name + 'mlp2', f_pc)
        shortcut = helper_tf.conv2d(True, dim_output * 2, activation=False)
        setattr(self, name + 'shortcut', shortcut)

    def forward_gather_neighbour(self, pc, neighbor_idx):
        """Gather per-point neighbour features.

        Args:
            pc: BxNxd tensor of point features.
            neighbor_idx: BxNxK neighbour indices into the N axis.

        Returns:
            BxNxKxd tensor of gathered neighbour features.
        """
        # pc: BxNxd
        # neighbor_idx: BxNxK
        B, N, K = neighbor_idx.shape
        d = pc.shape[2]
        index_input = tf.reshape(neighbor_idx, shape=[-1, N * K])
        features = tf.gather(pc, index_input, axis=1, batch_dims=1)
        features = tf.reshape(features, [-1, N, K, d])
        return features

    def forward_att_pooling(self, feature_set, name):
        """Attentive pooling: softmax-weighted sum over the K neighbours,
        followed by the shared MLP registered under ``name + 'mlp'``.

        Args:
            feature_set: BxNxKxd neighbour features.
            name: Prefix used to look up the Dense/MLP sub-layers.

        Returns:
            BxNx1xd' aggregated features.
        """
        # feature_set: BxNxKxd
        batch_size = feature_set.shape[0]
        num_points = feature_set.shape[1]
        num_neigh = feature_set.shape[2]
        d = feature_set.shape[3]
        f_reshaped = tf.reshape(feature_set, shape=[-1, num_neigh, d])
        m_dense = getattr(self, name + 'fc')
        att_activation = m_dense(f_reshaped)
        # Attention over the neighbour axis (axis=1 after the reshape).
        att_scores = tf.nn.softmax(att_activation, axis=1)
        # print("att_scores = ", att_scores.shape)
        f_agg = f_reshaped * att_scores
        f_agg = tf.reduce_sum(f_agg, axis=1)
        f_agg = tf.reshape(f_agg, [-1, num_points, 1, d])
        m_conv2d = getattr(self, name + 'mlp')
        f_agg = m_conv2d(f_agg, training=self.training)
        return f_agg

    def forward_relative_pos_encoding(self, xyz, neigh_idx):
        """Encode relative neighbour geometry.

        Concatenates [distance, relative xyz, center xyz, neighbour xyz]
        along the last axis, giving a BxNxKx10 tensor.
        """
        B, N, K = neigh_idx.shape
        neighbor_xyz = self.forward_gather_neighbour(xyz, neigh_idx)
        xyz_tile = tf.tile(tf.expand_dims(xyz, axis=2),
                           [1, 1, tf.shape(neigh_idx)[-1], 1])
        relative_xyz = xyz_tile - neighbor_xyz
        relative_dis = tf.sqrt(
            tf.reduce_sum(tf.square(relative_xyz), axis=-1, keepdims=True))
        relative_feature = tf.concat(
            [relative_dis, relative_xyz, xyz_tile, neighbor_xyz], axis=-1)
        return relative_feature

    def forward_building_block(self, xyz, feature, neigh_idx, name):
        """Run one LFA block: two rounds of (position encoding + neighbour
        gathering + attentive pooling) using the sub-layers under ``name``."""
        f_xyz = self.forward_relative_pos_encoding(xyz, neigh_idx)
        m_conv2d = getattr(self, name + 'mlp1')
        f_xyz = m_conv2d(f_xyz, training=self.training)
        f_neighbours = self.forward_gather_neighbour(
            tf.squeeze(feature, axis=2), neigh_idx)
        f_concat = tf.concat([f_neighbours, f_xyz], axis=-1)
        f_pc_agg = self.forward_att_pooling(f_concat, name + 'att_pooling_1')
        m_conv2d = getattr(self, name + 'mlp2')
        f_xyz = m_conv2d(f_xyz, training=self.training)
        f_neighbours = self.forward_gather_neighbour(
            tf.squeeze(f_pc_agg, axis=2), neigh_idx)
        f_concat = tf.concat([f_neighbours, f_xyz], axis=-1)
        f_pc_agg = self.forward_att_pooling(f_concat, name + 'att_pooling_2')
        return f_pc_agg

    def forward_dilated_res_block(self, feature, xyz, neigh_idx, dim_output,
                                  name):
        """Run one dilated residual block: MLP -> LFA -> MLP, plus a
        convolutional shortcut, joined with a leaky ReLU."""
        m_conv2d = getattr(self, name + 'mlp1')
        f_pc = m_conv2d(feature, training=self.training)
        f_pc = self.forward_building_block(xyz, f_pc, neigh_idx, name + 'LFA')
        m_conv2d = getattr(self, name + 'mlp2')
        f_pc = m_conv2d(f_pc, training=self.training)
        m_conv2d = getattr(self, name + 'shortcut')
        shortcut = m_conv2d(feature, training=self.training)
        result = tf.nn.leaky_relu(f_pc + shortcut)
        return result

    def call(self, inputs, training=True):
        """Forward pass.

        Args:
            inputs: Flat list produced by ``transform``: per-layer xyz,
                neighbour indices, pooling indices, interpolation indices,
                then the point features (and trailing labels, unused here).
            training: Whether to run layers in training mode.

        Returns:
            Per-point class logits of shape BxNxC (softmax is applied by
            the caller, not here).
        """
        self.training = training
        num_layers = self.cfg.num_layers
        # Slice the flat input list back into its four per-layer groups.
        xyz = inputs[:num_layers]
        neigh_idx = inputs[num_layers:2 * num_layers]
        sub_idx = inputs[2 * num_layers:3 * num_layers]
        interp_idx = inputs[3 * num_layers:4 * num_layers]
        feature = inputs[4 * num_layers]
        m_dense = getattr(self, 'fc0')
        feature = m_dense(feature, training=self.training)
        m_bn = getattr(self, 'batch_normalization')
        feature = m_bn(feature, training=self.training)
        feature = tf.nn.leaky_relu(feature)
        feature = tf.expand_dims(feature, axis=2)
        # B N 1 d
        # Encoder
        f_encoder_list = []
        for i in range(self.cfg.num_layers):
            name = 'Encoder_layer_' + str(i)
            f_encoder_i = self.forward_dilated_res_block(
                feature, xyz[i], neigh_idx[i], self.cfg.dim_output[i], name)
            f_sampled_i = self.random_sample(f_encoder_i, sub_idx[i])
            feature = f_sampled_i
            if i == 0:
                f_encoder_list.append(f_encoder_i)
            f_encoder_list.append(f_sampled_i)
        m_conv2d = getattr(self, 'decoder_0')
        feature = m_conv2d(f_encoder_list[-1], training=self.training)
        # Decoder
        f_decoder_list = []
        for j in range(self.cfg.num_layers):
            f_interp_i = self.nearest_interpolation(feature, interp_idx[-j - 1])
            name = 'Decoder_layer_' + str(j)
            m_transposeconv2d = getattr(self, name)
            # Skip connection: concatenate the matching encoder output.
            concat_feature = tf.concat([f_encoder_list[-j - 2], f_interp_i],
                                       axis=3)
            f_decoder_i = m_transposeconv2d(concat_feature,
                                            training=self.training)
            feature = f_decoder_i
            f_decoder_list.append(f_decoder_i)
        m_conv2d = getattr(self, 'fc1')
        f_layer_fc1 = m_conv2d(f_decoder_list[-1], training=self.training)
        m_conv2d = getattr(self, 'fc2')
        f_layer_fc2 = m_conv2d(f_layer_fc1, training=self.training)
        self.test_hidden = f_layer_fc2
        m_dropout = getattr(self, 'dropout1')
        f_layer_drop = m_dropout(f_layer_fc2, training=self.training)
        m_conv2d = getattr(self, 'fc')
        f_layer_fc3 = m_conv2d(f_layer_drop, training=self.training)
        f_out = tf.squeeze(f_layer_fc3, [2])
        # f_out = tf.nn.softmax(f_out)
        return f_out

    def get_optimizer(self, cfg_pipeline):
        """Return an Adam optimizer with exponential learning-rate decay
        driven by ``cfg_pipeline.adam_lr`` and ``scheduler_gamma``."""
        lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            cfg_pipeline.adam_lr,
            decay_steps=100000,
            decay_rate=cfg_pipeline.scheduler_gamma)
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
        return optimizer

    def get_loss(self, Loss, results, inputs):
        """
        Runs the loss on outputs of the model
        :param outputs: logits
        :param labels: labels
        :return: loss
        """
        cfg = self.cfg
        labels = inputs[-1]
        # Drop points whose label is in cfg.ignored_label_inds.
        scores, labels = Loss.filter_valid_label(results, labels)
        loss = Loss.weighted_CrossEntropyLoss(scores, labels)
        return loss, labels, scores

    @staticmethod
    def random_sample(feature, pool_idx):
        """
        :param feature: [B, N, d] input features matrix
        :param pool_idx: [B, N', max_num] N' < N, N' is the selected position after pooling
        :return: pool_features = [B, N', d] pooled features matrix
        """
        # NOTE(review): `feature` actually arrives as [B, N, 1, d] (the
        # singleton axis added in call()); it is squeezed here and the
        # result carries a keepdims singleton again — verify shapes.
        feature = tf.squeeze(feature, axis=2)
        num_neigh = tf.shape(pool_idx)[-1]
        d = feature.get_shape()[-1]
        batch_size = tf.shape(pool_idx)[0]
        pool_idx = tf.reshape(pool_idx, [batch_size, -1])
        pool_features = tf.gather(feature, pool_idx, axis=1, batch_dims=1)
        pool_features = tf.reshape(pool_features,
                                   [batch_size, -1, num_neigh, d])
        # Max-pool over the neighbourhood axis.
        pool_features = tf.reduce_max(pool_features, axis=2, keepdims=True)
        return pool_features

    @staticmethod
    def nearest_interpolation(feature, interp_idx):
        """
        :param feature: [B, N, d] input features matrix
        :param interp_idx: [B, up_num_points, 1] nearest neighbour index
        :return: [B, up_num_points, d] interpolated features matrix
        """
        feature = tf.squeeze(feature, axis=2)
        batch_size = tf.shape(interp_idx)[0]
        up_num_points = tf.shape(interp_idx)[1]
        interp_idx = tf.reshape(interp_idx, [batch_size, up_num_points])
        interpolatedim_features = tf.gather(feature,
                                            interp_idx,
                                            axis=1,
                                            batch_dims=1)
        interpolatedim_features = tf.expand_dims(interpolatedim_features,
                                                 axis=2)
        return interpolatedim_features

    @staticmethod
    def gather_neighbour(pc, neighbor_idx):
        """Gather coordinates/features of neighbouring points.

        NOTE(review): uses TF1-era APIs (`tf.batch_gather` and
        `Dimension.value`) that were removed in TF2; this helper appears
        unused by the forward pass (`forward_gather_neighbour` is used
        instead) — confirm before calling it.
        """
        # gather the coordinates or features of neighboring points
        batch_size = tf.shape(pc)[0]
        num_points = tf.shape(pc)[1]
        d = pc.get_shape()[2].value
        index_input = tf.reshape(neighbor_idx, shape=[batch_size, -1])
        features = tf.batch_gather(pc, index_input)
        features = tf.reshape(
            features, [batch_size, num_points,
                       tf.shape(neighbor_idx)[-1], d])
        return features

    def get_batch_gen(self, dataset, steps_per_epoch=None, batch_size=1):
        """Build a Python generator (plus dtypes/shapes) for tf.data.

        Each yielded sample is a cropped, normalized (and, for training
        splits, augmented) point cloud: (points, features, labels).
        """
        cfg = self.cfg

        def gen():
            n_iters = dataset.num_pc if steps_per_epoch is None else steps_per_epoch * batch_size
            for i in range(n_iters):
                data, attr = dataset.read_data(i % dataset.num_pc)
                pc = data['point'].copy()
                label = data['label'].copy()
                feat = data['feat'].copy() if data['feat'] is not None else None
                tree = data['search_tree']
                # Crop a fixed-size neighbourhood around a random point.
                pick_idx = np.random.choice(len(pc), 1)
                center_point = pc[pick_idx, :].reshape(1, -1)
                pc, feat, label, _ = trans_crop_pc(pc, feat, label, tree,
                                                   pick_idx,
                                                   self.cfg.num_points)
                t_normalize = cfg.get('t_normalize', {})
                pc, feat = trans_normalize(pc, feat, t_normalize)
                if attr['split'] in ['training', 'train']:
                    t_augment = cfg.get('t_augment', None)
                    pc = trans_augment(pc, t_augment)
                if feat is None:
                    feat = pc.copy()
                else:
                    feat = np.concatenate([pc, feat], axis=1)
                assert self.cfg.dim_input == feat.shape[
                    1], "Wrong feature dimension, please update dim_input(3 + feature_dimension) in config"
                # NOTE(review): labels are yielded as float32 while
                # gen_types below declares tf.int32 — confirm tf.data
                # casting behaviour here.
                yield (pc.astype(np.float32), feat.astype(np.float32),
                       label.astype(np.float32))

        gen_func = gen
        gen_types = (tf.float32, tf.float32, tf.int32)
        gen_shapes = ([None, 3], [None, cfg.dim_input], [None])
        return gen_func, gen_types, gen_shapes

    def transform_inference(self, data, min_possibility_idx):
        """Build one inference input dict around the least-visited point.

        Also bumps ``self.possibility`` for the selected points so that
        subsequent calls spread coverage over the whole cloud.
        """
        cfg = self.cfg
        inputs = dict()
        pc = data['point'].copy()
        label = data['label'].copy()
        feat = data['feat'].copy() if data['feat'] is not None else None
        tree = data['search_tree']
        pick_idx = min_possibility_idx
        center_point = pc[pick_idx, :].reshape(1, -1)
        pc, feat, label, selected_idx = trans_crop_pc(pc, feat, label, tree,
                                                      pick_idx,
                                                      self.cfg.num_points)
        # Points close to the crop center get a larger possibility bump.
        dists = np.sum(np.square(pc.astype(np.float32)), axis=1)
        delta = np.square(1 - dists / np.max(dists))
        self.possibility[selected_idx] += delta
        inputs['point_inds'] = selected_idx
        t_normalize = cfg.get('t_normalize', {})
        pc, feat = trans_normalize(pc, feat, t_normalize)
        if feat is None:
            feat = pc.copy()
        else:
            feat = np.concatenate([pc, feat], axis=1)
        assert self.cfg.dim_input == feat.shape[
            1], "Wrong feature dimension, please update dim_input(3 + feature_dimension) in config"
        features = feat
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []
        # Precompute per-layer KNN / pooling / up-sampling indices.
        for i in range(cfg.num_layers):
            neighbour_idx = DataProcessing.knn_search(pc, pc, cfg.k_n)
            sub_points = pc[:pc.shape[0] // cfg.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:pc.shape[0] // cfg.sub_sampling_ratio[i], :]
            up_i = DataProcessing.knn_search(sub_points, pc, 1)
            input_points.append(pc)
            input_neighbors.append(neighbour_idx.astype(np.int64))
            input_pools.append(pool_i.astype(np.int64))
            input_up_samples.append(up_i.astype(np.int64))
            pc = sub_points
        inputs['xyz'] = input_points
        inputs['neigh_idx'] = input_neighbors
        inputs['sub_idx'] = input_pools
        inputs['interp_idx'] = input_up_samples
        inputs['features'] = features
        inputs['labels'] = label.astype(np.int64)
        return inputs

    def transform(self, pc, feat, label):
        """Map one (pc, feat, label) sample to the flat input list consumed
        by ``call``; KNN runs as a numpy_function inside the tf.data graph."""
        cfg = self.cfg
        pc = pc  # no-op; kept from original code
        feat = feat  # no-op; kept from original code
        input_points = []
        input_neighbors = []
        input_pools = []
        input_up_samples = []
        for i in range(cfg.num_layers):
            neighbour_idx = tf.numpy_function(DataProcessing.knn_search,
                                              [pc, pc, cfg.k_n], tf.int32)
            sub_points = pc[:tf.shape(pc)[0] // cfg.sub_sampling_ratio[i], :]
            pool_i = neighbour_idx[:tf.shape(pc)[0] //
                                   cfg.sub_sampling_ratio[i], :]
            up_i = tf.numpy_function(DataProcessing.knn_search,
                                     [sub_points, pc, 1], tf.int32)
            input_points.append(pc)
            input_neighbors.append(neighbour_idx)
            input_pools.append(pool_i)
            input_up_samples.append(up_i)
            pc = sub_points
        input_list = input_points + input_neighbors + input_pools + input_up_samples
        input_list += [feat, label]
        return input_list

    def inference_begin(self, data):
        """Initialize inference state: preprocess the cloud, reset the
        per-point visit 'possibility' scores and the accumulated probs."""
        self.test_smooth = 0.95
        attr = {'split': 'test'}
        self.inference_data = self.preprocess(data, attr)
        num_points = self.inference_data['search_tree'].data.shape[0]
        # Small random init so argmin picks a random starting point.
        self.possibility = np.random.rand(num_points) * 1e-3
        self.test_probs = np.zeros(shape=[num_points, self.cfg.num_classes],
                                   dtype=np.float16)
        self.pbar = tqdm(total=self.possibility.shape[0])
        self.pbar_update = 0

    def inference_preprocess(self):
        """Prepare one batched flat-input list for the next inference step,
        centered on the currently least-visited point."""
        min_possibility_idx = np.argmin(self.possibility)
        data = self.transform_inference(self.inference_data,
                                        min_possibility_idx)
        inputs = {'data': data, 'attr': []}
        # inputs = self.batcher.collate_fn([inputs])
        self.inference_input = inputs
        flat_inputs = data['xyz'] + data['neigh_idx'] + data['sub_idx'] + data[
            'interp_idx']
        flat_inputs += [data['features'], data['labels']]
        for i in range(len(flat_inputs)):
            # Add the batch dimension (batch size 1).
            flat_inputs[i] = np.expand_dims(flat_inputs[i], 0)
        return flat_inputs

    def inference_end(self, results):
        """Accumulate smoothed probabilities; finish once every point's
        possibility exceeds 0.5.

        Returns:
            True when inference is complete (``self.inference_result`` is
            then populated), False if more steps are needed.
        """
        inputs = self.inference_input
        results = tf.reshape(results, (-1, self.cfg.num_classes))
        results = tf.nn.softmax(results, axis=-1)
        results = results.cpu().numpy()
        probs = np.reshape(results, [-1, self.cfg.num_classes])
        inds = inputs['data']['point_inds']
        # Exponential moving average over repeated visits of the same point.
        self.test_probs[inds] = self.test_smooth * self.test_probs[inds] + (
            1 - self.test_smooth) * probs
        self.pbar.update(self.possibility[self.possibility > 0.5].shape[0] -
                         self.pbar_update)
        self.pbar_update = self.possibility[self.possibility > 0.5].shape[0]
        if np.min(self.possibility) > 0.5:
            self.pbar.close()
            # Project predictions back onto the original (unsubsampled) cloud.
            reproj_inds = self.inference_data['proj_inds']
            self.test_probs = self.test_probs[reproj_inds]
            inference_result = {
                'predict_labels': np.argmax(self.test_probs, 1),
                'predict_scores': self.test_probs
            }
            self.inference_result = inference_result
            return True
        else:
            return False

    def preprocess(self, data, attr):
        """Grid-subsample a raw cloud and build its KD-tree.

        Returns a dict with 'point', 'feat', 'label', 'search_tree' and,
        for test splits, 'proj_inds' mapping original points to the
        subsampled cloud.
        """
        cfg = self.cfg
        points = data['point'][:, 0:3]
        if 'label' not in data.keys() or data['label'] is None:
            labels = np.zeros((points.shape[0],), dtype=np.int32)
        else:
            labels = np.array(data['label'], dtype=np.int32).reshape((-1,))
        if 'feat' not in data.keys() or data['feat'] is None:
            feat = None
        else:
            feat = np.array(data['feat'], dtype=np.float32)
        split = attr['split']
        data = dict()
        if cfg.get('t_align', False):
            # Shift the cloud so x/y start near the origin, z unchanged.
            points_min = np.expand_dims(points.min(0), 0)
            points_min[0, :2] = 0
            points = points - points_min
        if (feat is None):
            sub_points, sub_labels = DataProcessing.grid_subsampling(
                points, labels=labels, grid_size=cfg.grid_size)
            sub_feat = None
        else:
            sub_points, sub_feat, sub_labels = DataProcessing.grid_subsampling(
                points, features=feat, labels=labels, grid_size=cfg.grid_size)
        search_tree = KDTree(sub_points)
        data['point'] = sub_points
        data['feat'] = sub_feat
        data['label'] = sub_labels
        data['search_tree'] = search_tree
        if split in ["test", "testing"]:
            proj_inds = np.squeeze(
                search_tree.query(points, return_distance=False))
            proj_inds = proj_inds.astype(np.int32)
            data['proj_inds'] = proj_inds
        return data
MODEL._register_module(RandLANet, 'tf')
| [
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"numpy.argmax",
"tensorflow.reshape",
"numpy.argmin",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.numpy_function",
"tensorflow.reduce_max",
"tensorflow.nn.leaky_relu",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.BatchNormalizat... | [((1831, 1882), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['dim_feature'], {'activation': 'None'}), '(dim_feature, activation=None)\n', (1852, 1882), True, 'import tensorflow as tf\n'), ((1918, 1986), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', (['(-1)'], {'momentum': '(0.99)', 'epsilon': '(1e-06)'}), '(-1, momentum=0.99, epsilon=1e-06)\n', (1952, 1986), True, 'import tensorflow as tf\n'), ((2026, 2053), 'tensorflow.keras.layers.LeakyReLU', 'tf.keras.layers.LeakyReLU', ([], {}), '()\n', (2051, 2053), True, 'import tensorflow as tf\n'), ((3190, 3218), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (3213, 3218), True, 'import tensorflow as tf\n'), ((3466, 3507), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['d'], {'activation': 'None'}), '(d, activation=None)\n', (3487, 3507), True, 'import tensorflow as tf\n'), ((4802, 4845), 'tensorflow.reshape', 'tf.reshape', (['neighbor_idx'], {'shape': '[-1, N * K]'}), '(neighbor_idx, shape=[-1, N * K])\n', (4812, 4845), True, 'import tensorflow as tf\n'), ((4866, 4914), 'tensorflow.gather', 'tf.gather', (['pc', 'index_input'], {'axis': '(1)', 'batch_dims': '(1)'}), '(pc, index_input, axis=1, batch_dims=1)\n', (4875, 4914), True, 'import tensorflow as tf\n'), ((4935, 4970), 'tensorflow.reshape', 'tf.reshape', (['features', '[-1, N, K, d]'], {}), '(features, [-1, N, K, d])\n', (4945, 4970), True, 'import tensorflow as tf\n'), ((5262, 5311), 'tensorflow.reshape', 'tf.reshape', (['feature_set'], {'shape': '[-1, num_neigh, d]'}), '(feature_set, shape=[-1, num_neigh, d])\n', (5272, 5311), True, 'import tensorflow as tf\n'), ((5424, 5461), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['att_activation'], {'axis': '(1)'}), '(att_activation, axis=1)\n', (5437, 5461), True, 'import tensorflow as tf\n'), ((5570, 5598), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['f_agg'], {'axis': 
'(1)'}), '(f_agg, axis=1)\n', (5583, 5598), True, 'import tensorflow as tf\n'), ((5615, 5656), 'tensorflow.reshape', 'tf.reshape', (['f_agg', '[-1, num_points, 1, d]'], {}), '(f_agg, [-1, num_points, 1, d])\n', (5625, 5656), True, 'import tensorflow as tf\n'), ((6250, 6322), 'tensorflow.concat', 'tf.concat', (['[relative_dis, relative_xyz, xyz_tile, neighbor_xyz]'], {'axis': '(-1)'}), '([relative_dis, relative_xyz, xyz_tile, neighbor_xyz], axis=-1)\n', (6259, 6322), True, 'import tensorflow as tf\n'), ((6736, 6777), 'tensorflow.concat', 'tf.concat', (['[f_neighbours, f_xyz]'], {'axis': '(-1)'}), '([f_neighbours, f_xyz], axis=-1)\n', (6745, 6777), True, 'import tensorflow as tf\n'), ((7089, 7130), 'tensorflow.concat', 'tf.concat', (['[f_neighbours, f_xyz]'], {'axis': '(-1)'}), '([f_neighbours, f_xyz], axis=-1)\n', (7098, 7130), True, 'import tensorflow as tf\n'), ((7773, 7806), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['(f_pc + shortcut)'], {}), '(f_pc + shortcut)\n', (7789, 7806), True, 'import tensorflow as tf\n'), ((8418, 8443), 'tensorflow.nn.leaky_relu', 'tf.nn.leaky_relu', (['feature'], {}), '(feature)\n', (8434, 8443), True, 'import tensorflow as tf\n'), ((8462, 8493), 'tensorflow.expand_dims', 'tf.expand_dims', (['feature'], {'axis': '(2)'}), '(feature, axis=2)\n', (8476, 8493), True, 'import tensorflow as tf\n'), ((10240, 10268), 'tensorflow.squeeze', 'tf.squeeze', (['f_layer_fc3', '[2]'], {}), '(f_layer_fc3, [2])\n', (10250, 10268), True, 'import tensorflow as tf\n'), ((10396, 10529), 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['cfg_pipeline.adam_lr'], {'decay_steps': '(100000)', 'decay_rate': 'cfg_pipeline.scheduler_gamma'}), '(cfg_pipeline.adam_lr,\n decay_steps=100000, decay_rate=cfg_pipeline.scheduler_gamma)\n', (10442, 10529), True, 'import tensorflow as tf\n'), ((10583, 10634), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 
'lr_schedule'}), '(learning_rate=lr_schedule)\n', (10607, 10634), True, 'import tensorflow as tf\n'), ((11398, 11425), 'tensorflow.squeeze', 'tf.squeeze', (['feature'], {'axis': '(2)'}), '(feature, axis=2)\n', (11408, 11425), True, 'import tensorflow as tf\n'), ((11567, 11605), 'tensorflow.reshape', 'tf.reshape', (['pool_idx', '[batch_size, -1]'], {}), '(pool_idx, [batch_size, -1])\n', (11577, 11605), True, 'import tensorflow as tf\n'), ((11631, 11681), 'tensorflow.gather', 'tf.gather', (['feature', 'pool_idx'], {'axis': '(1)', 'batch_dims': '(1)'}), '(feature, pool_idx, axis=1, batch_dims=1)\n', (11640, 11681), True, 'import tensorflow as tf\n'), ((11707, 11764), 'tensorflow.reshape', 'tf.reshape', (['pool_features', '[batch_size, -1, num_neigh, d]'], {}), '(pool_features, [batch_size, -1, num_neigh, d])\n', (11717, 11764), True, 'import tensorflow as tf\n'), ((11825, 11876), 'tensorflow.reduce_max', 'tf.reduce_max', (['pool_features'], {'axis': '(2)', 'keepdims': '(True)'}), '(pool_features, axis=2, keepdims=True)\n', (11838, 11876), True, 'import tensorflow as tf\n'), ((12217, 12244), 'tensorflow.squeeze', 'tf.squeeze', (['feature'], {'axis': '(2)'}), '(feature, axis=2)\n', (12227, 12244), True, 'import tensorflow as tf\n'), ((12359, 12410), 'tensorflow.reshape', 'tf.reshape', (['interp_idx', '[batch_size, up_num_points]'], {}), '(interp_idx, [batch_size, up_num_points])\n', (12369, 12410), True, 'import tensorflow as tf\n'), ((12446, 12498), 'tensorflow.gather', 'tf.gather', (['feature', 'interp_idx'], {'axis': '(1)', 'batch_dims': '(1)'}), '(feature, interp_idx, axis=1, batch_dims=1)\n', (12455, 12498), True, 'import tensorflow as tf\n'), ((12665, 12712), 'tensorflow.expand_dims', 'tf.expand_dims', (['interpolatedim_features'], {'axis': '(2)'}), '(interpolatedim_features, axis=2)\n', (12679, 12712), True, 'import tensorflow as tf\n'), ((13063, 13111), 'tensorflow.reshape', 'tf.reshape', (['neighbor_idx'], {'shape': '[batch_size, -1]'}), '(neighbor_idx, 
shape=[batch_size, -1])\n', (13073, 13111), True, 'import tensorflow as tf\n'), ((13131, 13163), 'tensorflow.batch_gather', 'tf.batch_gather', (['pc', 'index_input'], {}), '(pc, index_input)\n', (13146, 13163), True, 'import tensorflow as tf\n'), ((18694, 18762), 'numpy.zeros', 'np.zeros', ([], {'shape': '[num_points, self.cfg.num_classes]', 'dtype': 'np.float16'}), '(shape=[num_points, self.cfg.num_classes], dtype=np.float16)\n', (18702, 18762), True, 'import numpy as np\n'), ((18818, 18855), 'tqdm.tqdm', 'tqdm', ([], {'total': 'self.possibility.shape[0]'}), '(total=self.possibility.shape[0])\n', (18822, 18855), False, 'from tqdm import tqdm\n'), ((18952, 18979), 'numpy.argmin', 'np.argmin', (['self.possibility'], {}), '(self.possibility)\n', (18961, 18979), True, 'import numpy as np\n'), ((19631, 19678), 'tensorflow.reshape', 'tf.reshape', (['results', '(-1, self.cfg.num_classes)'], {}), '(results, (-1, self.cfg.num_classes))\n', (19641, 19678), True, 'import tensorflow as tf\n'), ((19697, 19728), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['results'], {'axis': '(-1)'}), '(results, axis=-1)\n', (19710, 19728), True, 'import tensorflow as tf\n'), ((19786, 19833), 'numpy.reshape', 'np.reshape', (['results', '[-1, self.cfg.num_classes]'], {}), '(results, [-1, self.cfg.num_classes])\n', (19796, 19833), True, 'import numpy as np\n'), ((21760, 21778), 'sklearn.neighbors.KDTree', 'KDTree', (['sub_points'], {}), '(sub_points)\n', (21766, 21778), False, 'from sklearn.neighbors import KDTree\n'), ((5976, 6003), 'tensorflow.expand_dims', 'tf.expand_dims', (['xyz'], {'axis': '(2)'}), '(xyz, axis=2)\n', (5990, 6003), True, 'import tensorflow as tf\n'), ((6677, 6704), 'tensorflow.squeeze', 'tf.squeeze', (['feature'], {'axis': '(2)'}), '(feature, axis=2)\n', (6687, 6704), True, 'import tensorflow as tf\n'), ((7029, 7057), 'tensorflow.squeeze', 'tf.squeeze', (['f_pc_agg'], {'axis': '(2)'}), '(f_pc_agg, axis=2)\n', (7039, 7057), True, 'import tensorflow as tf\n'), ((9427, 9482), 
'tensorflow.concat', 'tf.concat', (['[f_encoder_list[-j - 2], f_interp_i]'], {'axis': '(3)'}), '([f_encoder_list[-j - 2], f_interp_i], axis=3)\n', (9436, 9482), True, 'import tensorflow as tf\n'), ((11446, 11464), 'tensorflow.shape', 'tf.shape', (['pool_idx'], {}), '(pool_idx)\n', (11454, 11464), True, 'import tensorflow as tf\n'), ((11526, 11544), 'tensorflow.shape', 'tf.shape', (['pool_idx'], {}), '(pool_idx)\n', (11534, 11544), True, 'import tensorflow as tf\n'), ((12266, 12286), 'tensorflow.shape', 'tf.shape', (['interp_idx'], {}), '(interp_idx)\n', (12274, 12286), True, 'import tensorflow as tf\n'), ((12314, 12334), 'tensorflow.shape', 'tf.shape', (['interp_idx'], {}), '(interp_idx)\n', (12322, 12334), True, 'import tensorflow as tf\n'), ((12952, 12964), 'tensorflow.shape', 'tf.shape', (['pc'], {}), '(pc)\n', (12960, 12964), True, 'import tensorflow as tf\n'), ((12989, 13001), 'tensorflow.shape', 'tf.shape', (['pc'], {}), '(pc)\n', (12997, 13001), True, 'import tensorflow as tf\n'), ((16116, 16150), 'numpy.concatenate', 'np.concatenate', (['[pc, feat]'], {'axis': '(1)'}), '([pc, feat], axis=1)\n', (16130, 16150), True, 'import numpy as np\n'), ((17581, 17654), 'tensorflow.numpy_function', 'tf.numpy_function', (['DataProcessing.knn_search', '[pc, pc, cfg.k_n]', 'tf.int32'], {}), '(DataProcessing.knn_search, [pc, pc, cfg.k_n], tf.int32)\n', (17598, 17654), True, 'import tensorflow as tf\n'), ((17919, 17994), 'tensorflow.numpy_function', 'tf.numpy_function', (['DataProcessing.knn_search', '[sub_points, pc, 1]', 'tf.int32'], {}), '(DataProcessing.knn_search, [sub_points, pc, 1], tf.int32)\n', (17936, 17994), True, 'import tensorflow as tf\n'), ((18634, 18660), 'numpy.random.rand', 'np.random.rand', (['num_points'], {}), '(num_points)\n', (18648, 18660), True, 'import numpy as np\n'), ((19474, 19507), 'numpy.expand_dims', 'np.expand_dims', (['flat_inputs[i]', '(0)'], {}), '(flat_inputs[i], 0)\n', (19488, 19507), True, 'import numpy as np\n'), ((20207, 20231), 
'numpy.min', 'np.min', (['self.possibility'], {}), '(self.possibility)\n', (20213, 20231), True, 'import numpy as np\n'), ((20853, 20897), 'numpy.zeros', 'np.zeros', (['(points.shape[0],)'], {'dtype': 'np.int32'}), '((points.shape[0],), dtype=np.int32)\n', (20861, 20897), True, 'import numpy as np\n'), ((21108, 21148), 'numpy.array', 'np.array', (["data['feat']"], {'dtype': 'np.float32'}), "(data['feat'], dtype=np.float32)\n", (21116, 21148), True, 'import numpy as np\n'), ((6173, 6196), 'tensorflow.square', 'tf.square', (['relative_xyz'], {}), '(relative_xyz)\n', (6182, 6196), True, 'import tensorflow as tf\n'), ((20454, 20483), 'numpy.argmax', 'np.argmax', (['self.test_probs', '(1)'], {}), '(self.test_probs, 1)\n', (20463, 20483), True, 'import numpy as np\n'), ((6039, 6058), 'tensorflow.shape', 'tf.shape', (['neigh_idx'], {}), '(neigh_idx)\n', (6047, 6058), True, 'import tensorflow as tf\n'), ((13265, 13287), 'tensorflow.shape', 'tf.shape', (['neighbor_idx'], {}), '(neighbor_idx)\n', (13273, 13287), True, 'import tensorflow as tf\n'), ((14596, 14630), 'numpy.concatenate', 'np.concatenate', (['[pc, feat]'], {'axis': '(1)'}), '([pc, feat], axis=1)\n', (14610, 14630), True, 'import numpy as np\n'), ((15813, 15826), 'numpy.max', 'np.max', (['dists'], {}), '(dists)\n', (15819, 15826), True, 'import numpy as np\n'), ((20933, 20972), 'numpy.array', 'np.array', (["data['label']"], {'dtype': 'np.int32'}), "(data['label'], dtype=np.int32)\n", (20941, 20972), True, 'import numpy as np\n'), ((17731, 17743), 'tensorflow.shape', 'tf.shape', (['pc'], {}), '(pc)\n', (17739, 17743), True, 'import tensorflow as tf\n'), ((17816, 17828), 'tensorflow.shape', 'tf.shape', (['pc'], {}), '(pc)\n', (17824, 17828), True, 'import tensorflow as tf\n')] |
# https://github.com/qq456cvb/doudizhu-C
import sys
from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, \
augment_action_space, clamp_action_idx
from utils.utils import get_mask_onehot60
import numpy as np
from config import ENV_DIR
sys.path.insert(0, ENV_DIR)
from env import get_combinations_nosplit, get_combinations_recursive
class Decomposer:
    """Decomposes a Dou Dizhu hand into candidate action combinations.

    Wraps the C++ helpers ``get_combinations_nosplit`` /
    ``get_combinations_recursive`` and post-filters the results against the
    cards the opponent just played.
    """

    def __init__(self, num_actions=(100, 21)):
        """
        Args:
            num_actions: (max_combinations, max_actions_per_combination);
                the second entry sizes the returned fine mask.
        """
        self.num_actions = num_actions

    def get_combinations(self, curr_cards_char, last_cards_char):
        """Enumerate legal decompositions of the current hand.

        Args:
            curr_cards_char: List of card characters currently in hand.
            last_cards_char: Cards played by the previous player; empty list
                means this player leads (pass is then excluded).

        Returns:
            Tuple ``(combs, fine_mask)`` where ``combs`` is a list of
            combinations (each a list of action-space indices, prefixed with
            the pass action 0 when responding) and ``fine_mask`` is a boolean
            array marking actions that beat ``last_cards_char``
            (``None`` when leading).
        """
        if len(curr_cards_char) > 10:
            # Large hand: use the non-splitting C++ routine on the augmented
            # action space, masking out actions not coverable by the hand.
            card_mask = Card.char2onehot60(curr_cards_char).astype(np.uint8)
            mask = augment_action_space_onehot60
            a = np.expand_dims(1 - card_mask, 0) * mask
            invalid_row_idx = set(np.where(a > 0)[0])
            if len(last_cards_char) == 0:
                invalid_row_idx.add(0)  # cannot pass when leading
            valid_row_idx = [i for i in range(len(augment_action_space)) if i not in invalid_row_idx]
            mask = mask[valid_row_idx, :]
            idx_mapping = dict(zip(range(mask.shape[0]), valid_row_idx))
            # augment mask
            # TODO: known issue: 555444666 will not decompose into 5554 and 66644
            combs = get_combinations_nosplit(mask, card_mask)
            combs = [([] if len(last_cards_char) == 0 else [0]) + [clamp_action_idx(idx_mapping[idx]) for idx in comb]
                     for
                     comb in combs]
            if len(last_cards_char) > 0:
                # Keep only combinations containing at least one action that
                # beats the last played cards.
                idx_must_be_contained = set(
                    [idx for idx in valid_row_idx if CardGroup.to_cardgroup(augment_action_space[idx]). \
                        bigger_than(CardGroup.to_cardgroup(last_cards_char))])
                combs = [comb for comb in combs if not idx_must_be_contained.isdisjoint(comb)]
                # NOTE: np.bool was removed in NumPy 1.24; use builtin bool.
                fine_mask = np.zeros([len(combs), self.num_actions[1]], dtype=bool)
                for i in range(len(combs)):
                    for j in range(len(combs[i])):
                        if combs[i][j] in idx_must_be_contained:
                            fine_mask[i][j] = True
            else:
                fine_mask = None
        else:
            # Small hand: recursive enumeration over the base action space.
            mask = get_mask_onehot60(curr_cards_char, action_space, None).reshape(len(action_space), 15, 4).sum(
                -1).astype(
                np.uint8)
            valid = mask.sum(-1) > 0
            cards_target = Card.char2onehot60(curr_cards_char).reshape(-1, 4).sum(-1).astype(np.uint8)
            # do not feed empty to C++, which will cause infinite loop
            combs = get_combinations_recursive(mask[valid, :], cards_target)
            idx_mapping = dict(zip(range(valid.shape[0]), np.where(valid)[0]))
            combs = [([] if len(last_cards_char) == 0 else [0]) + [idx_mapping[idx] for idx in comb] for comb in combs]
            if len(last_cards_char) > 0:
                valid[0] = True  # re-admit pass as a candidate when responding
                idx_must_be_contained = set(
                    [idx for idx in range(len(action_space)) if
                     valid[idx] and CardGroup.to_cardgroup(action_space[idx]). \
                         bigger_than(CardGroup.to_cardgroup(last_cards_char))])
                combs = [comb for comb in combs if not idx_must_be_contained.isdisjoint(comb)]
                # NOTE: np.bool was removed in NumPy 1.24; use builtin bool.
                fine_mask = np.zeros([len(combs), self.num_actions[1]], dtype=bool)
                for i in range(len(combs)):
                    for j in range(len(combs[i])):
                        if combs[i][j] in idx_must_be_contained:
                            fine_mask[i][j] = True
            else:
                fine_mask = None
        return combs, fine_mask
| [
"env.get_combinations_recursive",
"utils.card.clamp_action_idx",
"utils.card.CardGroup.to_cardgroup",
"numpy.expand_dims",
"sys.path.insert",
"utils.card.Card.char2onehot60",
"numpy.where",
"utils.utils.get_mask_onehot60",
"env.get_combinations_nosplit"
] | [((271, 298), 'sys.path.insert', 'sys.path.insert', (['(0)', 'ENV_DIR'], {}), '(0, ENV_DIR)\n', (286, 298), False, 'import sys\n'), ((1245, 1286), 'env.get_combinations_nosplit', 'get_combinations_nosplit', (['mask', 'card_mask'], {}), '(mask, card_mask)\n', (1269, 1286), False, 'from env import get_combinations_nosplit, get_combinations_recursive\n'), ((2595, 2651), 'env.get_combinations_recursive', 'get_combinations_recursive', (['mask[valid, :]', 'cards_target'], {}), '(mask[valid, :], cards_target)\n', (2621, 2651), False, 'from env import get_combinations_nosplit, get_combinations_recursive\n'), ((721, 753), 'numpy.expand_dims', 'np.expand_dims', (['(1 - card_mask)', '(0)'], {}), '(1 - card_mask, 0)\n', (735, 753), True, 'import numpy as np\n'), ((603, 638), 'utils.card.Card.char2onehot60', 'Card.char2onehot60', (['curr_cards_char'], {}), '(curr_cards_char)\n', (621, 638), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((795, 810), 'numpy.where', 'np.where', (['(a > 0)'], {}), '(a > 0)\n', (803, 810), True, 'import numpy as np\n'), ((1354, 1388), 'utils.card.clamp_action_idx', 'clamp_action_idx', (['idx_mapping[idx]'], {}), '(idx_mapping[idx])\n', (1370, 1388), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((2710, 2725), 'numpy.where', 'np.where', (['valid'], {}), '(valid)\n', (2718, 2725), True, 'import numpy as np\n'), ((1696, 1735), 'utils.card.CardGroup.to_cardgroup', 'CardGroup.to_cardgroup', (['last_cards_char'], {}), '(last_cards_char)\n', (1718, 1735), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((1607, 1656), 'utils.card.CardGroup.to_cardgroup', 'CardGroup.to_cardgroup', (['augment_action_space[idx]'], {}), '(augment_action_space[idx])\n', (1629, 1656), False, 'from utils.card 
import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((2216, 2270), 'utils.utils.get_mask_onehot60', 'get_mask_onehot60', (['curr_cards_char', 'action_space', 'None'], {}), '(curr_cards_char, action_space, None)\n', (2233, 2270), False, 'from utils.utils import get_mask_onehot60\n'), ((2428, 2463), 'utils.card.Card.char2onehot60', 'Card.char2onehot60', (['curr_cards_char'], {}), '(curr_cards_char)\n', (2446, 2463), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((3153, 3192), 'utils.card.CardGroup.to_cardgroup', 'CardGroup.to_cardgroup', (['last_cards_char'], {}), '(last_cards_char)\n', (3175, 3192), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n'), ((3071, 3112), 'utils.card.CardGroup.to_cardgroup', 'CardGroup.to_cardgroup', (['action_space[idx]'], {}), '(action_space[idx])\n', (3093, 3112), False, 'from utils.card import Card, action_space, CardGroup, augment_action_space_onehot60, augment_action_space, clamp_action_idx\n')] |
# Python Standard Libraries
import warnings
import time
import os
import sys
from pathlib import Path
# Third party imports
# fancy prints
import numpy as np
from tqdm import tqdm
# grAdapt package
import grAdapt.utils.math
import grAdapt.utils.misc
import grAdapt.utils.sampling
from grAdapt import surrogate as sur, optimizer as opt, escape as esc
from grAdapt.space.transformer import Transformer
from grAdapt.sampling import initializer as init, equidistributed as equi
class Sequential:
    def __init__(self, surrogate=None, optimizer=None, sampling_method=None,
                 initializer=None, escape=None,
                 training=None, random_state=1, bounds=None, parameters=None):
        """
        Parameters
        ----------
        surrogate : grAdapt Surrogate object
        optimizer : grAdapt Optimizer object
        sampling_method : Sampling Method to be used. static method from utils
        initializer : grAdapt Initializer object
        escape : grAdapt Escape object
        training : (X, y) with X shape (n, m) and y shape (n,)
        random_state : integer
            random_state integer sets numpy seed
        bounds : list
            list of tuples e.g. [(-5, 5), (-5, 5)]
        parameters : dict
            dictionary of parameter settings
            e.g. {'bounds': None, 'n_evals': 'auto', 'eps': 1e-3, 'f_min': -np.inf, 'f_min_eps': 1e-2,
            'n_random_starts': 'auto', 'auto_checkpoint': False, 'show_progressbar': True,
            'prints': True}
            All keys must be included
        """
        # seed
        self.random_state = random_state
        np.random.seed(self.random_state)
        # Standard Values
        # Each component defaults to a standard implementation; components
        # that depend on each other (optimizer->surrogate,
        # initializer->sampling_method, escape->both) must be passed together.
        if surrogate is None:
            self.surrogate = sur.GPRSlidingWindow()
        else:
            self.surrogate = surrogate
        if optimizer is None:
            self.optimizer = opt.AMSGradBisection(surrogate=self.surrogate)
        else:
            self.optimizer = optimizer
            # a custom optimizer is tied to a surrogate; require both explicitly
            if surrogate is None:
                raise Exception('If optimizer is passed, then surrogate must be passed, too.')
        if sampling_method is None:
            self.sampling_method = equi.MaximalMinDistance()
        else:
            self.sampling_method = sampling_method
        if initializer is None:
            self.initializer = init.VerticesForceRandom(sampling_method=self.sampling_method)
        else:
            self.initializer = initializer
            if sampling_method is None:
                raise Exception('If initializer is passed, then sampling_method must be passed, too.')
        if escape is None:
            self.escape = esc.NormalDistributionDecay(surrogate=self.surrogate, sampling_method=self.sampling_method)
        else:
            self.escape = escape
            if surrogate is None or sampling_method is None:
                raise Exception('When passing an escape function, surrogate and sampling_method must be passed, too.')
        # continue optimizing
        # If previous training data (X, y) is supplied, keep it so minimize()
        # can prepend it to the new evaluation history.
        self.training = training
        if training is not None:
            self.X = training[0]
            self.y = training[1]
            # self.fit(self.X, self.y)
        else:
            self.X = None
            self.y = None
        self.bounds = bounds
        if parameters is None:
            self.parameters = {'bounds': None, 'n_evals': 'auto', 'eps': 1e-3, 'f_min': -np.inf, 'f_min_eps': 1e-2,
                               'n_random_starts': 'auto', 'auto_checkpoint': False, 'show_progressbar': True,
                               'prints': True}
        else:
            self.parameters = parameters
        # results
        self.res = None
        # keep track of checkpoint files
        self.checkpoint_file = None
        self.auto_checkpoint = False
def warn_n_evals(self):
if self.n_evals <= 0:
raise Exception('Please set n_evals higher higher than 0.')
if self.n_evals <= self.dim:
warnings.warn('n_evals should be higher than the dimension of the problem.')
if self.n_random_starts == 'auto' or not isinstance(self.n_random_starts, (int, float)):
self.n_random_starts = grAdapt.utils.misc.random_starts(self.n_evals, self.dim)
if self.n_evals < self.n_random_starts:
warnings.warn('n_random starts can\'t be higher than n_evals.')
warnings.warn('n_random_starts set automatically. ')
self.n_random_starts = grAdapt.utils.misc.random_starts(self.n_evals, self.dim)
    def fit(self, X, y):
        """Fit known points on surrogate model
        Parameters
        ----------
        X : array-like (n, m)
        y : array (n,)
        Returns
        -------
        None
        """
        # Delegates to the wrapped surrogate model; this only pre-fits on
        # already evaluated points and does not run any optimization.
        print('Surrogate model fitting on known points.')
        self.surrogate.fit(X, y)
def escape_x_criteria(self, x_train, iteration):
"""Checks whether new point is different than the latest point by the euclidean distance
Checks whether new point is inside the defined search space/bounds.
Returns True if one of the conditions above are fulfilled.
Parameters
----------
x_train : ndarray (n, d)
iteration : integer
Returns
-------
boolean
"""
# x convergence
# escape_convergence = (np.linalg.norm(x_train[iteration - 1] - x_train[iteration])) < self.eps
n_hist = 2
escape_convergence_history = any(
(np.linalg.norm(x_train[iteration - n_hist:] - x_train[iteration], axis=1)) < self.eps)
# check whether point is inside bounds
escape_valid = not (grAdapt.utils.sampling.inside_bounds(self.bounds, x_train[iteration]))
# escape_x = escape_convergence or escape_valid
escape_x = escape_convergence_history or escape_valid
return escape_x
@staticmethod
def escape_y_criteria(y_train, iteration, pct):
"""
Parameters
----------
y_train : array-like (n, d)
iteration : integer
pct : numeric
pct should be less than 1.
Returns
-------
boolean
"""
try:
return grAdapt.utils.misc.is_inside_relative_range(y_train[iteration - 1], y_train[iteration - 2], pct)
except:
return False
def minimize(self, func, bounds, n_evals='auto', eps=1e-3, f_min=-np.inf, f_min_eps=1e-2, n_random_starts='auto',
auto_checkpoint=False, show_progressbar=True, prints=True):
"""Minimize the objective func
Parameters
----------
func : takes ndarray and returns scalar
bounds : list
list of tuples e.g. [(-5, 5), (-5, 5)]
n_evals : int or string
number of max. function evaluations
'auto' : will evaluate func based on the probability of
hitting the optimal solution by coincidence
between 100 and 10000
eps : float
convergence criteria, absolute tolerance
f_min : numeric
if the minimal target value of func is known
can lead to earlier convergence
f_min_eps : numeric
early stop criteria, relative tolerance
n_random_starts : string or int
auto_checkpoint : bool
show_progressbar : bool
prints : bool
Returns
-------
res: dictionary
res['x'] : ndarray (n, d)
res['y'] : ndarray (n,)
res['x_sol'] : solution vector
res['y_sol'] : y solution
res['surrogate'] : grAdapt surrogate object
"""
# print('Optimizing ' + func.__name__)
self.func = Transformer(func, bounds)
# self.bounds = List() # Numba Typed List for performance
# [self.bounds.append((float(x[0]), float(x[1]))) for x in bounds]
self.bounds = bounds
self.n_evals = int(n_evals)
self.eps = eps
self.f_min = f_min
self.f_min_eps = f_min_eps
self.n_random_starts = n_random_starts
self.auto_checkpoint = auto_checkpoint
self.dim = len(bounds)
self.prints = prints
if not self.prints:
sys.stdout = open(os.devnull, 'w')
"""n_evals value based on the probability of finding
the optimal solution by coincidence
"""
if self.n_evals == 'auto':
vol_sphere = grAdapt.utils.math.geometry.volume_hypersphere(len(bounds), self.eps)
vol_rec = grAdapt.utils.math.geometry.volume_hyperrectangle(bounds)
# limit n_evals to 100 and 10000
self.n_evals = max(100, int(vol_rec / vol_sphere))
self.n_evals = min(10000, self.n_evals)
"""Catching errors/displaying warnings related to n_evals and n_random_starts
and automatically set n_random_starts if not given
"""
self.warn_n_evals()
"""checkpoint print directory
"""
if auto_checkpoint:
directory_path = os.getcwd() + '/checkpoints'
print('auto_checkpoint set to True. The training directory is located at\n' + directory_path)
"""Inittialize x_train and y_train
Check whether training can be continued
"""
# TODO: Change number of dimensions for nominal datatype
x_train = np.zeros((self.n_evals, self.dim))
y_train = np.zeros((self.n_evals,)) + np.inf
if self.training is not None:
x_train = np.vstack((self.X, x_train))
y_train = np.hstack((self.y, y_train))
# prevent same sample points in training continuation
self.random_state += 1
np.random.seed(self.random_state)
print('Training data added successfully.')
"""Randomly guess n_random_starts points
"""
print('Sampling {0} random points.'.format(self.n_random_starts))
print('Random function evaluations. This might take a while.')
train_len = len(y_train) - self.n_evals
if train_len == 0:
x_train[train_len:self.n_random_starts + train_len] = \
self.initializer.sample(self.bounds, self.n_random_starts)
y_train[train_len:self.n_random_starts + train_len] = \
np.array(list(map(self.func, tqdm(x_train[train_len:self.n_random_starts + train_len],
total=self.n_evals, leave=False))))
else:
x_train[train_len:self.n_random_starts + train_len] = \
self.sampling_method.sample(self.bounds, self.n_random_starts, x_history=x_train[:train_len])
y_train[train_len:self.n_random_starts + train_len] = \
np.array(list(map(self.func, tqdm(x_train[train_len:self.n_random_starts + train_len],
total=self.n_evals, leave=False))))
print('Finding optimum...')
"""Start from best point
"""
best_idx = np.argmin(y_train[:self.n_random_starts + train_len])
# swap positions
x_train[[best_idx, self.n_random_starts - 1 + train_len]] = \
x_train[[self.n_random_starts - 1 + train_len, best_idx]]
y_train[[best_idx, self.n_random_starts - 1 + train_len]] = \
y_train[[self.n_random_starts - 1 + train_len, best_idx]]
"""Optimizing loop
"""
start_time = time.perf_counter()
pbar = tqdm(total=self.n_evals + train_len, initial=self.n_random_starts + train_len,
disable=not show_progressbar)
for iteration in range(self.n_random_starts + train_len, self.n_evals + train_len):
# print(iteration)
pbar.update() # progressbar update
# Fit data on surrogate model
self.surrogate.fit(x_train[:iteration], y_train[:iteration])
# gradient parameters specific for the surrogate model
surrogate_grad_params = [x_train[:iteration], y_train[:iteration], self.func, bounds]
# print(x_train[iteration-1])
x_train[iteration] = self.optimizer.run(x_train[iteration - 1], grAdapt.utils.misc.epochs(iteration),
surrogate_grad_params)
escape_x_criteria_boolean = self.escape_x_criteria(x_train, iteration)
escape_y_criteria_boolean = self.escape_y_criteria(y_train, iteration, self.f_min_eps)
escape_boolean = escape_x_criteria_boolean or escape_y_criteria_boolean
# sample new point if must escape or bounds not valid
if escape_boolean:
x_train[iteration] = self.escape.get_point(x_train[:iteration], y_train[:iteration], iteration,
self.bounds)
# obtain y_train
y_train[iteration] = self.func(x_train[iteration])
# stop early
if grAdapt.utils.misc.is_inside_relative_range(y_train[iteration], self.f_min, self.f_min_eps):
break
# global variables
self.X = x_train
self.X = y_train
# auto_checkpoint
if auto_checkpoint and time.perf_counter() - start_time >= 60:
self.X = x_train
self.y = y_train
res = self.build_res()
self.save_checkpoint(res)
start_time = time.perf_counter()
# progressbar
pbar.close()
self.X = x_train
self.y = y_train
# save current training data
self.training = (self.X, self.y)
self.res = self.build_res()
if auto_checkpoint:
self.save_checkpoint(self.res)
# restore prints
if not self.prints:
sys.stdout = sys.__stdout__
return self.res
def maximize(self, func, bounds, *args, **kwargs):
"""
Parameters
----------
func : takes ndarray and returns scalar
bounds : list of tuples e.g. [(-5, 5), (-5, 5)]
args : args from minimize
kwargs : args from minimize
Returns
-------
res: dictionary
res['x'] : ndarray (n, d)
res['y'] : ndarray (n,)
res['x_sol'] : solution vector
res['y_sol'] : y solution
res['surrogate'] : grAdapt surrogate object
"""
def f_max(x):
return -func(x)
# x_train, y_train, surrogate = self.minimize(f_max, bounds, *args, **kwargs)
res = self.minimize(f_max, bounds, *args, **kwargs)
res['y'] = -res['y']
res['y_sol'] = -res['y_sol']
# save the right y values
if self.auto_checkpoint:
self.save_checkpoint(res)
return res
def minimize_args(self, func, bounds, *args, **kwargs):
"""If objective f takes multiple inputs instead of a vector
f(x1, x2, x3...)
Parameters
----------
func : takes ndarray and returns scalar
bounds : list of tuples e.g. [(-5, 5), (-5, 5)]
args : args from minimize
kwargs : args from minimize
Returns
-------
res: dictionary
res['x'] : ndarray (n, d)
res['y'] : ndarray (n,)
res['x_sol'] : solution vector
res['y_sol'] : y solution
res['surrogate'] : grAdapt surrogate object
"""
def f_min_args(*args2):
arr = args2[0]
args_list = arr.tolist()
return func(*args_list)
# x_train, y_train, surrogate = self.minimize(f_max, bounds, *args, **kwargs)
res = self.minimize(f_min_args, bounds, *args, **kwargs)
return res
def maximize_args(self, func, bounds, *args, **kwargs):
"""If objective f takes multiple inputs instead of a vector
f(x1, x2, x3...)
Parameters
----------
func : takes ndarray and returns scalar
bounds : list of tuples e.g. [(-5, 5), (-5, 5)]
args : args from minimize
kwargs : args from minimize
Returns
-------
res: dictionary
res['x'] : ndarray (n, d)
res['y'] : ndarray (n,)
res['x_sol'] : solution vector
res['y_sol'] : y solution
res['surrogate'] : grAdapt surrogate object
"""
def f_max(*args2):
return -func(*args2)
# x_train, y_train, surrogate = self.minimize(f_max, bounds, *args, **kwargs)
res = self.minimize_args(f_max, bounds, *args, **kwargs)
res['y'] = -res['y']
res['y_sol'] = -res['y_sol']
# save the right y values
if self.auto_checkpoint:
self.save_checkpoint(res)
return res
def scipy_wrapper_minimize(self, func, x0, bounds=None, tol=None, *args, **kwargs):
"""
@ https://github.com/scipy/scipy/blob/v1.5.3/scipy/optimize/_minimize.py
Parameters
----------
func : callable
The objective function to be minimized.
``fun(x, *args) -> float``
where ``x`` is an 1-D array with shape (d,) and ``args``
is a tuple of the fixed parameters needed to completely
specify the function.
x0 : ndarray, shape (d,)
Initial guess. Array of real elements of size (d,),
bounds : sequence or `Bounds`, optional
There are two ways to specify the bounds:
1. Instance of `Bounds` class.
2. Sequence of ``(min, max)`` pairs for each element in `x`. None
is used to specify no bound.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific options.
-------
Returns
-------
ndarray (d,)
"""
self.training = (x0.reshape(1, -1), func(x0))
self.X = self.training[0]
self.y = self.training[1]
# self.fit(self.X, self.y)
res = self.minimize(func, bounds, *args, **kwargs)
return res['x_sol']
def build_res(self):
# define output
transformed_input = []
for i in range(self.X.shape[0]):
arguments = np.zeros((self.X.shape[1],), dtype='O')
for j in range(len(arguments)):
try:
arguments[j] = self.bounds[j].transform(self.X[i, j])
except:
arguments[j] = self.X[i, j]
transformed_input.append(arguments)
res = {'x': np.array(transformed_input), 'x_internal': self.X, 'y': self.y,
'x_sol': np.array(transformed_input)[np.argmin(self.y)],
'x_sol_internal': self.X[np.argmin(self.y)], 'y_sol': np.min(self.y),
'surrogate': self.surrogate,
'optimizer': self.optimizer}
return res
def load_checkpoint(self, filename):
"""
Parameters
----------
filename : string to filepath of checkpoint
Returns
-------
None
"""
res = np.load(filename, allow_pickle=True).item()
self.X = res['x']
self.y = res['y']
return res
# self.surrogate = res['surrogate']
def save_checkpoint(self, res):
"""
Parameters
----------
res: dictionary
res['x'] : ndarray (n, d)
res['y'] : ndarray (n,)
res['x_sol'] : solution vector
res['y_sol'] : y solution
res['surrogate'] : grAdapt surrogate object
Returns
-------
None
"""
directory = Path(os.getcwd()) / 'checkpoints'
filename = ('checkpointXY' + time.strftime('%y%b%d-%H%M%S') + '.npy')
filename = directory / filename
if not os.path.exists(directory):
os.makedirs(directory)
# save new file
np.save(filename, res)
print('Checkpoint created in ' + str(filename))
# delete last file
if self.checkpoint_file is not None:
os.remove(self.checkpoint_file)
print('Old checkpoint file {0} deleted.'.format(self.checkpoint_file))
self.checkpoint_file = filename
| [
"os.remove",
"numpy.load",
"numpy.random.seed",
"time.strftime",
"numpy.argmin",
"numpy.linalg.norm",
"grAdapt.escape.NormalDistributionDecay",
"os.path.exists",
"grAdapt.sampling.equidistributed.MaximalMinDistance",
"grAdapt.surrogate.GPRSlidingWindow",
"tqdm.tqdm",
"numpy.save",
"grAdapt.s... | [((1670, 1703), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (1684, 1703), True, 'import numpy as np\n'), ((7791, 7816), 'grAdapt.space.transformer.Transformer', 'Transformer', (['func', 'bounds'], {}), '(func, bounds)\n', (7802, 7816), False, 'from grAdapt.space.transformer import Transformer\n'), ((9443, 9477), 'numpy.zeros', 'np.zeros', (['(self.n_evals, self.dim)'], {}), '((self.n_evals, self.dim))\n', (9451, 9477), True, 'import numpy as np\n'), ((11109, 11162), 'numpy.argmin', 'np.argmin', (['y_train[:self.n_random_starts + train_len]'], {}), '(y_train[:self.n_random_starts + train_len])\n', (11118, 11162), True, 'import numpy as np\n'), ((11530, 11549), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11547, 11549), False, 'import time\n'), ((11565, 11677), 'tqdm.tqdm', 'tqdm', ([], {'total': '(self.n_evals + train_len)', 'initial': '(self.n_random_starts + train_len)', 'disable': '(not show_progressbar)'}), '(total=self.n_evals + train_len, initial=self.n_random_starts +\n train_len, disable=not show_progressbar)\n', (11569, 11677), False, 'from tqdm import tqdm\n'), ((20094, 20116), 'numpy.save', 'np.save', (['filename', 'res'], {}), '(filename, res)\n', (20101, 20116), True, 'import numpy as np\n'), ((1790, 1812), 'grAdapt.surrogate.GPRSlidingWindow', 'sur.GPRSlidingWindow', ([], {}), '()\n', (1810, 1812), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((1926, 1972), 'grAdapt.optimizer.AMSGradBisection', 'opt.AMSGradBisection', ([], {'surrogate': 'self.surrogate'}), '(surrogate=self.surrogate)\n', (1946, 1972), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((2227, 2252), 'grAdapt.sampling.equidistributed.MaximalMinDistance', 'equi.MaximalMinDistance', ([], {}), '()\n', (2250, 2252), True, 'from grAdapt.sampling import initializer as init, equidistributed as equi\n'), ((2382, 2444), 
'grAdapt.sampling.initializer.VerticesForceRandom', 'init.VerticesForceRandom', ([], {'sampling_method': 'self.sampling_method'}), '(sampling_method=self.sampling_method)\n', (2406, 2444), True, 'from grAdapt.sampling import initializer as init, equidistributed as equi\n'), ((2699, 2795), 'grAdapt.escape.NormalDistributionDecay', 'esc.NormalDistributionDecay', ([], {'surrogate': 'self.surrogate', 'sampling_method': 'self.sampling_method'}), '(surrogate=self.surrogate, sampling_method=self.\n sampling_method)\n', (2726, 2795), True, 'from grAdapt import surrogate as sur, optimizer as opt, escape as esc\n'), ((4014, 4090), 'warnings.warn', 'warnings.warn', (['"""n_evals should be higher than the dimension of the problem."""'], {}), "('n_evals should be higher than the dimension of the problem.')\n", (4027, 4090), False, 'import warnings\n'), ((4342, 4404), 'warnings.warn', 'warnings.warn', (['"""n_random starts can\'t be higher than n_evals."""'], {}), '("n_random starts can\'t be higher than n_evals.")\n', (4355, 4404), False, 'import warnings\n'), ((4418, 4470), 'warnings.warn', 'warnings.warn', (['"""n_random_starts set automatically. """'], {}), "('n_random_starts set automatically. 
')\n", (4431, 4470), False, 'import warnings\n'), ((9496, 9521), 'numpy.zeros', 'np.zeros', (['(self.n_evals,)'], {}), '((self.n_evals,))\n', (9504, 9521), True, 'import numpy as np\n'), ((9591, 9619), 'numpy.vstack', 'np.vstack', (['(self.X, x_train)'], {}), '((self.X, x_train))\n', (9600, 9619), True, 'import numpy as np\n'), ((9642, 9670), 'numpy.hstack', 'np.hstack', (['(self.y, y_train)'], {}), '((self.y, y_train))\n', (9651, 9670), True, 'import numpy as np\n'), ((9785, 9818), 'numpy.random.seed', 'np.random.seed', (['self.random_state'], {}), '(self.random_state)\n', (9799, 9818), True, 'import numpy as np\n'), ((18400, 18439), 'numpy.zeros', 'np.zeros', (['(self.X.shape[1],)'], {'dtype': '"""O"""'}), "((self.X.shape[1],), dtype='O')\n", (18408, 18439), True, 'import numpy as np\n'), ((18721, 18748), 'numpy.array', 'np.array', (['transformed_input'], {}), '(transformed_input)\n', (18729, 18748), True, 'import numpy as np\n'), ((18926, 18940), 'numpy.min', 'np.min', (['self.y'], {}), '(self.y)\n', (18932, 18940), True, 'import numpy as np\n'), ((19999, 20024), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (20013, 20024), False, 'import os\n'), ((20038, 20060), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (20049, 20060), False, 'import os\n'), ((20258, 20289), 'os.remove', 'os.remove', (['self.checkpoint_file'], {}), '(self.checkpoint_file)\n', (20267, 20289), False, 'import os\n'), ((5535, 5608), 'numpy.linalg.norm', 'np.linalg.norm', (['(x_train[iteration - n_hist:] - x_train[iteration])'], {'axis': '(1)'}), '(x_train[iteration - n_hist:] - x_train[iteration], axis=1)\n', (5549, 5608), True, 'import numpy as np\n'), ((9121, 9132), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9130, 9132), False, 'import os\n'), ((13548, 13567), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13565, 13567), False, 'import time\n'), ((18809, 18836), 'numpy.array', 'np.array', (['transformed_input'], {}), 
'(transformed_input)\n', (18817, 18836), True, 'import numpy as np\n'), ((18837, 18854), 'numpy.argmin', 'np.argmin', (['self.y'], {}), '(self.y)\n', (18846, 18854), True, 'import numpy as np\n'), ((18897, 18914), 'numpy.argmin', 'np.argmin', (['self.y'], {}), '(self.y)\n', (18906, 18914), True, 'import numpy as np\n'), ((19268, 19304), 'numpy.load', 'np.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (19275, 19304), True, 'import numpy as np\n'), ((19836, 19847), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19845, 19847), False, 'import os\n'), ((19902, 19932), 'time.strftime', 'time.strftime', (['"""%y%b%d-%H%M%S"""'], {}), "('%y%b%d-%H%M%S')\n", (19915, 19932), False, 'import time\n'), ((10413, 10508), 'tqdm.tqdm', 'tqdm', (['x_train[train_len:self.n_random_starts + train_len]'], {'total': 'self.n_evals', 'leave': '(False)'}), '(x_train[train_len:self.n_random_starts + train_len], total=self.\n n_evals, leave=False)\n', (10417, 10508), False, 'from tqdm import tqdm\n'), ((10862, 10957), 'tqdm.tqdm', 'tqdm', (['x_train[train_len:self.n_random_starts + train_len]'], {'total': 'self.n_evals', 'leave': '(False)'}), '(x_train[train_len:self.n_random_starts + train_len], total=self.\n n_evals, leave=False)\n', (10866, 10957), False, 'from tqdm import tqdm\n'), ((13332, 13351), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (13349, 13351), False, 'import time\n')] |
# coding: utf-8
from __future__ import unicode_literals
import numpy
import tempfile
import shutil
import contextlib
import srsly
from pathlib import Path
from spacy.tokens import Doc, Span
from spacy.attrs import POS, HEAD, DEP
from spacy.compat import path2str
@contextlib.contextmanager
def make_tempfile(mode="r"):
    """Yield a temporary file opened in *mode*, closing it afterwards.

    The close now runs in a ``finally`` block so the handle is not leaked
    when the ``with`` body raises.
    """
    f = tempfile.TemporaryFile(mode=mode)
    try:
        yield f
    finally:
        f.close()
@contextlib.contextmanager
def make_tempdir():
    """Yield a Path to a fresh temporary directory, deleting it afterwards.

    The cleanup now runs in a ``finally`` block so the directory is removed
    even when the ``with`` body raises.
    """
    d = Path(tempfile.mkdtemp())
    try:
        yield d
    finally:
        shutil.rmtree(path2str(d))
def get_doc(vocab, words=None, pos=None, heads=None, deps=None, tags=None, ents=None):
    """Create Doc object from given vocab, words and annotations.

    All annotation lists default to neutral values of the same length as
    *words*. *ents* is an iterable of (start, end, label) triples.
    """
    # `words=None` replaces the mutable default argument `words=[]`
    words = words if words is not None else []
    pos = pos or [""] * len(words)
    tags = tags or [""] * len(words)
    heads = heads or [0] * len(words)
    deps = deps or [""] * len(words)
    # intern every annotation string in the vocab before building the array
    for value in deps + tags + pos:
        vocab.strings.add(value)
    doc = Doc(vocab, words=words)
    attrs = doc.to_array([POS, HEAD, DEP])
    for i, (p, head, dep) in enumerate(zip(pos, heads, deps)):
        attrs[i, 0] = doc.vocab.strings[p]
        attrs[i, 1] = head
        attrs[i, 2] = doc.vocab.strings[dep]
    doc.from_array([POS, HEAD, DEP], attrs)
    if ents:
        doc.ents = [
            Span(doc, start, end, label=doc.vocab.strings[label])
            for start, end, label in ents
        ]
    if tags:
        for token in doc:
            token.tag_ = tags[token.i]
    return doc
def apply_transition_sequence(parser, doc, sequence):
    """Perform a series of pre-specified transitions, to put the parser in a
    desired state."""
    # actions of the form "MOVE-LABEL" reference a label that has to be
    # registered with the parser before stepping through
    for name in sequence:
        if "-" in name:
            _move, label = name.split("-")
            parser.add_label(label)
    # replay the transitions one by one on the stepwise parsing context
    with parser.step_through(doc) as stepwise:
        for name in sequence:
            stepwise.transition(name)
def add_vecs_to_vocab(vocab, vectors):
    """Add list of vector tuples to given vocab. All vectors need to have the
    same length. Format: [("text", [1, 2, 3])]"""
    # the first entry fixes the vector width for the whole table
    width = len(vectors[0][1])
    vocab.reset_vectors(width=width)
    for text, vector in vectors:
        vocab.set_vector(text, vector=vector)
    return vocab
def get_cosine(vec1, vec2):
    """Get cosine for two given vectors"""
    # cosine similarity: dot product scaled by the product of both norms
    dot_product = numpy.dot(vec1, vec2)
    norm_product = numpy.linalg.norm(vec1) * numpy.linalg.norm(vec2)
    return dot_product / norm_product
def assert_docs_equal(doc1, doc2):
    """Compare two Doc objects and assert that they're equal. Tests for tokens,
    tags, dependencies and entities."""
    # token-level attributes must match position by position
    assert [t.orth for t in doc1] == [t.orth for t in doc2]
    assert [t.pos for t in doc1] == [t.pos for t in doc2]
    assert [t.tag for t in doc1] == [t.tag for t in doc2]
    assert [t.head.i for t in doc1] == [t.head.i for t in doc2]
    assert [t.dep for t in doc1] == [t.dep for t in doc2]
    # sentence boundaries are only comparable once both docs are parsed
    if doc1.is_parsed and doc2.is_parsed:
        assert list(doc1.sents) == list(doc2.sents)
    assert [t.ent_type for t in doc1] == [t.ent_type for t in doc2]
    assert [t.ent_iob for t in doc1] == [t.ent_iob for t in doc2]
    # entities must agree on span boundaries, label and knowledge-base id
    for ent1, ent2 in zip(doc1.ents, doc2.ents):
        assert (ent1.start, ent1.end) == (ent2.start, ent2.end)
        assert ent1.label == ent2.label
        assert ent1.kb_id == ent2.kb_id
def assert_packed_msg_equal(b1, b2):
    """Assert that two packed msgpack messages are equal."""
    msg1 = srsly.msgpack_loads(b1)
    msg2 = srsly.msgpack_loads(b2)
    # same key set, then same value under every key
    assert sorted(msg1.keys()) == sorted(msg2.keys())
    for key in sorted(msg1):
        assert msg1[key] == msg2[key]
| [
"spacy.tokens.Doc",
"spacy.compat.path2str",
"spacy.tokens.Span",
"tempfile.TemporaryFile",
"tempfile.mkdtemp",
"numpy.linalg.norm",
"numpy.dot",
"srsly.msgpack_loads"
] | [((330, 363), 'tempfile.TemporaryFile', 'tempfile.TemporaryFile', ([], {'mode': 'mode'}), '(mode=mode)\n', (352, 363), False, 'import tempfile\n'), ((898, 921), 'spacy.tokens.Doc', 'Doc', (['vocab'], {'words': 'words'}), '(vocab, words=words)\n', (901, 921), False, 'from spacy.tokens import Doc, Span\n'), ((3368, 3391), 'srsly.msgpack_loads', 'srsly.msgpack_loads', (['b1'], {}), '(b1)\n', (3387, 3391), False, 'import srsly\n'), ((3403, 3426), 'srsly.msgpack_loads', 'srsly.msgpack_loads', (['b2'], {}), '(b2)\n', (3422, 3426), False, 'import srsly\n'), ((452, 470), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (468, 470), False, 'import tempfile\n'), ((502, 513), 'spacy.compat.path2str', 'path2str', (['d'], {}), '(d)\n', (510, 513), False, 'from spacy.compat import path2str\n'), ((2276, 2297), 'numpy.dot', 'numpy.dot', (['vec1', 'vec2'], {}), '(vec1, vec2)\n', (2285, 2297), False, 'import numpy\n'), ((1233, 1286), 'spacy.tokens.Span', 'Span', (['doc', 'start', 'end'], {'label': 'doc.vocab.strings[label]'}), '(doc, start, end, label=doc.vocab.strings[label])\n', (1237, 1286), False, 'from spacy.tokens import Doc, Span\n'), ((2301, 2324), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vec1'], {}), '(vec1)\n', (2318, 2324), False, 'import numpy\n'), ((2327, 2350), 'numpy.linalg.norm', 'numpy.linalg.norm', (['vec2'], {}), '(vec2)\n', (2344, 2350), False, 'import numpy\n')] |
# Polynomial fitting of geographic scatter points
import numpy as np
import matplotlib.pyplot as plt

# Define the x, y scatter coordinates
# (earlier data set, kept for reference)
# lat=[23.0262148,22.90151,22.786518,22.763319,22.707455,22.643107,23.43427,23.29968,23.185855,22.4337616,19.4386,18.7329,13.9486,11.9237,12.8544,20.2995,
#      25.1499,16.0203,19.5012,22.5281,28.3964,21.0806,26.2406,
#      25.8036,23.1643,16.2186,17.8153,21.3750
#     ]
# lon=[113.930326,114.17249,114.3903805,114.97676,115.468780,116.082854,117.74620,119.778955,123.889953,134.725895,147.2406,166.3881,176.2856,-169.8227,-148.9735,-119.4781,
#      -95.2608,-79.0345,-52.5548,-16.0017,15.3057,27.6257,37.0373,
#      48.8891,59.6118,75.1684,91.9555,109.7094
#     ]
lon = [-169.8046, -163.4765, -157.5, -156.6210, -120.5859, -85.9570, -67.3242, -37.4414, -2.8125, 58.7109, 79.1015, 96.3281, 113.6266, 113.8753, 121.4648, 165.4101]
lat = [24.0464, 22.7559, 23.2413, 7.7109, 5.4410, 18.9790, 29.5352, 28.4590, 35.0299, 20.1384, 15.7922, 16.4676, 21.8003, 22.8711, 25.1651, 27.6835]
x = np.array(lon)
y = np.array(lat)
# Fit a degree-3 polynomial
f1 = np.polyfit(x, y, 3)
p1 = np.poly1d(f1)
print(p1)
# Evaluate the fitted y values
yvals = p1(x)
# Plotting
# plot1=plt.plot(x,y,'s',label='original values')
# plot2=plt.plot(x,yvals,'r',label='polyfit values')
# plt.xlabel('x')
# plt.ylabel('y')
# plt.legend(loc=4)  # place the legend in the lower-right corner
# plt.title('polyfitting')
plt.plot(x, y)
# Save BEFORE show(): plt.show() blocks and tears the figure down, so a
# savefig() placed after it writes an empty image.
plt.savefig('test.png')
plt.show()
| [
"numpy.poly1d",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.polyfit",
"numpy.array",
"matplotlib.pyplot.savefig"
] | [((900, 913), 'numpy.array', 'np.array', (['lon'], {}), '(lon)\n', (908, 913), True, 'import numpy as np\n'), ((916, 929), 'numpy.array', 'np.array', (['lat'], {}), '(lat)\n', (924, 929), True, 'import numpy as np\n'), ((948, 967), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(3)'], {}), '(x, y, 3)\n', (958, 967), True, 'import numpy as np\n'), ((970, 983), 'numpy.poly1d', 'np.poly1d', (['f1'], {}), '(f1)\n', (979, 983), True, 'import numpy as np\n'), ((1222, 1236), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1230, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1246), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1244, 1246), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1270), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""test.png"""'], {}), "('test.png')\n", (1258, 1270), True, 'import matplotlib.pyplot as plt\n')] |
"""
Warping Invariant GML Regression using SRSF
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import fdasrsf as fs
import fdasrsf.utility_functions as uf
from patsy import bs
from scipy.optimize import minimize
from numpy.random import rand
from joblib import Parallel, delayed
class elastic_glm_regression:
    """
    This class provides elastic glm regression for functional data using the
    SRVF framework accounting for warping

    Usage: obj = elastic_glm_regression(f, y, time)

    :param f: (M,N) matrix defining N functions of M samples
    :param y: response vector of length N
    :param time: time vector of length M
    :param alpha: intercept
    :param b: coefficient vector
    :param B: basis matrix
    :param lambda: regularization parameter
    :param SSE: sum of squared errors

    Author : <NAME> (JDT) <jdtuck AT sandia.gov>
    Date : 18-Mar-2018
    """

    # Map each supported link-function name to the polynomial degree used by
    # np.polyfit / np.polyval.  The three link cases previously duplicated the
    # whole fitting pipeline; they only differ in this degree.
    _LINK_DEGREES = {'linear': 1, 'quadratic': 2, 'cubic': 3}

    def __init__(self, f, y, time):
        """
        Construct an instance of the elastic_glm_regression class

        :param f: numpy ndarray of shape (M,N) of N functions with M samples
        :param y: response vector
        :param time: vector of size M describing the sample points
        :raises Exception: if the number of rows of f differs from len(time)
        """
        a = time.shape[0]
        if f.shape[0] != a:
            raise Exception('Columns of f and time must be equal')
        self.f = f
        self.y = y
        self.time = time

    def calc_model(self, link='linear', B=None, lam=0, df=20, max_itr=20, smooth_data=False, sparam=25, parallel=False):
        """
        This function identifies a regression model with phase-variability
        using elastic pca

        :param link: string of link function ('linear', 'quadratic', 'cubic')
        :param B: optional matrix describing Basis elements
        :param lam: regularization parameter (default 0)
        :param df: number of degrees of freedom B-spline (default 20)
        :param max_itr: maximum number of iterations (default 20)
        :param smooth_data: smooth data using box filter (default = F)
        :param sparam: number of times to apply box filter (default = 25)
        :param parallel: run in parallel (default = F)
        :raises Exception: if link is not one of the supported names
        """
        if smooth_data:
            self.f = fs.smooth_data(self.f, sparam)

        print("Link: %s" % link)
        print("Lambda: %5.1f" % lam)

        self.lam = lam
        self.link = link

        # Validate the link name up front, before running the (expensive)
        # initial optimization.
        if link not in self._LINK_DEGREES:
            raise Exception('Invalid Link')
        degree = self._LINK_DEGREES[link]

        # Create B-Spline Basis if none provided
        if B is None:
            B = bs(self.time, df=df, degree=4, include_intercept=True)
        Nb = B.shape[1]
        self.B = B
        n = self.f.shape[1]

        print("Initializing")
        b0 = rand(Nb + 1)
        out = minimize(MyLogLikelihoodFn, b0,
                       args=(self.y, self.B, self.time, self.f, parallel),
                       method="SLSQP")
        a = out.x

        # One shared pipeline for all links; only the polynomial degree varies.
        h1, c_hat, cost = Amplitude_Index(self.f, self.time, self.B, self.y,
                                           max_itr, a, degree, parallel)
        yhat1 = c_hat[0] + MapC_to_y(n, c_hat[1:], self.B, self.time, self.f, parallel)
        yhat = np.polyval(h1, yhat1)

        tmp = (self.y - yhat) ** 2
        self.SSE = tmp.sum()
        self.h = h1
        self.alpha = c_hat[0]
        self.b = c_hat[1:]

        return

    def predict(self, newdata=None, parallel=True):
        """
        This function performs prediction on regression model on new data if available or current stored data in object
        Usage:  obj.predict()
                obj.predict(newdata)

        :param newdata: dict containing new data for prediction (needs the keys below, if None predicts on training data)
        :type newdata: dict
        :param f: (M,N) matrix of functions
        :param time: vector of time points
        :param y: truth if available
        :param smooth: smooth data if needed
        :param sparam: number of times to run filter
        """
        if newdata is not None:
            f = newdata['f']
            time = newdata['time']
            y = newdata['y']
            sparam = newdata['sparam']
            if newdata['smooth']:
                f = fs.smooth_data(f, sparam)

            n = f.shape[1]
            yhat1 = self.alpha + MapC_to_y(n, self.b, self.B, time, f, parallel)
            yhat = np.polyval(self.h, yhat1)
            if y is None:
                # No ground truth supplied: SSE is undefined.
                self.SSE = np.nan
            else:
                self.SSE = np.sum((y - yhat) ** 2)
            self.y_pred = yhat
        else:
            n = self.f.shape[1]
            yhat1 = self.alpha + MapC_to_y(n, self.b, self.B, self.time, self.f, parallel)
            yhat = np.polyval(self.h, yhat1)
            self.SSE = np.sum((self.y - yhat) ** 2)
            self.y_pred = yhat

        return
def Amplitude_Index(f, t, B, y0, MaxIter, b, link, parallel):
    """
    Alternate between refitting the link polynomial and re-optimizing the
    basis coefficients until the objective stops improving or MaxIter
    iterations have been spent.

    Returns (h, c_hat, cost): the final link polynomial, coefficient vector
    and objective value.
    """
    n_basis = B.shape[1]
    n_funcs = f.shape[1]
    c_hat = b
    cost = 10000  # sentinel: any first iterate beats this
    for itr in range(1, MaxIter + 1):
        print("updating step: iter=%d" % (itr))
        # Current predicted index values under the present coefficients.
        y = c_hat[0] + MapC_to_y(n_funcs, c_hat[1:], B, t, f, parallel)
        h = np.polyfit(y, y0, link)
        b0 = rand(n_basis + 1)
        out = minimize(MyLogLikelihoodFn2, b0,
                       args=(y0, B, t, f, h, parallel), method="SLSQP")
        c_hat = out.x
        if out.fun < cost:
            cost = out.fun
        else:
            # No improvement: keep the latest coefficients and stop.
            break
    return (h, c_hat, cost)
def MyLogLikelihoodFn2(c, y0, B, t, f, h, parallel):
    """
    Sum-of-squared-errors objective for the coefficient update step: map the
    coefficients c through the basis and the link polynomial h, then compare
    with the responses y0.  (The unused local J was removed.)
    """
    N = f.shape[1]
    y = c[0] + MapC_to_y(N, c[1:], B, t, f, parallel)
    tmp = np.polyval(h, y)
    x = (y0 - tmp) * (y0 - tmp)
    return x.sum()
def MyLogLikelihoodFn(c, y0, B, t, f, parallel):
    """
    Sum-of-squared-errors objective for the initialization step (identity
    link): compare the mapped coefficients directly with the responses y0.
    (The unused local J was removed.)
    """
    N = f.shape[1]
    y = c[0] + MapC_to_y(N, c[1:], B, t, f, parallel)
    x = (y0 - y) * (y0 - y)
    return x.sum()
def MapC_to_y(n, c, B, t, f, parallel):
    """
    Map basis coefficients c to one scalar per function: each column of f is
    elastically aligned to the coefficient curve and the inner product of the
    curve with the warped function is integrated over t.

    :param n: number of functions (columns of f)
    :param c: coefficient vector of length B.shape[1]
    :param B: basis matrix
    :param t: time vector
    :param f: (M,n) matrix of functions
    :param parallel: if True, align the columns in parallel with joblib
    :return: numpy array of length n
    """
    dt = np.diff(t)
    dt = dt.mean()
    # The coefficient curve and its SRSF are independent of the column index,
    # so compute them once instead of inside the loop (original recomputed
    # them on every iteration of the serial branch).
    bet = np.dot(B, c)
    q1 = uf.f_to_srsf(bet, t)
    if parallel:
        y = Parallel(n_jobs=-1)(delayed(map_driver)(q1, f[:, k], bet, t, dt)
                                 for k in range(n))
        # Parallel returns a list; convert so both branches return an ndarray.
        y = np.asarray(y)
    else:
        y = np.zeros(n)
        for k in range(0, n):
            q2 = uf.f_to_srsf(f[:, k], t)
            gam = uf.optimum_reparam(q1, t, q2)
            fn = uf.warp_f_gamma(t, f[:, k], gam)
            tmp = bet * fn
            y[k] = tmp.sum() * dt
    return y
def map_driver(q1, f, bet, t, dt):
    """Align one function f to the coefficient curve bet and integrate
    their product over t (worker for the parallel branch of MapC_to_y)."""
    srsf = uf.f_to_srsf(f, t)
    gamma = uf.optimum_reparam(q1, t, srsf)
    warped = uf.warp_f_gamma(t, f, gamma)
    return (bet * warped).sum() * dt
| [
"fdasrsf.utility_functions.warp_f_gamma",
"scipy.optimize.minimize",
"numpy.sum",
"numpy.polyfit",
"numpy.polyval",
"fdasrsf.utility_functions.f_to_srsf",
"numpy.zeros",
"numpy.diff",
"patsy.bs",
"joblib.Parallel",
"numpy.random.rand",
"numpy.dot",
"joblib.delayed",
"fdasrsf.utility_functi... | [((6049, 6065), 'numpy.polyval', 'np.polyval', (['h', 'y'], {}), '(h, y)\n', (6059, 6065), True, 'import numpy as np\n'), ((6347, 6357), 'numpy.diff', 'np.diff', (['t'], {}), '(t)\n', (6354, 6357), True, 'import numpy as np\n'), ((6386, 6397), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (6394, 6397), True, 'import numpy as np\n'), ((6979, 6997), 'fdasrsf.utility_functions.f_to_srsf', 'uf.f_to_srsf', (['f', 't'], {}), '(f, t)\n', (6991, 6997), True, 'import fdasrsf.utility_functions as uf\n'), ((7008, 7037), 'fdasrsf.utility_functions.optimum_reparam', 'uf.optimum_reparam', (['q1', 't', 'q2'], {}), '(q1, t, q2)\n', (7026, 7037), True, 'import fdasrsf.utility_functions as uf\n'), ((7045, 7071), 'fdasrsf.utility_functions.warp_f_gamma', 'uf.warp_f_gamma', (['t', 'f', 'gam'], {}), '(t, f, gam)\n', (7060, 7071), True, 'import fdasrsf.utility_functions as uf\n'), ((2642, 2654), 'numpy.random.rand', 'rand', (['(Nb + 1)'], {}), '(Nb + 1)\n', (2646, 2654), False, 'from numpy.random import rand\n'), ((2675, 2778), 'scipy.optimize.minimize', 'minimize', (['MyLogLikelihoodFn', 'b0'], {'args': '(self.y, self.B, self.time, self.f, parallel)', 'method': '"""SLSQP"""'}), "(MyLogLikelihoodFn, b0, args=(self.y, self.B, self.time, self.f,\n parallel), method='SLSQP')\n", (2683, 2778), False, 'from scipy.optimize import minimize\n'), ((5594, 5617), 'numpy.polyfit', 'np.polyfit', (['y', 'y0', 'link'], {}), '(y, y0, link)\n', (5604, 5617), True, 'import numpy as np\n'), ((5631, 5642), 'numpy.random.rand', 'rand', (['(J + 1)'], {}), '(J + 1)\n', (5635, 5642), False, 'from numpy.random import rand\n'), ((5655, 5741), 'scipy.optimize.minimize', 'minimize', (['MyLogLikelihoodFn2', 'b0'], {'args': '(y0, B, t, f, h, parallel)', 'method': '"""SLSQP"""'}), "(MyLogLikelihoodFn2, b0, args=(y0, B, t, f, h, parallel), method=\n 'SLSQP')\n", (5663, 5741), False, 'from scipy.optimize import minimize\n'), ((6430, 6442), 'numpy.dot', 'np.dot', (['B', 'c'], {}), '(B, 
c)\n', (6436, 6442), True, 'import numpy as np\n'), ((6455, 6475), 'fdasrsf.utility_functions.f_to_srsf', 'uf.f_to_srsf', (['bet', 't'], {}), '(bet, t)\n', (6467, 6475), True, 'import fdasrsf.utility_functions as uf\n'), ((2225, 2255), 'fdasrsf.smooth_data', 'fs.smooth_data', (['self.f', 'sparam'], {}), '(self.f, sparam)\n', (2239, 2255), True, 'import fdasrsf as fs\n'), ((2471, 2525), 'patsy.bs', 'bs', (['self.time'], {'df': 'df', 'degree': '(4)', 'include_intercept': '(True)'}), '(self.time, df=df, degree=4, include_intercept=True)\n', (2473, 2525), False, 'from patsy import bs\n'), ((3037, 3058), 'numpy.polyval', 'np.polyval', (['h1', 'yhat1'], {}), '(h1, yhat1)\n', (3047, 3058), True, 'import numpy as np\n'), ((4818, 4843), 'numpy.polyval', 'np.polyval', (['self.h', 'yhat1'], {}), '(self.h, yhat1)\n', (4828, 4843), True, 'import numpy as np\n'), ((5165, 5190), 'numpy.polyval', 'np.polyval', (['self.h', 'yhat1'], {}), '(self.h, yhat1)\n', (5175, 5190), True, 'import numpy as np\n'), ((5213, 5241), 'numpy.sum', 'np.sum', (['((self.y - yhat) ** 2)'], {}), '((self.y - yhat) ** 2)\n', (5219, 5241), True, 'import numpy as np\n'), ((6488, 6507), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)'}), '(n_jobs=-1)\n', (6496, 6507), False, 'from joblib import Parallel, delayed\n'), ((6672, 6684), 'numpy.dot', 'np.dot', (['B', 'c'], {}), '(B, c)\n', (6678, 6684), True, 'import numpy as np\n'), ((6701, 6721), 'fdasrsf.utility_functions.f_to_srsf', 'uf.f_to_srsf', (['bet', 't'], {}), '(bet, t)\n', (6713, 6721), True, 'import fdasrsf.utility_functions as uf\n'), ((6739, 6763), 'fdasrsf.utility_functions.f_to_srsf', 'uf.f_to_srsf', (['f[:, k]', 't'], {}), '(f[:, k], t)\n', (6751, 6763), True, 'import fdasrsf.utility_functions as uf\n'), ((6781, 6810), 'fdasrsf.utility_functions.optimum_reparam', 'uf.optimum_reparam', (['q1', 't', 'q2'], {}), '(q1, t, q2)\n', (6799, 6810), True, 'import fdasrsf.utility_functions as uf\n'), ((6826, 6858), 
'fdasrsf.utility_functions.warp_f_gamma', 'uf.warp_f_gamma', (['t', 'f[:, k]', 'gam'], {}), '(t, f[:, k], gam)\n', (6841, 6858), True, 'import fdasrsf.utility_functions as uf\n'), ((3309, 3330), 'numpy.polyval', 'np.polyval', (['h1', 'yhat1'], {}), '(h1, yhat1)\n', (3319, 3330), True, 'import numpy as np\n'), ((4670, 4695), 'fdasrsf.smooth_data', 'fs.smooth_data', (['f', 'sparam'], {}), '(f, sparam)\n', (4684, 4695), True, 'import fdasrsf as fs\n'), ((4949, 4972), 'numpy.sum', 'np.sum', (['((y - yhat) ** 2)'], {}), '((y - yhat) ** 2)\n', (4955, 4972), True, 'import numpy as np\n'), ((3577, 3598), 'numpy.polyval', 'np.polyval', (['h1', 'yhat1'], {}), '(h1, yhat1)\n', (3587, 3598), True, 'import numpy as np\n'), ((6508, 6527), 'joblib.delayed', 'delayed', (['map_driver'], {}), '(map_driver)\n', (6515, 6527), False, 'from joblib import Parallel, delayed\n')] |
if __name__ == "__main__":
    from movement import Movement
    from typing import List, Tuple
    import numpy as np

    def cmToInches(cm: float) -> float:
        """Convert a length in centimeters to inches."""
        return cm / 2.54

    def euclidianDistance(x1: float, y1: float, x2: float, y2: float) -> float:
        """Return the SQUARED euclidean distance between (x1, y1) and (x2, y2).

        The square root is deliberately skipped (v2): the value is only used
        for ordering comparisons, where monotonicity is all that matters.
        """
        # return np.linalg.norm([x1 - x2, y1 - y2]) # v1
        return (x1 - x2)**2 + (y1 - y2)**2 # v2

    def getDirectionVectorFromAngleAndLength(angle: float, length: float) -> Tuple[float, float]:
        """Return the (x, y) components of a vector given in polar form."""
        # NOTE(review): actually returns an np.ndarray, not a tuple — the
        # annotation matches the logical shape, not the concrete type.
        return np.array([length * np.cos(angle), length * np.sin(angle)])

    def printYaw(yawInRad: float) -> None:
        """Print a yaw angle, converting radians to degrees."""
        yawInDeg = np.rad2deg(yawInRad)
        print(f"Yaw = {yawInDeg}")

    def printDistance(distance: float, text: str = "Distance", unit: str = "m") -> None:
        # Annotation fixed: the function only prints and returns None
        # (it was declared "-> bool" but had no return statement).
        print(f"{text} = {round(distance, 2)}{unit}")

    def reverse_insort(movements: List["Movement"], newMovement: "Movement", lo: int = 0, hi: int = None) -> None:
        """Insert item newMovement in list movements, and keep it reverse-sorted
        assuming movements is reverse-sorted.

        If newMovement is already in movements, insert it to the right of the
        rightmost newMovement.

        Optional args lo (default 0) and hi (default len(movements)) bound the
        slice of movements to be searched.
        """
        if lo < 0:
            raise ValueError('lo must be non-negative')
        if hi is None:
            hi = len(movements)
        # Standard bisection, with the comparison reversed for descending order.
        while lo < hi:
            mid = (lo + hi) // 2
            if newMovement.newDistanceToGoal > movements[mid].newDistanceToGoal:
                hi = mid
            else:
                lo = mid + 1
        movements.insert(lo, newMovement)
"numpy.rad2deg",
"numpy.sin",
"numpy.cos"
] | [((560, 580), 'numpy.rad2deg', 'np.rad2deg', (['yawInRad'], {}), '(yawInRad)\n', (570, 580), True, 'import numpy as np\n'), ((466, 479), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (472, 479), True, 'import numpy as np\n'), ((490, 503), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (496, 503), True, 'import numpy as np\n')] |
import ellipse_lib as el
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse

# Fit an ellipse to synthetic noisy data and plot the fit over the points.
data = el.make_test_ellipse()

lsqe = el.LSqEllipse()
lsqe.fit(data)
center, width, height, phi = lsqe.parameters()

plt.close('all')
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111)
ax.axis('equal')
ax.plot(data[0], data[1], 'ro', label='test data', zorder=1)

# The 2* factors suggest lsqe returns semi-axis lengths while the patch
# expects full axis lengths — confirm against ellipse_lib if touched.
ellipse = Ellipse(xy=center, width=2 * width, height=2 * height, angle=np.rad2deg(phi),
                  edgecolor='b', fc='None', lw=2, label='Fit', zorder=2)
ax.add_patch(ellipse)

plt.legend()
# Save BEFORE show(): show() blocks with most backends and the figure can be
# cleared when its window closes, so saving afterwards may write a blank file.
plt.savefig('test.png')
plt.show()
| [
"matplotlib.pyplot.show",
"ellipse_lib.LSqEllipse",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.rad2deg",
"matplotlib.pyplot.figure",
"ellipse_lib.make_test_ellipse"
] | [((123, 145), 'ellipse_lib.make_test_ellipse', 'el.make_test_ellipse', ([], {}), '()\n', (143, 145), True, 'import ellipse_lib as el\n'), ((154, 169), 'ellipse_lib.LSqEllipse', 'el.LSqEllipse', ([], {}), '()\n', (167, 169), True, 'import ellipse_lib as el\n'), ((233, 249), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (242, 249), True, 'import matplotlib.pyplot as plt\n'), ((256, 282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 6)'}), '(figsize=(6, 6))\n', (266, 282), True, 'import matplotlib.pyplot as plt\n'), ((566, 578), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (576, 578), True, 'import matplotlib.pyplot as plt\n'), ((579, 589), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (587, 589), True, 'import matplotlib.pyplot as plt\n'), ((454, 469), 'numpy.rad2deg', 'np.rad2deg', (['phi'], {}), '(phi)\n', (464, 469), True, 'import numpy as np\n')] |
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Step class which lets you build your Step charts just
passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENCE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from six import string_types
import numpy as np
from ._chartobject import ChartObject
from ..models import ColumnDataSource, Range1d, DataRange1d
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class Step(ChartObject):
    """This is the Step class and it is in charge of plotting
    Step charts in an easy and intuitive way.
    Essentially, we provide a way to ingest the data, make the proper
    calculations and push the references into a source object.
    We additionally make calculations for the ranges.
    And finally add the needed lines taking the references from the
    source.
    """
    def __init__(self, values,
                 index=None,
                 title=None, xlabel=None, ylabel=None, legend=False,
                 xscale="linear", yscale="linear", width=800, height=600,
                 tools=True, filename=False, server=False, notebook=False,
                 facet=False, xgrid=True, ygrid=True):
        """
        Args:
            values (iterable): iterable 2d representing the data series
                values matrix.
            index (str|1d iterable, optional): can be used to specify a
                common custom index for all data series as follows:
                    - As a 1d iterable of any sort that will be used as
                        series common index
                    - As a string that corresponds to the key of the
                        mapping to be used as index (and not as data
                        series) if area.values is a mapping (like a dict,
                        an OrderedDict or a pandas DataFrame)
            title (str, optional): the title of your chart. Defaults
                to None.
            xlabel (str, optional): the x-axis label of your chart.
                Defaults to None.
            ylabel (str, optional): the y-axis label of your chart.
                Defaults to None.
            legend (str, optional): the legend of your chart. The legend
                content is inferred from incoming input. It can be
                ``top_left``, ``top_right``, ``bottom_left``,
                ``bottom_right``. ``top_right`` is set if you set it
                as True. Defaults to None.
            xscale (str, optional): the x-axis type scale of your chart.
                It can be ``linear``, ``datetime`` or ``categorical``.
                Defaults to ``datetime``.
            yscale (str, optional): the y-axis type scale of your chart.
                It can be ``linear``, ``datetime`` or ``categorical``.
                Defaults to ``linear``.
            width (int, optional): the width of your chart in pixels.
                Defaults to 800.
            height (int, optional): the height of your chart in pixels.
                Defaults to 600.
            tools (bool, optional): to enable or disable the tools in
                your chart. Defaults to True
            filename (str or bool, optional): the name of the file where
                your chart will be written. If you pass True to this
                argument, it will use ``untitled`` as a filename.
                Defaults to False.
            server (str or bool, optional): the name of your chart in
                the server. If you pass True to this argument, it will
                use ``untitled`` as the name in the server.
                Defaults to False.
            notebook (bool, optional): whether to output to IPython notebook
                (default: False)
            facet (bool, optional): generate multiple areas on multiple
                separate charts for each series if True. Defaults to
                False
            xgrid (bool, optional): whether to display x grid lines
                (default: True)
            ygrid (bool, optional): whether to display y grid lines
                (default: True)
        Attributes:
            source (obj): datasource object for your plot,
                initialized as a dummy None.
            xdr (obj): x-associated datarange object for you plot,
                initialized as a dummy None.
            ydr (obj): y-associated datarange object for you plot,
                initialized as a dummy None.
            groups (list): to be filled with the incoming groups of data.
                Useful for legend construction.
            data (dict): to be filled with the incoming data and be passed
                to the ColumnDataSource in each chart inherited class.
                Needed for set_and_get method.
            attr (list): to be filled with the new attributes created after
                loading the data dict.
                Needed for set_and_get method.
        """
        self.values = values
        self.source = None
        self.xdr = None
        self.ydr = None
        # list to save all the groups available in the incoming input
        self.groups = []
        self.data = dict()
        self.attr = []
        self.index = index
        super(Step, self).__init__(
            title, xlabel, ylabel, legend, xscale, yscale, width, height,
            tools, filename, server, notebook, facet, xgrid, ygrid
        )
    def get_data(self):
        """It calculates the chart properties accordingly from Step.values.
        Then build a dict containing references to all the points to be
        used by the segment glyph inside the ``draw`` method.
        """
        self.data = dict()
        # list to save all the attributes we are going to create
        self.attr = []
        self.groups = []
        xs = self.values_index
        # x/x2 are the interval start/end abscissae for every step segment
        self.set_and_get("x", "", np.array(xs)[:-1])
        self.set_and_get("x2", "", np.array(xs)[1:])
        for col in self.values.keys():
            # when a custom index column was named, skip it as a data series
            if isinstance(self.index, string_types) and col == self.index:
                continue
            # save every new group we find
            self.groups.append(col)
            values = [self.values[col][x] for x in xs]
            # y1/y2 are the series values at the interval start/end
            self.set_and_get("y1_", col, values[:-1])
            self.set_and_get("y2_", col, values[1:])
    def get_source(self):
        """ Push the Step data into the ColumnDataSource and calculate
        the proper ranges.
        """
        sc = self.source = ColumnDataSource(self.data)
        self.xdr = DataRange1d(sources=[sc.columns("x"), sc.columns("x2")])
        y_names = self.attr[1:]
        endy = max(max(self.data[i]) for i in y_names)
        starty = min(min(self.data[i]) for i in y_names)
        # pad the y range by 10% of the data span on each side
        self.ydr = Range1d(
            start=starty - 0.1 * (endy - starty),
            end=endy + 0.1 * (endy - starty)
        )
    def draw(self):
        """Use the line glyphs to connect the xy points in the Step.
        Takes reference points from the data loaded at the ColumnDataSource.
        """
        tuples = list(self._chunker(self.attr[2:], 2))
        colors = self._set_colors(tuples)
        # duplet: y1, y2 values of each series
        for i, duplet in enumerate(tuples):
            # vertical riser at the end of the interval: (x2, y1) -> (x2, y2)
            self.chart.make_segment(
                self.source, 'x2', duplet[0],
                'x2', duplet[1], colors[i], 2,
            )
            # horizontal tread across the interval: (x, y1) -> (x2, y1)
            self.chart.make_segment(
                self.source, 'x', duplet[0],
                'x2', duplet[0], colors[i], 2,
            )
            if i < len(tuples)-1:
                self.create_plot_if_facet()
        if not self._facet:
            self.reset_legend()
    def _make_legend_glyph(self, source_legend, color):
        """Create a new glyph to represent one of the chart data
        series with the specified color
        The glyph is added to chart.glyphs.
        NOTE: Overwrites default ChartObject in order to draw the
        right number of segments on legend
        Args:
            source_legend (ColumnDataSource): source to be used when
                creating the glyph
            color (str): color of the glyph
        """
        self.chart.make_segment(
            source_legend, "groups", None, 'groups', None, color, 2
        )
| [
"numpy.array"
] | [((6529, 6541), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (6537, 6541), True, 'import numpy as np\n'), ((6583, 6595), 'numpy.array', 'np.array', (['xs'], {}), '(xs)\n', (6591, 6595), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
mm.tinker.py
============
Garleek - Tinker bridge
"""
from __future__ import print_function, absolute_import, division
import os
import sys
import shutil
from distutils.spawn import find_executable
from subprocess import check_output
from tempfile import NamedTemporaryFile
import numpy as np
from .. import units as u
# TINKER versions this bridge knows how to talk to, plus the behavior-flag
# pseudo-version 'qmcharges'.
supported_versions = '8', '8.1', 'qmcharges'
default_version = '8.1'
# Locate the TINKER helper executables; an explicit environment variable
# takes precedence over whatever is found on $PATH (may be None if absent —
# run_tinker checks before use).
tinker_testhess = os.environ.get('TINKER_TESTHESS') or find_executable('testhess')
tinker_analyze = os.environ.get('TINKER_ANALYZE') or find_executable('analyze')
tinker_testgrad = os.environ.get('TINKER_TESTGRAD') or find_executable('testgrad')
def prepare_tinker_xyz(atoms, bonds=None, version=None):
    """
    Serialize ``atoms`` (and optionally ``bonds``) as TINKER-style XYZ text:
    a header line with the atom count, then one line per atom with::

        atom_index element x y z type bonded_atom_1 bonded_atom_2 ...

    Coordinates are converted from Bohr to Angstrom, as TINKER expects.

    Parameters
    ----------
    atoms : OrderedDict
        Set of atoms to write, following convention defined in :mod:`garleek.qm`.
    bonds : OrderedDict, optional
        Connectivity information, following convention defined in :mod:`garleek.qm`.
    version : str, optional
        Specific behavior flag, if needed (unused here).

    Returns
    -------
    str
        The XYZ-formatted text.
    """
    rows = [str(len(atoms))]
    template = '{index} E{element} {xyz[0]} {xyz[1]} {xyz[2]} {type} {bonds}'
    for idx, atom in atoms.items():
        if bonds:
            # Only report connections whose bond order amounts to a real bond.
            neighbours = ' '.join(str(partner)
                                  for partner, order in bonds[idx]
                                  if order >= 0.5)
        else:
            neighbours = ''
        rows.append(template.format(index=idx, element=atom['element'],
                                    type=atom['type'],
                                    xyz=atom['xyz'] * u.RBOHR_TO_ANGSTROM,
                                    bonds=neighbours))
    return '\n'.join(rows)
def prepare_tinker_key(forcefield, atoms=None, version=None):
    """
    Prepare a file ready for TINKER's -k option.
    Parameters
    ----------
    forcefield : str
        ``forcefield`` should be either a:
        - ``*.prm``: proper forcefield file
        - ``*.key``, ``*.par``: key file that can call ``*.prm files`` and
          add more parameters
        If a .prm file is provided, a .key file will be written to
        accommodate the forcefield in a ``parameters *`` call.
    atoms : OrderedDict, optional=None
        Set of atoms to write, following convention defined in :mod:`garleek.qm`.
    version : str, optional=None
        Specific behavior flag. Supports:
        - ``qmcharges``, which would write charges provided by QM engine.
          Needs ``atoms`` to be passed.
    Returns
    -------
    path: str
        Absolute path to the generated TINKER .key file
    """
    if forcefield.lower().endswith('.prm'):
        # Raw parameter file: wrap it in a minimal key file (written to the
        # current working directory) that loads it via a ``parameters`` line.
        with open('garleek.key', 'w') as f:
            print('parameters', os.path.abspath(forcefield), file=f)
        keypath = os.path.abspath('garleek.key')
    elif os.path.splitext(forcefield)[1].lower() in ('.par', '.key'):
        keypath = os.path.abspath(forcefield)
    else:
        raise ValueError('TINKER key file must be .prm, .key or .par')
    if version == 'qmcharges' and atoms:
        # Append QM-derived charges to a *copy* of the key file so the
        # original stays untouched.
        keypath_original = keypath
        keypath = os.path.splitext(keypath_original)[0] + '.charges.key'
        shutil.copyfile(keypath_original, keypath)
        with open(keypath, 'a') as f:
            print('', file=f)
            for index, atom in atoms.items():
                # Negated atom number — presumably targets the individual
                # atom rather than an atom type (TINKER CHARGE keyword);
                # confirm against the TINKER keyword reference.
                print('CHARGE', -1 * index, atom['charge'], file=f)
    return keypath
def _decode(data):
try:
return data.decode()
except UnicodeDecodeError:
return data.decode('utf-8', 'ignore')
def _parse_tinker_analyze(data):
"""
Takes the output of TINKER's ``analyze`` program and obtain
the potential energy (kcal/mole) and the dipole x, y, z
components (debyes).
"""
energy, dipole = None, None
for line in data.splitlines():
line = line.strip()
if line.startswith('Total Potential Energy'):
energy = float(line[26:49])
elif line.startswith('Dipole X,Y,Z-Components'):
dipole = list(map(float, [line[26:49], line[49:62], line[62:75]]))
break
return energy, np.array(dipole)
def _parse_tinker_testgrad(data):
"""
"""
gradients = []
lines = data.splitlines()
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('Cartesian Gradient Breakdown over Individual Atoms'):
break
for line in lines[i+4:]:
if not line.strip() or line.startswith('Total Gradient'):
break
fields = line[16:35], line[35:47], line[47:59]
gradients.append(list(map(float, fields)))
return np.array(gradients)
def _parse_tinker_testhess(hesfile, n_atoms, remove=False):
    """
    Parse a TINKER ``.hes`` Hessian file into a (3*n_atoms, 3*n_atoms)
    numpy matrix.

    The file lists the diagonal first, then one 'Off-diagonal' section per
    (atom, axis) column holding the elements *below* the diagonal; the
    strictly-upper triangle of the returned matrix is left as zeros.

    Parameters
    ----------
    hesfile : str
        Path to the ``.hes`` file written by TINKER's ``testhess``.
    n_atoms : int
        Number of atoms (the matrix is 3N x 3N).
    remove : bool, optional=False
        If True, delete ``hesfile`` after parsing.

    Returns
    -------
    hessian : numpy.ndarray
    """
    hessian = np.zeros((n_atoms * 3, n_atoms * 3))
    xyz_to_int = {'X': 0, 'Y': 1, 'Z': 2}
    with open(hesfile) as lines:
        for line in lines:
            if not line.strip():
                continue
            elif line.startswith(' Diagonal'):
                _, line = next(lines), next(lines).rstrip() # skip blank line
                nums = []
                # Values come in fixed-width 12-char columns, six per line,
                # until a blank line ends the section.
                while line.strip():
                    nums.extend(filter(bool, [line[:12], line[12:24], line[24:36],
                                              line[36:48], line[48:60], line[60:72]]))
                    line = next(lines).rstrip()
                for i, num in enumerate(map(float, nums)):
                    hessian[i, i] = num
            elif line.startswith(' Off-diagonal'):
                # Section header names the column: atom number at cols 39:45
                # and the axis letter (X/Y/Z) at col 46.
                atom_pos, axis_pos = int(line[39:45])-1, xyz_to_int[line[46:47]]
                _, line = next(lines), next(lines).rstrip() # skip blank line
                nums = []
                while line.strip():
                    nums.extend(filter(bool, [line[:12], line[12:24], line[24:36],
                                              line[36:48], line[48:60], line[60:72]]))
                    try:
                        line = next(lines).rstrip()
                    except StopIteration:
                        # The last section may end at EOF with no blank line.
                        break
                j = 3*atom_pos+axis_pos
                # Fill the strictly-lower triangle of column j.
                for i, num in enumerate(map(float, nums)):
                    hessian[i+j+1, j] = num
    if remove:
        os.remove(hesfile)
    return hessian
def run_tinker(xyz_data, n_atoms, key, energy=True, dipole_moment=True,
               gradients=True, hessian=True):
    """
    Run the TINKER helper programs on ``xyz_data`` and collect the
    requested quantities.

    Parameters
    ----------
    xyz_data : str
        Contents of a TINKER-style XYZ file; written to a temporary file.
    n_atoms : int
        Number of atoms (used to size the Hessian).
    key : str
        Path to the TINKER key file.
    energy, dipole_moment, gradients, hessian : bool
        Which quantities to compute.

    Returns
    -------
    results : dict
        May contain 'energy', 'dipole_moment', 'gradients', 'hessian'.

    Raises
    ------
    RuntimeError
        If the TINKER executables are not available.
    ValueError
        If a requested quantity cannot be parsed from TINKER's output.
    """
    if not all([tinker_testhess, tinker_analyze, tinker_testgrad]):
        raise RuntimeError('TINKER executables could not be found in $PATH')
    error = 'Could not obtain {}! Command run:\n {}\n\nTINKER output:\n{}'
    # delete=False: the file must outlive the with-block so the TINKER
    # subprocesses can read it; it is removed explicitly at the end.
    with NamedTemporaryFile(suffix='.xyz', delete=False, mode='w') as f_xyz:
        f_xyz.write(xyz_data)
    xyz = f_xyz.name
    results = {}
    if energy or dipole_moment:
        # analyze's E/M flags select energy and dipole analysis.
        args = ','.join(['E' if energy else '', 'M' if dipole_moment else ''])
        command = [tinker_analyze, xyz, '-k', key, args]
        print('Running TINKER:', *command)
        output = _decode(check_output(command))
        energy, dipole = _parse_tinker_analyze(output)
        if energy is None:
            # NOTE(review): output is already a str here, so _decode(output)
            # would fail on Python 3 — only reachable on the error path.
            raise ValueError(error.format('energy', ' '.join(command), _decode(output)))
        results['energy'] = energy
        if dipole is None:
            raise ValueError(error.format('dipole', ' '.join(command), _decode(output)))
        results['dipole_moment'] = dipole
    if gradients:
        command = [tinker_testgrad, xyz, '-k', key, 'y', 'n', '0.1D-04']
        print('Running TINKER:', *command)
        output = _decode(check_output(command))
        gradients = _parse_tinker_testgrad(output)
        if gradients is None:
            raise ValueError(error.format('gradients', ' '.join(command), _decode(output)))
        results['gradients'] = gradients
    if hessian:
        command = [tinker_testhess, xyz, '-k', key, 'y', 'n']
        print('Running TINKER:', *command)
        output = _decode(check_output(command))
        # testhess writes the Hessian to a .hes file next to the xyz input.
        hesfile = os.path.splitext(xyz)[0] + '.hes'
        hessian = _parse_tinker_testhess(hesfile, n_atoms, remove=True)
        if hessian is None:
            raise ValueError(error.format('hessian', ' '.join(command), _decode(output)))
        results['hessian'] = hessian
    # Atoms marked 'inactive' in the key file get no gradient rows from
    # TINKER; re-expand the arrays so indices match the full atom list.
    inactive_indices = []
    with open(key) as f:
        for line in f:
            if line.lower().startswith('inactive'):
                inactive_indices.extend([int(i) for i in line.split()[1:]])
    if inactive_indices:
        results = patch_tinker_output_for_inactive_atoms(results, inactive_indices, n_atoms)
    os.remove(xyz)
    return results
def patch_tinker_output_for_inactive_atoms(results, indices, n_atoms):
    """
    Expand the gradient array to cover all atoms, inserting zero rows for
    TINKER-inactive atoms (whose gradients are not reported).

    TODO: Patch 'hessian' to support FREQ calculations with inactive

    Parameters
    ----------
    results : dict
        Must contain 'gradients' as an (n_active, 3) numpy array.
    indices : sequence of int
        1-based indices of the inactive atoms.
    n_atoms : int
        Total number of atoms.

    Returns
    -------
    results : dict
        Same dict, with 'gradients' replaced by an (n_atoms, 3) array.
    """
    values = results['gradients']
    shape = (n_atoms, 3)
    idx = np.array(indices) - 1
    filled = np.zeros(shape, dtype=values.dtype)
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # bool is the documented replacement for boolean mask dtypes.
    mask = np.ones(shape[0], bool)
    mask[idx] = 0
    filled[mask] = values
    results['gradients'] = filled
    return results
| [
"tempfile.NamedTemporaryFile",
"os.remove",
"os.path.abspath",
"subprocess.check_output",
"numpy.zeros",
"numpy.ones",
"os.environ.get",
"numpy.array",
"os.path.splitext",
"distutils.spawn.find_executable",
"shutil.copyfile"
] | [((462, 495), 'os.environ.get', 'os.environ.get', (['"""TINKER_TESTHESS"""'], {}), "('TINKER_TESTHESS')\n", (476, 495), False, 'import os\n'), ((499, 526), 'distutils.spawn.find_executable', 'find_executable', (['"""testhess"""'], {}), "('testhess')\n", (514, 526), False, 'from distutils.spawn import find_executable\n'), ((544, 576), 'os.environ.get', 'os.environ.get', (['"""TINKER_ANALYZE"""'], {}), "('TINKER_ANALYZE')\n", (558, 576), False, 'import os\n'), ((580, 606), 'distutils.spawn.find_executable', 'find_executable', (['"""analyze"""'], {}), "('analyze')\n", (595, 606), False, 'from distutils.spawn import find_executable\n'), ((625, 658), 'os.environ.get', 'os.environ.get', (['"""TINKER_TESTGRAD"""'], {}), "('TINKER_TESTGRAD')\n", (639, 658), False, 'import os\n'), ((662, 689), 'distutils.spawn.find_executable', 'find_executable', (['"""testgrad"""'], {}), "('testgrad')\n", (677, 689), False, 'from distutils.spawn import find_executable\n'), ((4898, 4917), 'numpy.array', 'np.array', (['gradients'], {}), '(gradients)\n', (4906, 4917), True, 'import numpy as np\n'), ((5011, 5047), 'numpy.zeros', 'np.zeros', (['(n_atoms * 3, n_atoms * 3)'], {}), '((n_atoms * 3, n_atoms * 3))\n', (5019, 5047), True, 'import numpy as np\n'), ((8798, 8812), 'os.remove', 'os.remove', (['xyz'], {}), '(xyz)\n', (8807, 8812), False, 'import os\n'), ((9094, 9129), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'values.dtype'}), '(shape, dtype=values.dtype)\n', (9102, 9129), True, 'import numpy as np\n'), ((9141, 9167), 'numpy.ones', 'np.ones', (['shape[0]', 'np.bool'], {}), '(shape[0], np.bool)\n', (9148, 9167), True, 'import numpy as np\n'), ((3055, 3085), 'os.path.abspath', 'os.path.abspath', (['"""garleek.key"""'], {}), "('garleek.key')\n", (3070, 3085), False, 'import os\n'), ((3440, 3482), 'shutil.copyfile', 'shutil.copyfile', (['keypath_original', 'keypath'], {}), '(keypath_original, keypath)\n', (3455, 3482), False, 'import shutil\n'), ((4382, 4398), 'numpy.array', 
'np.array', (['dipole'], {}), '(dipole)\n', (4390, 4398), True, 'import numpy as np\n'), ((6470, 6488), 'os.remove', 'os.remove', (['hesfile'], {}), '(hesfile)\n', (6479, 6488), False, 'import os\n'), ((6860, 6917), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', ([], {'suffix': '""".xyz"""', 'delete': '(False)', 'mode': '"""w"""'}), "(suffix='.xyz', delete=False, mode='w')\n", (6878, 6917), False, 'from tempfile import NamedTemporaryFile\n'), ((9059, 9076), 'numpy.array', 'np.array', (['indices'], {}), '(indices)\n', (9067, 9076), True, 'import numpy as np\n'), ((3174, 3201), 'os.path.abspath', 'os.path.abspath', (['forcefield'], {}), '(forcefield)\n', (3189, 3201), False, 'import os\n'), ((7237, 7258), 'subprocess.check_output', 'check_output', (['command'], {}), '(command)\n', (7249, 7258), False, 'from subprocess import check_output\n'), ((7785, 7806), 'subprocess.check_output', 'check_output', (['command'], {}), '(command)\n', (7797, 7806), False, 'from subprocess import check_output\n'), ((8169, 8190), 'subprocess.check_output', 'check_output', (['command'], {}), '(command)\n', (8181, 8190), False, 'from subprocess import check_output\n'), ((3000, 3027), 'os.path.abspath', 'os.path.abspath', (['forcefield'], {}), '(forcefield)\n', (3015, 3027), False, 'import os\n'), ((3377, 3411), 'os.path.splitext', 'os.path.splitext', (['keypath_original'], {}), '(keypath_original)\n', (3393, 3411), False, 'import os\n'), ((8210, 8231), 'os.path.splitext', 'os.path.splitext', (['xyz'], {}), '(xyz)\n', (8226, 8231), False, 'import os\n'), ((3095, 3123), 'os.path.splitext', 'os.path.splitext', (['forcefield'], {}), '(forcefield)\n', (3111, 3123), False, 'import os\n')] |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""process dataset"""
import numpy as np
from PIL import Image, ImageOps
def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h):
    """Return the (x, y) upper-left offsets of the fixed crop positions.

    The five base positions are the four corners and the centre; when
    ``more_fix_crop`` is true, eight additional edge-centre and quarter
    positions are appended (13 offsets in total).
    """
    w_step = (image_w - crop_w) // 4
    h_step = (image_h - crop_h) // 4
    # each entry is a (col, row) multiplier of (w_step, h_step)
    cells = [(0, 0), (4, 0), (0, 4), (4, 4), (2, 2)]
    if more_fix_crop:
        cells += [(0, 2), (4, 2), (2, 4), (2, 0),
                  (1, 1), (3, 1), (1, 3), (3, 3)]
    return [(cx * w_step, cy * h_step) for cx, cy in cells]
class GroupCenterCrop:
    """Centre-crop every image of a group to a square of side ``size``."""
    def __init__(self, size):
        self.size = size
    def __call__(self, img_group):
        half = self.size
        cropped = []
        for img in img_group:
            w, h = img.size
            box = ((w - half) / 2, (h - half) / 2,
                   (w + half) / 2, (h + half) / 2)
            cropped.append(img.crop(box))
        return cropped
class GroupNormalize:
    """In-place per-channel normalisation: (x - mean) / std.

    ``mean`` and ``std`` are tiled to cover all stacked channels of the
    tensor's first dimension.
    """
    def __init__(self, mean, std):
        self.mean = mean
        self.std = std
    def __call__(self, tensor):
        rep_mean = self.mean * (tensor.shape[0] // len(self.mean))
        rep_std = self.std * (tensor.shape[0] // len(self.std))
        for idx in range(tensor.shape[0]):
            tensor[idx] = (tensor[idx] - rep_mean[idx]) / rep_std[idx]
        return tensor
class GroupScale:
    """Rescale every PIL image so the smaller edge equals ``size``.

    For example, if height > width the image is resized to
    (size * height / width, size); the aspect ratio is preserved.

    size: target length of the smaller edge
    interpolation: Default: PIL.Image.BILINEAR
    """
    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = size
        self.interpolation = interpolation
    def __call__(self, img_group):
        scaled = []
        for img in img_group:
            w, h = img.size
            if w > h:
                target = (int(self.size * w / h), self.size)
            else:
                target = (self.size, int(self.size * h / w))
            scaled.append(img.resize(target, self.interpolation))
        return scaled
class GroupOverSample:
    """10-crop style oversampling: the five fixed corner/centre crops of
    every image plus their horizontal flips, optionally preceded by a
    :class:`GroupScale` resize."""
    def __init__(self, crop_size, scale_size=None):
        if isinstance(crop_size, int):
            self.crop_size = (crop_size, crop_size)
        else:
            self.crop_size = crop_size
        self.scale_worker = GroupScale(scale_size) if scale_size is not None else None
    def __call__(self, img_group):
        if self.scale_worker is not None:
            img_group = self.scale_worker(img_group)
        image_w, image_h = img_group[0].size
        crop_w, crop_h = self.crop_size
        oversample_group = list()
        for o_w, o_h in fill_fix_offset(False, image_w, image_h, crop_w, crop_h):
            normal_group = []
            flip_group = []
            for idx, img in enumerate(img_group):
                crop = img.crop((o_w, o_h, o_w + crop_w, o_h + crop_h))
                normal_group.append(crop)
                flipped = crop.copy().transpose(Image.FLIP_LEFT_RIGHT)
                # 'L'-mode frames at even indices are inverted when flipped —
                # presumably optical-flow x components whose sign must change
                # under a horizontal mirror; TODO confirm with the data loader
                if img.mode == 'L' and idx % 2 == 0:
                    flip_group.append(ImageOps.invert(flipped))
                else:
                    flip_group.append(flipped)
            oversample_group.extend(normal_group)
            oversample_group.extend(flip_group)
        return oversample_group
class Stack:
    """Concatenate a group of same-size images along the channel axis.

    'L'-mode images are expanded to H x W x 1 before stacking; 'RGB'
    images are stacked directly, with ``roll=True`` reversing the channel
    order (RGB -> BGR) first. Any other mode yields an empty list.
    """
    def __init__(self, roll=False):
        self.roll = roll
    def __call__(self, img_group):
        mode = img_group[0].mode
        output = []
        if mode == 'L':
            planes = [np.expand_dims(img, 2) for img in img_group]
            output = np.concatenate(planes, axis=2)
        elif mode == 'RGB':
            if self.roll:
                output = np.concatenate(
                    [np.array(img)[:, :, ::-1] for img in img_group], axis=2)
            else:
                output = np.concatenate(img_group, axis=2)
        return output
class ToTorchFormatTensor:
    """ Converts a PIL.Image (RGB/L) or numpy.ndarray (H x W x C) in the
    range [0, 255] to a float32 array of shape (C x H x W), optionally
    rescaled to the range [0.0, 1.0]. """
    def __init__(self, div=True):
        # when True the output is divided by 255 (range [0, 1])
        self.div_sign = div
    def __call__(self, pic):
        if isinstance(pic, np.ndarray):
            # handle numpy array: already H x W x C
            arr = np.ascontiguousarray(np.array(pic, np.float32))
            img = arr.transpose((2, 0, 1))
        else:
            # handle PIL Image. BUG FIX: size/mode must be read BEFORE the
            # ndarray conversion — the original queried them on the converted
            # array, where .size is an int (not subscriptable) and .mode
            # does not exist, so every PIL input raised.
            w, h = pic.size
            n_channels = len(pic.mode)
            arr = np.ascontiguousarray(np.array(pic, np.float32))
            img = arr.reshape(h, w, n_channels).transpose((2, 0, 1))
        return img / 255. if self.div_sign else img
| [
"numpy.expand_dims",
"PIL.ImageOps.invert",
"numpy.array",
"numpy.ascontiguousarray",
"numpy.concatenate"
] | [((5693, 5718), 'numpy.array', 'np.array', (['pic', 'np.float32'], {}), '(pic, np.float32)\n', (5701, 5718), True, 'import numpy as np\n'), ((5737, 5762), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pic'], {}), '(pic)\n', (5757, 5762), True, 'import numpy as np\n'), ((5869, 5894), 'numpy.array', 'np.array', (['pic', 'np.float32'], {}), '(pic, np.float32)\n', (5877, 5894), True, 'import numpy as np\n'), ((5913, 5938), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['pic'], {}), '(pic)\n', (5933, 5938), True, 'import numpy as np\n'), ((5008, 5028), 'numpy.expand_dims', 'np.expand_dims', (['x', '(2)'], {}), '(x, 2)\n', (5022, 5028), True, 'import numpy as np\n'), ((5262, 5295), 'numpy.concatenate', 'np.concatenate', (['img_group'], {'axis': '(2)'}), '(img_group, axis=2)\n', (5276, 5295), True, 'import numpy as np\n'), ((4556, 4582), 'PIL.ImageOps.invert', 'ImageOps.invert', (['flip_crop'], {}), '(flip_crop)\n', (4571, 4582), False, 'from PIL import Image, ImageOps\n'), ((5166, 5177), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5174, 5177), True, 'import numpy as np\n')] |
from planer import tile, mapcoord
import planer as rt
import numpy as np
import scipy.ndimage as ndimg
root = '/'.join(__file__.split('\\')[:-1])+'/models'
def load(lang='ch'):
    """Load the character dictionary and the det/rec/cls ONNX models for
    ``lang`` ('ch' or 'en') into module-level globals
    (lab_dic, det_net, rec_net, cls_net)."""
    dic_path = root + ('/en' if lang == 'en' else '/ch') + '_dict.txt'
    with open(dic_path, encoding='utf-8') as fid:
        # trailing ' ' entry is emitted for long CTC blank runs (see ctc_decode)
        globals()['lab_dic'] = np.array(fid.read().split('\n') + [' '])
    globals()['det_net'] = rt.InferenceSession(root + '/ppocr_mobilev2_det_%s.onnx' % lang)
    globals()['rec_net'] = rt.InferenceSession(root + '/ppocr_mobilev2_rec_%s.onnx' % lang)
    globals()['cls_net'] = rt.InferenceSession(root + '/ppocr_mobilev2_cls_all.onnx')
globals()['cls_net'] = rt.InferenceSession(root+'/ppocr_mobilev2_cls_all.onnx')
# get mask
@tile(glob=32)
def get_mask(img):
    """Text-detection forward pass: returns the detection probability map.

    Normalises the first three channels with the standard ImageNet
    mean/std, runs det_net and returns its single-channel output.
    (@tile presumably processes the image in tiles padded to multiples
    of 32 — see planer.tile.)
    """
    img = img[:,:,:3].astype('float32')/255
    offset = [0.485, 0.456, 0.406]  # ImageNet channel means
    offset = np.array(offset, dtype=np.float32)
    img = img - offset[None,None,:]
    img /= np.array([0.229, 0.224, 0.225])  # ImageNet channel stds
    img = img.transpose(2,0,1)[None,:]  # HWC -> NCHW
    return det_net.run(None, {'x':img})[0][0,0]
# find boxes from mask, and filter the bad boxes
def db_box(hot, thr=0.3, boxthr=0.7, sizethr=5, ratio=2):
    """Fit oriented text boxes to a detection probability map.

    Connected regions of ``hot > thr`` are kept when their mean score
    exceeds ``boxthr`` and they are at least ``sizethr``**2 pixels.
    Each region is fitted with a rotated rectangle via PCA
    (eigen-decomposition of the pixel covariance) and expanded by a
    margin proportional to ``ratio``. Returns an (n, 5, 2) array of
    (row, col) corners — the first corner is repeated to close the
    polygon.
    """
    lab, n = ndimg.label(hot > thr)
    idx = np.arange(n) + 1
    level = ndimg.mean(hot, lab, idx)  # mean score per labelled region
    objs = ndimg.find_objects(lab, n)
    boxes = []
    for i, l, sli in zip(idx, level, objs):
        if l < boxthr: continue  # low-confidence region
        rcs = np.array(np.where(lab[sli]==i)).T  # (row, col) pixel coords
        if rcs.shape[0] < sizethr**2: continue  # too few pixels
        # centre the pixel cloud, then compute its principal axes
        o = rcs.mean(axis=0); rcs = rcs - o
        vs, ds = np.linalg.eig(np.cov(rcs.T))
        # order so vs[0] is the smaller eigenvalue (box thickness)
        if vs[0]>vs[1]:
            vs, ds = vs[[1,0]], ds[:,[1,0]]
        # force a consistent axis orientation / handedness
        if ds[0,1]<0: ds[:,1] *= -1
        if np.cross(ds[:,0], ds[:,1])>0:
            ds[:,0] *= -1
        # margin scales with sqrt of the small eigenvalue (region thickness)
        mar = vs.min() ** 0.5 * ratio * 2
        rcs = np.linalg.inv(ds) @ rcs.T  # rotate pixels into the box frame
        minr, minc = rcs.min(axis=1) - mar
        maxr, maxc = rcs.max(axis=1) + mar
        if rcs.ptp(axis=1).min()<sizethr: continue  # degenerate box
        rs = [minr,minc,minr,maxc,
              maxr,maxc,maxr,minc]
        # rotate the four corners back into image coordinates
        rec = ds @ np.array(rs).reshape(-1,2,1)
        o += sli[0].start, sli[1].start  # un-offset the bounding-slice origin
        rec = rec.reshape(4,2) + o
        # start the polygon at the corner nearest the image origin;
        # for elongated boxes keep an odd starting corner (reading order)
        first = np.argmin(rec.sum(axis=1))
        if vs[1]/vs[0]>2 and first%2==0: first+=1
        boxes.append(rec[(np.arange(5)+first)%4])
    return np.array(boxes)
# extract text image from given box
def extract(img, box, height=32):
    """Warp the quadrilateral ``box`` out of ``img`` into an upright patch.

    The patch has the given ``height``; its width scales with the box's
    aspect ratio. Sampling is done with planer.mapcoord (presumably
    bilinear interpolation over the corner grid — confirm in planer).
    """
    # edge lengths of the quadrilateral: 0-1 edge (height), 1-2 edge (width)
    h = ((box[1]-box[0])**2).sum()**0.5
    w = ((box[2]-box[1])**2).sum()**0.5
    h, w = height, int(height * w / h)
    # corner rows/cols rearranged into 2x2 grids for interpolation
    rr = box[[0,3,1,2],0].reshape(2,2)
    cc = box[[0,3,1,2],1].reshape(2,2)
    rcs = np.mgrid[0:1:h*1j, 0:1:w*1j]  # normalised (h x w) sampling grid
    r2 = mapcoord(rr, *rcs, backend=np)
    c2 = mapcoord(cc, *rcs, backend=np)
    return mapcoord(img, r2, c2, backend=np)
# batch extract by boxes
def extracts(img, boxes, height=32, mwidth=0):
    """Extract every box from ``img`` as one normalised NCHW batch.

    Returns (batch, widths): batch is float32 scaled to [-1, 1] and
    right-padded (plus 10 px) to a common width — truncated to ``mwidth``
    columns when mwidth > 0; widths holds each crop's original width.
    """
    patches = []
    for box in boxes:
        patch = extract(img, box, height).astype(np.float32)
        patch /= 128
        patch -= 1
        patches.append(patch)
    widths = np.array([p.shape[1] for p in patches])
    max_w = max(p.shape[1] for p in patches)
    padded = []
    for patch in patches:
        pad = max_w - patch.shape[1] + 10
        patch = np.pad(patch, [(0, 0), (0, pad), (0, 0)])
        if mwidth > 0:
            patch = patch[:, :mwidth]
        padded.append(patch)
    return np.array(padded).transpose(0, 3, 1, 2), widths
# direction fixed
def fixdir(img, boxes):
    """Fix the reading direction of the detected boxes in place.

    Runs the orientation classifier on every box crop; when it predicts a
    nonzero class (presumably 'rotated 180°' — confirm against the cls
    model) with probability > 0.9, the box's corners are rolled by two so
    the crop comes out upright. Returns (directions, probabilities).
    """
    x, ws = extracts(img, boxes, 48, 256)
    y = cls_net.run(None, {'x':x})[0]
    dirs = np.argmax(y, axis=1)
    prob = np.max(y, axis=1)
    for b,d,p in zip(boxes, dirs, prob):
        # boxes are closed 5-point polygons; rolling by two corners = 180° flip
        if d and p>0.9: b[:] = b[[2,3,0,1,2]]
    return dirs, np.max(y, axis=1)
# decode
def ctc_decode(x, blank=10):
    """Greedy CTC decoding of one recognizer output.

    ``x`` is a (time, classes) score matrix; class 0 is the CTC blank and
    class k>0 maps to lab_dic[k-1]. Returns (text, mean probability of the
    kept frames), or ('nothing', 0) when only blanks were predicted.
    A blank run longer than ``blank`` frames emits lab_dic[-1] (a space).
    """
    x, p = x.argmax(axis=1), x.max(axis=1)
    if x.max()==0: return 'nothing', 0  # only blanks predicted
    sep = (np.diff(x, prepend=[-1]) != 0)  # True where a new run of equal labels starts
    lut = np.where(sep)[0][np.cumsum(sep)-1]  # start index of each frame's current run
    cal = np.arange(len(lut)) - (lut-1)  # 1-based position within the current run
    # keep the last frame of every run (plus a trailing non-blank frame)
    msk = np.hstack((sep[1:], x[-1:]>0))
    # drop blanks unless their run exceeds `blank` frames (emitted as space)
    msk = (msk>0) & ((x>0) | (cal>blank))
    cont = ''.join(lab_dic[x[msk]-1])
    return cont, p[msk].mean()
# recognize and decode every tensor
def recognize(img, boxes):
    """Run the text-recognition network over every detected box.

    Crops are grouped by width bucket (multiples of 256 px) so that each
    batch fed to rec_net has a uniform width. Returns one (text,
    probability) tuple per box, in the same order as ``boxes``.
    """
    x, ws = extracts(img, boxes, 32)
    cls = ws // 256  # width bucket of each crop
    # BUG FIX: the default must be a (text, prob) tuple like ctc_decode
    # returns — the original bare 'nothing' string broke ocr()'s
    # `(b.tolist(), *sp)` unpacking and its `i[2] > prothr` comparison.
    rsts = [('nothing', 0)] * len(boxes)
    for level in range(cls.max()+1):
        idx = np.where(cls==level)[0]
        if len(idx)==0: continue
        subx = x[idx,:,:,:(level+1) * 256]  # trim batch to this bucket's width
        y = rec_net.run(None, {'x':subx})[0]
        for i in range(len(y)):
            rsts[idx[i]] = ctc_decode(y[i])
    return rsts
def ocr(img, autodir=False, thr=0.3, boxthr=0.7,
        sizethr=5, ratio=1.5, prothr=0.6):
    """Full OCR pipeline: detect text boxes, optionally fix their reading
    direction, recognise each box, and keep results whose recognition
    probability exceeds ``prothr``.

    Returns a list of (box_corners, text, probability) entries.
    """
    heat = get_mask(img)
    boxes = db_box(heat, thr, boxthr, sizethr, ratio)
    if autodir:
        fixdir(img, boxes)
    results = [(box.tolist(), *rec)
               for box, rec in zip(boxes, recognize(img, boxes))]
    return [entry for entry in results if entry[2] > prothr]
def test():
    """Demo entry point: run OCR on the bundled card image and plot it."""
    import matplotlib.pyplot as plt
    from imageio import imread
    img = imread(root + '/card.jpg')[:, :, :3]
    found = ocr(img, autodir=True)
    # SimHei so Chinese characters render in matplotlib
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.imshow(img)
    for box, text, prob in found:
        box = np.array(box)
        plt.plot(*box.T[::-1], 'blue')  # (row, col) -> (x, y)
        plt.text(*(box[0, ::-1] - 5), text, color='red')
    plt.show()
if __name__ == '__main__':
    import planer
    # NOTE(review): planer.load(__name__) appears to wrap this very module
    # so that model.load / model.test dispatch to the functions defined
    # above — confirm against the planer API.
    model = planer.load(__name__)
    model.load('en')
    model.test()
| [
"numpy.argmax",
"scipy.ndimage.find_objects",
"numpy.arange",
"scipy.ndimage.mean",
"numpy.pad",
"planer.mapcoord",
"matplotlib.pyplot.imshow",
"numpy.cumsum",
"numpy.max",
"planer.tile",
"numpy.cov",
"matplotlib.pyplot.show",
"imageio.imread",
"numpy.cross",
"numpy.hstack",
"planer.lo... | [((606, 619), 'planer.tile', 'tile', ([], {'glob': '(32)'}), '(glob=32)\n', (610, 619), False, 'from planer import tile, mapcoord\n'), ((360, 424), 'planer.InferenceSession', 'rt.InferenceSession', (["(root + '/ppocr_mobilev2_det_%s.onnx' % lang)"], {}), "(root + '/ppocr_mobilev2_det_%s.onnx' % lang)\n", (379, 424), True, 'import planer as rt\n'), ((448, 512), 'planer.InferenceSession', 'rt.InferenceSession', (["(root + '/ppocr_mobilev2_rec_%s.onnx' % lang)"], {}), "(root + '/ppocr_mobilev2_rec_%s.onnx' % lang)\n", (467, 512), True, 'import planer as rt\n'), ((536, 594), 'planer.InferenceSession', 'rt.InferenceSession', (["(root + '/ppocr_mobilev2_cls_all.onnx')"], {}), "(root + '/ppocr_mobilev2_cls_all.onnx')\n", (555, 594), True, 'import planer as rt\n'), ((731, 765), 'numpy.array', 'np.array', (['offset'], {'dtype': 'np.float32'}), '(offset, dtype=np.float32)\n', (739, 765), True, 'import numpy as np\n'), ((813, 844), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (821, 844), True, 'import numpy as np\n'), ((1053, 1075), 'scipy.ndimage.label', 'ndimg.label', (['(hot > thr)'], {}), '(hot > thr)\n', (1064, 1075), True, 'import scipy.ndimage as ndimg\n'), ((1115, 1140), 'scipy.ndimage.mean', 'ndimg.mean', (['hot', 'lab', 'idx'], {}), '(hot, lab, idx)\n', (1125, 1140), True, 'import scipy.ndimage as ndimg\n'), ((1152, 1178), 'scipy.ndimage.find_objects', 'ndimg.find_objects', (['lab', 'n'], {}), '(lab, n)\n', (1170, 1178), True, 'import scipy.ndimage as ndimg\n'), ((2192, 2207), 'numpy.array', 'np.array', (['boxes'], {}), '(boxes)\n', (2200, 2207), True, 'import numpy as np\n'), ((2524, 2554), 'planer.mapcoord', 'mapcoord', (['rr', '*rcs'], {'backend': 'np'}), '(rr, *rcs, backend=np)\n', (2532, 2554), False, 'from planer import tile, mapcoord\n'), ((2564, 2594), 'planer.mapcoord', 'mapcoord', (['cc', '*rcs'], {'backend': 'np'}), '(cc, *rcs, backend=np)\n', (2572, 2594), False, 'from planer import tile, 
mapcoord\n'), ((2606, 2639), 'planer.mapcoord', 'mapcoord', (['img', 'r2', 'c2'], {'backend': 'np'}), '(img, r2, c2, backend=np)\n', (2614, 2639), False, 'from planer import tile, mapcoord\n'), ((2893, 2928), 'numpy.array', 'np.array', (['[i.shape[1] for i in rst]'], {}), '([i.shape[1] for i in rst])\n', (2901, 2928), True, 'import numpy as np\n'), ((3328, 3348), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3337, 3348), True, 'import numpy as np\n'), ((3360, 3377), 'numpy.max', 'np.max', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3366, 3377), True, 'import numpy as np\n'), ((3758, 3790), 'numpy.hstack', 'np.hstack', (['(sep[1:], x[-1:] > 0)'], {}), '((sep[1:], x[-1:] > 0))\n', (3767, 3790), True, 'import numpy as np\n'), ((4972, 4987), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4982, 4987), True, 'import matplotlib.pyplot as plt\n'), ((5124, 5134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5132, 5134), True, 'import matplotlib.pyplot as plt\n'), ((5193, 5214), 'planer.load', 'planer.load', (['__name__'], {}), '(__name__)\n', (5204, 5214), False, 'import planer\n'), ((1086, 1098), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1095, 1098), True, 'import numpy as np\n'), ((3060, 3102), 'numpy.pad', 'np.pad', (['rst[i]', '[(0, 0), (0, mar), (0, 0)]'], {}), '(rst[i], [(0, 0), (0, mar), (0, 0)])\n', (3066, 3102), True, 'import numpy as np\n'), ((3482, 3499), 'numpy.max', 'np.max', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (3488, 3499), True, 'import numpy as np\n'), ((3632, 3656), 'numpy.diff', 'np.diff', (['x'], {'prepend': '[-1]'}), '(x, prepend=[-1])\n', (3639, 3656), True, 'import numpy as np\n'), ((4796, 4822), 'imageio.imread', 'imread', (["(root + '/card.jpg')"], {}), "(root + '/card.jpg')\n", (4802, 4822), False, 'from imageio import imread\n'), ((5024, 5035), 'numpy.array', 'np.array', (['b'], {}), '(b)\n', (5032, 5035), True, 'import numpy as np\n'), ((5044, 5072), 
'matplotlib.pyplot.plot', 'plt.plot', (['*b.T[::-1]', '"""blue"""'], {}), "(*b.T[::-1], 'blue')\n", (5052, 5072), True, 'import matplotlib.pyplot as plt\n'), ((5081, 5124), 'matplotlib.pyplot.text', 'plt.text', (['*(b[0, ::-1] - 5)', 's'], {'color': '"""red"""'}), "(*(b[0, ::-1] - 5), s, color='red')\n", (5089, 5124), True, 'import matplotlib.pyplot as plt\n'), ((1440, 1453), 'numpy.cov', 'np.cov', (['rcs.T'], {}), '(rcs.T)\n', (1446, 1453), True, 'import numpy as np\n'), ((1570, 1598), 'numpy.cross', 'np.cross', (['ds[:, 0]', 'ds[:, 1]'], {}), '(ds[:, 0], ds[:, 1])\n', (1578, 1598), True, 'import numpy as np\n'), ((1682, 1699), 'numpy.linalg.inv', 'np.linalg.inv', (['ds'], {}), '(ds)\n', (1695, 1699), True, 'import numpy as np\n'), ((3673, 3686), 'numpy.where', 'np.where', (['sep'], {}), '(sep)\n', (3681, 3686), True, 'import numpy as np\n'), ((3690, 3704), 'numpy.cumsum', 'np.cumsum', (['sep'], {}), '(sep)\n', (3699, 3704), True, 'import numpy as np\n'), ((4112, 4134), 'numpy.where', 'np.where', (['(cls == level)'], {}), '(cls == level)\n', (4120, 4134), True, 'import numpy as np\n'), ((1293, 1316), 'numpy.where', 'np.where', (['(lab[sli] == i)'], {}), '(lab[sli] == i)\n', (1301, 1316), True, 'import numpy as np\n'), ((3157, 3170), 'numpy.array', 'np.array', (['rst'], {}), '(rst)\n', (3165, 3170), True, 'import numpy as np\n'), ((1934, 1946), 'numpy.array', 'np.array', (['rs'], {}), '(rs)\n', (1942, 1946), True, 'import numpy as np\n'), ((2157, 2169), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (2166, 2169), True, 'import numpy as np\n')] |
import led_panel
import numpy
import time
# Walk a single lit pixel diagonally across the panel: both coordinates
# advance one cell per frame and wrap independently at the panel edges.
x = 0
y = 0
while True:
    frame = numpy.zeros((led_panel.height, led_panel.width), dtype=numpy.uint8)
    frame[y, x] = 255  # light exactly one pixel at full intensity
    led_panel.send(brightness=50, packed_frame=led_panel.pack(frame))
    x = (x + 1) % led_panel.width
    y = (y + 1) % led_panel.height
| [
"led_panel.pack",
"numpy.zeros"
] | [((79, 146), 'numpy.zeros', 'numpy.zeros', (['(led_panel.height, led_panel.width)'], {'dtype': 'numpy.uint8'}), '((led_panel.height, led_panel.width), dtype=numpy.uint8)\n', (90, 146), False, 'import numpy\n'), ((216, 237), 'led_panel.pack', 'led_panel.pack', (['frame'], {}), '(frame)\n', (230, 237), False, 'import led_panel\n')] |
import time
import numpy as np
import tkinter as tk
from PIL import ImageTk, Image
np.random.seed(1)  # fixed seed for reproducible runs
PhotoImage = ImageTk.PhotoImage
unit = 50    # pixel size of one grid cell
height = 15  # number of grid rows
width = 15   # number of grid columns
class Env(tk.Tk):
    """Grid-world environment for Q-learning rendered with tkinter.

    The board is ``width`` x ``height`` cells of ``unit`` pixels each; the
    agent starts in the top-left cell and must reach the destination
    sprite while avoiding the virus sprite.

    Fixes over the original version:
    - ``coords_to_state``/``state_to_coords`` hard-coded a 100 px cell and
      a 50 px half-cell although ``unit`` is 50, which collapsed
      neighbouring cells onto the same discrete state; both conversions
      now derive from ``unit``.
    - the agent sprite is created at the centre of cell (0, 0),
      matching where ``reset`` places it (the original created it on a
      gridline at (50, 50), from which the terminal cell centres were
      unreachable with a stride of ``unit``).
    """
    def __init__(self):
        super(Env, self).__init__()
        self.action_space = ['w', 's', 'a', 'd']  # up, down, left, right (WSAD layout)
        self.actions_length = len(self.action_space)
        self.height = height * unit  # canvas height in pixels
        self.width = width * unit    # canvas width in pixels
        self.title('codefair qlearning')
        self.geometry(f'{self.height}x{self.width}')
        self.shapes = self.load_images()
        self.canvas = self.build_canvas()
    def build_canvas(self):
        """Create the canvas, draw the grid lines and place the sprites."""
        canvas = tk.Canvas(self, bg="white", height=self.height, width=self.width)
        for c in range(0, self.width, unit):   # vertical grid lines
            canvas.create_line(c, 0, c, self.height)
        for r in range(0, self.height, unit):  # horizontal grid lines
            canvas.create_line(0, r, self.width, r)
        # sprites are anchored at cell centres
        half = unit // 2
        self.agent = canvas.create_image(half, half, image=self.shapes[0])
        self.virus = canvas.create_image(175, 175, image=self.shapes[1])        # cell (3, 3)
        self.destination = canvas.create_image(275, 275, image=self.shapes[2])  # cell (5, 5)
        canvas.pack()
        return canvas
    def load_images(self):
        """Load and resize the agent, virus and destination sprite images."""
        agent = PhotoImage(Image.open("./img/agent.png").resize((50, 50)))
        virus = PhotoImage(Image.open("./img/virus.jpg").resize((50, 50)))
        destination = PhotoImage(Image.open("./img/destination.png").resize((50, 50)))
        return agent, virus, destination
    def coords_to_state(self, coords):
        """Map canvas pixel coordinates (a cell centre) to grid [x, y]."""
        x = int((coords[0] - unit / 2) / unit)
        y = int((coords[1] - unit / 2) / unit)
        return [x, y]
    def state_to_coords(self, state):
        """Map grid [x, y] to the pixel coordinates of that cell's centre."""
        x = int(state[0] * unit + unit / 2)
        y = int(state[1] * unit + unit / 2)
        return [x, y]
    def reset(self):
        """Move the agent back to cell (0, 0) and return its discrete state."""
        self.update()
        time.sleep(.5)
        x, y = self.canvas.coords(self.agent)
        self.canvas.move(self.agent, unit / 2 - x, unit / 2 - y)
        self.render()
        return self.coords_to_state(self.canvas.coords(self.agent))
    def step(self, action):
        """Apply an action (0=up, 1=down, 2=left, 3=right) and return
        (next_state, reward, finished)."""
        state = self.canvas.coords(self.agent)
        base_action = np.array([0, 0])
        self.render()
        if action == 0:    # w: up
            if state[1] > unit:
                base_action[1] -= unit
        elif action == 1:  # s: down
            if state[1] < (height - 1) * unit:
                base_action[1] += unit
        elif action == 2:  # a: left
            if state[0] > unit:
                base_action[0] -= unit
        elif action == 3:  # d: right
            if state[0] < (width - 1) * unit:
                base_action[0] += unit
        self.canvas.move(self.agent, base_action[0], base_action[1])
        self.canvas.tag_raise(self.agent)  # keep the agent above other sprites
        next_state = self.canvas.coords(self.agent)
        # terminal rewards: +100 at the destination, -100 at the virus
        if next_state == self.canvas.coords(self.destination):
            reward = 100
            finish = True
        elif next_state == self.canvas.coords(self.virus):
            reward = -100
            finish = True
        else:
            reward = 0
            finish = False
        next_state = self.coords_to_state(next_state)
        return next_state, reward, finish
    def render(self):
        """Refresh the tk window with a short delay so moves are visible."""
        time.sleep(.03)
        self.update()
"numpy.random.seed",
"tkinter.Canvas",
"PIL.Image.open",
"time.sleep",
"numpy.array"
] | [((84, 101), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (98, 101), True, 'import numpy as np\n'), ((619, 684), 'tkinter.Canvas', 'tk.Canvas', (['self'], {'bg': '"""white"""', 'height': 'self.height', 'width': 'self.width'}), "(self, bg='white', height=self.height, width=self.width)\n", (628, 684), True, 'import tkinter as tk\n'), ((1799, 1814), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1809, 1814), False, 'import time\n'), ((2083, 2099), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (2091, 2099), True, 'import numpy as np\n'), ((3018, 3034), 'time.sleep', 'time.sleep', (['(0.03)'], {}), '(0.03)\n', (3028, 3034), False, 'import time\n'), ((1266, 1295), 'PIL.Image.open', 'Image.open', (['"""./img/agent.png"""'], {}), "('./img/agent.png')\n", (1276, 1295), False, 'from PIL import ImageTk, Image\n'), ((1337, 1366), 'PIL.Image.open', 'Image.open', (['"""./img/virus.jpg"""'], {}), "('./img/virus.jpg')\n", (1347, 1366), False, 'from PIL import ImageTk, Image\n'), ((1414, 1449), 'PIL.Image.open', 'Image.open', (['"""./img/destination.png"""'], {}), "('./img/destination.png')\n", (1424, 1449), False, 'from PIL import ImageTk, Image\n')] |
import logging
import re
import shutil
import subprocess
from collections import OrderedDict
import traceback
from pathlib import Path
import numpy as np
import pandas as pd
import one.alf.io as alfio
from ibllib.misc import check_nvidia_driver
from ibllib.ephys import ephysqc, spikes, sync_probes
from ibllib.io import ffmpeg, spikeglx
from ibllib.io.video import label_from_path
from ibllib.io.extractors import ephys_fpga, ephys_passive, camera
from ibllib.pipes import tasks
from ibllib.pipes.training_preprocessing import TrainingRegisterRaw as EphysRegisterRaw
from ibllib.pipes.misc import create_alyx_probe_insertions
from ibllib.qc.task_extractors import TaskQCExtractor
from ibllib.qc.task_metrics import TaskQC
from ibllib.qc.camera import run_all_qc as run_camera_qc
from ibllib.dsp import rms
from ibllib.io.extractors import signatures
_logger = logging.getLogger("ibllib")
# level 0
class EphysPulses(tasks.Task):
    """
    Extract Pulses from raw electrophysiology data into numpy arrays
    Perform the probes synchronisation with nidq (3B) or main probe (3A)
    """
    cpu = 2
    io_charge = 30  # this jobs reads raw ap files
    priority = 90  # a lot of jobs depend on this one
    level = 0  # this job doesn't depend on anything
    def _run(self, overwrite=False):
        # extract the sync pulses to numpy files, then synchronise the probes
        syncs, pulse_files = ephys_fpga.extract_sync(self.session_path, overwrite=overwrite)
        for pulse_file in pulse_files:
            _logger.info(f"extracted pulses for {pulse_file}")
        status, sync_files = sync_probes.sync(self.session_path)
        return pulse_files + sync_files
class RawEphysQC(tasks.Task):
    """
    Computes raw electrophysiology QC
    """
    cpu = 2
    io_charge = 30  # this jobs reads raw ap files
    priority = 10  # a lot of jobs depend on this one
    level = 0  # this job doesn't depend on anything
    signature = {'input_files': signatures.RAWEPHYSQC, 'output_files': ()}
    def _run(self, overwrite=False):
        eid = self.one.path2eid(self.session_path)
        insertions = self.one.alyx.rest('insertions', 'list', session=eid)
        pids = [ins['id'] for ins in insertions]
        # Usually there should be two probes; if fewer are registered,
        # attempt to register them from the local data
        if len(pids) < 2:
            _logger.warning(f"{len(pids)} probes registered for session {eid}, trying to register from local data")
            new_insertions = create_alyx_probe_insertions(self.session_path, one=self.one)
            pids = [probe['id'] for probe in new_insertions]
        qc_files = []
        for pid in pids:
            try:
                eqc = ephysqc.EphysQC(pid, session_path=self.session_path, one=self.one)
                qc_files.extend(eqc.run(update=True, overwrite=overwrite))
            except AssertionError:
                # flag the task as errored but keep processing remaining probes
                self.status = -1
        return qc_files
class EphysAudio(tasks.Task):
    """
    Compresses the microphone wav file in a lossless flac file
    """
    cpu = 2
    priority = 10  # a lot of jobs depend on this one
    level = 0  # this job doesn't depend on anything
    signature = {'input_files': ('_iblrig_micData.raw.wav', 'raw_behavior_data', True),
                 'output_files': ('_iblrig_micData.raw.flac', 'raw_behavior_data', True),
                 }
    def _run(self, overwrite=False):
        # locate the raw microphone recording; nothing to do if it is absent
        wav_file = next(self.session_path.rglob("_iblrig_micData.raw.wav"), None)
        if wav_file is None:
            return
        flac_file = wav_file.with_suffix(".flac")
        command = "ffmpeg -i {file_in} -y -nostdin -c:a flac -nostats {file_out}"
        status, output_file = ffmpeg.compress(file_in=wav_file, file_out=flac_file, command=command)
        return [output_file]
class SpikeSorting(tasks.Task):
    """
    Pykilosort 2.5 pipeline

    Runs pykilosort on each probe's raw AP file through a shell script on
    a scratch drive, converts the output to ALF datasets, synchronises
    the spike times and tars the raw sorter output for registration.
    """
    gpu = 1
    io_charge = 70  # this jobs reads raw ap files
    priority = 60
    level = 1  # this job doesn't depend on anything
    SHELL_SCRIPT = Path.home().joinpath(
        "Documents/PYTHON/iblscripts/deploy/serverpc/kilosort2/run_pykilosort.sh"
    )
    SPIKE_SORTER_NAME = 'pykilosort'
    PYKILOSORT_REPO = Path.home().joinpath('Documents/PYTHON/SPIKE_SORTING/pykilosort')
    signature = {'input_files': signatures.SPIKESORTING, 'output_files': ()}
    @staticmethod
    def _sample2v(ap_file):
        # conversion factor from raw AP samples to Volts, read from the
        # spikeglx .meta file sitting next to the binary
        md = spikeglx.read_meta_data(ap_file.with_suffix(".meta"))
        s2v = spikeglx._conversion_sample2v_from_meta(md)
        return s2v["ap"][0]
    @staticmethod
    def _fetch_pykilosort_version(repo_path):
        # reads __version__ from the pykilosort package __init__; falls back
        # to the git commit hash when the file cannot be parsed
        init_file = Path(repo_path).joinpath('pykilosort', '__init__.py')
        version = SpikeSorting._fetch_ks2_commit_hash(repo_path)  # default
        try:
            with open(init_file) as fid:
                lines = fid.readlines()
                for line in lines:
                    if line.startswith("__version__ = "):
                        version = line.split('=')[-1].strip().replace('"', '').replace("'", '')
        except Exception:
            pass
        return f"pykilosort_{version}"
    @staticmethod
    def _fetch_ks2_commit_hash(repo_path):
        # HEAD commit of the local pykilosort repo, used as a version string
        command2run = f"git --git-dir {repo_path}/.git rev-parse --verify HEAD"
        process = subprocess.Popen(
            command2run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )
        info, error = process.communicate()
        if process.returncode != 0:
            _logger.error(
                f"Can't fetch pykilsort commit hash, will still attempt to run \n"
                f"Error: {error.decode('utf-8')}"
            )
            return ""
        return info.decode("utf-8").strip()
    def _run_pykilosort(self, ap_file):
        """
        Runs the ks2 matlab spike sorting for one probe dataset
        the raw spike sorting output is in session_path/spike_sorters/{self.SPIKE_SORTER_NAME}/probeXX folder
        (discontinued support for old spike sortings in the probe folder <1.5.5)
        :return: path of the folder containing ks2 spike sorting output
        """
        self.version = self._fetch_pykilosort_version(self.PYKILOSORT_REPO)
        label = ap_file.parts[-2]  # this is usually the probe name
        sorter_dir = self.session_path.joinpath("spike_sorters", self.SPIKE_SORTER_NAME, label)
        FORCE_RERUN = False
        if not FORCE_RERUN:
            # the presence of the log file in the output folder marks a completed run
            if sorter_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log").exists():
                _logger.info(f"Already ran: spike_sorting_{self.SPIKE_SORTER_NAME}.log"
                             f" found in {sorter_dir}, skipping.")
                return sorter_dir
        print(sorter_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log"))
        # get the scratch drive from the shell script
        with open(self.SHELL_SCRIPT) as fid:
            lines = fid.readlines()
            line = [line for line in lines if line.startswith("SCRATCH_DRIVE=")][0]
        m = re.search(r"\=(.*?)(\#|\n)", line)[0]
        scratch_drive = Path(m[1:-1].strip())
        assert scratch_drive.exists()
        # clean up and create directory, this also checks write permissions
        # temp_dir has the following shape: pykilosort/ZM_3003_2020-07-29_001_probe00
        # first makes sure the tmp dir is clean
        shutil.rmtree(scratch_drive.joinpath(self.SPIKE_SORTER_NAME), ignore_errors=True)
        temp_dir = scratch_drive.joinpath(
            self.SPIKE_SORTER_NAME, "_".join(list(self.session_path.parts[-3:]) + [label])
        )
        if temp_dir.exists():  # hmmm this has to be decided, we may want to restart ?
            # But failed sessions may then clog the scratch dir and have users run out of space
            shutil.rmtree(temp_dir, ignore_errors=True)
        temp_dir.mkdir(parents=True, exist_ok=True)
        check_nvidia_driver()
        command2run = f"{self.SHELL_SCRIPT} {ap_file} {temp_dir}"
        _logger.info(command2run)
        process = subprocess.Popen(
            command2run,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            executable="/bin/bash",
        )
        info, error = process.communicate()
        info_str = info.decode("utf-8").strip()
        _logger.info(info_str)
        if process.returncode != 0:
            error_str = error.decode("utf-8").strip()
            # try and get the kilosort log if any
            for log_file in temp_dir.rglob('*_kilosort.log'):
                with open(log_file) as fid:
                    log = fid.read()
                    _logger.error(log)
                break
            raise RuntimeError(f"{self.SPIKE_SORTER_NAME} {info_str}, {error_str}")
        # success: move the sorter output off the scratch drive and clean up
        shutil.copytree(temp_dir.joinpath('output'), sorter_dir, dirs_exist_ok=True)
        shutil.rmtree(temp_dir, ignore_errors=True)
        return sorter_dir
    def _run(self, probes=None):
        """
        Multiple steps. For each probe:
        - Runs ks2 (skips if it already ran)
        - synchronize the spike sorting
        - output the probe description files
        :param probes: (list of str) if provided, will only run spike sorting for specified probe names
        :return: list of files to be registered on database
        """
        efiles = spikeglx.glob_ephys_files(self.session_path)
        ap_files = [(ef.get("ap"), ef.get("label")) for ef in efiles if "ap" in ef.keys()]
        out_files = []
        for ap_file, label in ap_files:
            if isinstance(probes, list) and label not in probes:
                continue
            try:
                ks2_dir = self._run_pykilosort(ap_file)  # runs ks2, skips if it already ran
                probe_out_path = self.session_path.joinpath("alf", label, self.SPIKE_SORTER_NAME)
                shutil.rmtree(probe_out_path, ignore_errors=True)
                probe_out_path.mkdir(parents=True, exist_ok=True)
                # convert the raw sorter output to ALF datasets, scaling amplitudes to Volts
                spikes.ks2_to_alf(
                    ks2_dir,
                    bin_path=ap_file.parent,
                    out_path=probe_out_path,
                    bin_file=ap_file,
                    ampfactor=self._sample2v(ap_file),
                )
                logfile = ks2_dir.joinpath(f"spike_sorting_{self.SPIKE_SORTER_NAME}.log")
                if logfile.exists():
                    shutil.copyfile(logfile, probe_out_path.joinpath(
                        f"_ibl_log.info_{self.SPIKE_SORTER_NAME}.log"))
                # synchronise spike times to the session master clock
                out, _ = spikes.sync_spike_sorting(ap_file=ap_file, out_path=probe_out_path)
                out_files.extend(out)
                # convert ks2_output into tar file and also register
                # Make this in case spike sorting is in old raw_ephys_data folders, for new
                # sessions it should already exist
                tar_dir = self.session_path.joinpath(
                    'spike_sorters', self.SPIKE_SORTER_NAME, label)
                tar_dir.mkdir(parents=True, exist_ok=True)
                out = spikes.ks2_to_tar(ks2_dir, tar_dir)
                out_files.extend(out)
            except BaseException:
                # flag the task as errored but keep processing the other probes
                _logger.error(traceback.format_exc())
                self.status = -1
                continue
        probe_files = spikes.probes_description(self.session_path, one=self.one)
        return out_files + probe_files
class EphysVideoCompress(tasks.Task):
    """Compress raw camera videos to mp4, then extract their timestamps
    and run the video QC."""
    priority = 40
    level = 1
    def _run(self, **kwargs):
        # avi to mp4 compression
        command = ('ffmpeg -i {file_in} -y -nostdin -codec:v libx264 -preset slow -crf 17 '
                   '-loglevel 0 -codec:a copy {file_out}')
        output_files = ffmpeg.iblrig_video_compression(self.session_path, command)
        if not output_files:
            _logger.info('No compressed videos found; skipping timestamp extraction')
            return
        labels = [label_from_path(f) for f in output_files]
        # video timestamps extraction
        data, ts_files = camera.extract_all(self.session_path, save=True, labels=labels)
        output_files.extend(ts_files)
        # video QC
        run_camera_qc(self.session_path, update=True, one=self.one, cameras=labels)
        return output_files
# level 1
class EphysTrials(tasks.Task):
    """Extract trial events from the FPGA sync; when online, also update
    the behaviour criterion and run the task QC on Alyx."""
    priority = 90
    level = 1
    signature = {'input_files': signatures.EPHYSTRIALS, 'output_files': ()}
    def _behaviour_criterion(self):
        """
        Computes and update the behaviour criterion on Alyx
        """
        from brainbox.behavior import training
        trials = alfio.load_object(self.session_path.joinpath("alf"), "trials")
        # criterion from trial count and easy-trial performance — presumably
        # a pass/fail flag; see brainbox.behavior.training.criterion_delay
        good_enough = training.criterion_delay(
            n_trials=trials["intervals"].shape[0],
            perf_easy=training.compute_performance_easy(trials),
        )
        eid = self.one.path2eid(self.session_path, query_type='remote')
        self.one.alyx.json_field_update(
            "sessions", eid, "extended_qc", {"behavior": int(good_enough)}
        )
    def _run(self):
        """Extract all trial datasets; post behaviour/task QC when online.

        :return: list of extracted dataset files
        """
        dsets, out_files = ephys_fpga.extract_all(self.session_path, save=True)
        # offline mode: extraction only, no QC / Alyx updates
        if not self.one or self.one.offline:
            return out_files
        self._behaviour_criterion()
        # Run the task QC
        qc = TaskQC(self.session_path, one=self.one, log=_logger)
        qc.extractor = TaskQCExtractor(self.session_path, lazy=True, one=qc.one)
        # Extract extra datasets required for QC
        qc.extractor.data = dsets
        qc.extractor.extract_data()
        # Aggregate and update Alyx QC fields
        qc.run(update=True)
        return out_files
class EphysCellsQc(tasks.Task):
    """Cluster-level QC of the spike sorting output.

    Computes quality metrics for each unit, stores them in the clusters ALF
    object and labels the corresponding probe insertions on Alyx.
    """
    priority = 90
    level = 3

    def _compute_cell_qc(self, folder_probe):
        """
        Computes the cell QC given an extracted probe alf path
        :param folder_probe: folder
        :return: (path to the parquet metrics file, metrics dataframe, drift)
        """
        # compute the straight qc
        _logger.info(f"Computing cluster qc for {folder_probe}")
        spikes = alfio.load_object(folder_probe, 'spikes')
        clusters = alfio.load_object(folder_probe, 'clusters')
        df_units, drift = ephysqc.spike_sorting_metrics(
            spikes.times, spikes.clusters, spikes.amps, spikes.depths,
            cluster_ids=np.arange(clusters.channels.size))
        # if the ks2 labels file exist, load them and add the column
        file_labels = folder_probe.joinpath('cluster_KSLabel.tsv')
        if file_labels.exists():
            ks2_labels = pd.read_csv(file_labels, sep='\t')
            ks2_labels.rename(columns={'KSLabel': 'ks2_label'}, inplace=True)
            df_units = pd.concat(
                [df_units, ks2_labels['ks2_label'].reindex(df_units.index)], axis=1)
        # save as parquet file
        df_units.to_parquet(folder_probe.joinpath("clusters.metrics.pqt"))
        return folder_probe.joinpath("clusters.metrics.pqt"), df_units, drift

    def _label_probe_qc(self, folder_probe, df_units, drift):
        """
        Labels the json field of the alyx corresponding probe insertion
        :param folder_probe: probe alf folder ({session_path}/alf/{probe_name}/...)
        :param df_units: metrics dataframe returned by _compute_cell_qc
        :param drift: drift estimate returned by _compute_cell_qc
        :return:
        """
        eid = self.one.path2eid(self.session_path, query_type='remote')
        # the probe name is the first folder after alf: {session_path}/alf/{probe_name}/{spike_sorter_name}
        probe_name = Path(folder_probe).relative_to(self.session_path.joinpath('alf')).parts[0]
        pdict = self.one.alyx.rest('insertions', 'list', session=eid, name=probe_name, no_cache=True)
        if len(pdict) != 1:
            _logger.warning(f'No probe found for probe name: {probe_name}')
            return
        isok = df_units['label'] == 1
        qcdict = {'n_units': int(df_units.shape[0]),
                  'n_units_qc_pass': int(np.sum(isok)),
                  'firing_rate_max': np.max(df_units['firing_rate'][isok]),
                  'firing_rate_median': np.median(df_units['firing_rate'][isok]),
                  'amplitude_max_uV': np.max(df_units['amp_max'][isok]) * 1e6,
                  # bugfix: this median metric was previously computed with np.max
                  'amplitude_median_uV': np.median(df_units['amp_median'][isok]) * 1e6,
                  'drift_rms_um': rms(drift['drift_um']),
                  }
        file_wm = folder_probe.joinpath('_kilosort_whitening.matrix.npy')
        if file_wm.exists():
            wm = np.load(file_wm)
            qcdict['whitening_matrix_conditioning'] = np.linalg.cond(wm)
        # groom qc dict (this function will eventually go directly into the json field update)
        for k in qcdict:
            if isinstance(qcdict[k], np.int64):
                qcdict[k] = int(qcdict[k])
            elif isinstance(qcdict[k], float):
                qcdict[k] = np.round(qcdict[k], 2)
        self.one.alyx.json_field_update("insertions", pdict[0]["id"], "json", qcdict)

    def _run(self):
        """
        Post spike-sorting quality control at the cluster level.
        Outputs a QC table in the clusters ALF object and labels corresponding probes in Alyx
        """
        files_spikes = Path(self.session_path).joinpath('alf').rglob('spikes.times.npy')
        folder_probes = [f.parent for f in files_spikes]
        out_files = []
        for folder_probe in folder_probes:
            try:
                qc_file, df_units, drift = self._compute_cell_qc(folder_probe)
                out_files.append(qc_file)
                self._label_probe_qc(folder_probe, df_units, drift)
            except Exception:
                # best effort per probe: log the traceback and carry on
                _logger.error(traceback.format_exc())
                self.status = -1
                continue
        return out_files
class EphysMtscomp(tasks.Task):
    """Lossless (mtscomp) compression of the raw electrophysiology binaries."""
    priority = 50  # ideally after spike sorting
    level = 0

    def _run(self):
        """
        Compress ephys files looking for `compress_ephys.flag` within the probes folder
        Original bin file will be removed
        The registration flag created contains targeted file names at the root of the session
        """
        out_files = []
        efiles = spikeglx.glob_ephys_files(self.session_path)
        efiles += spikeglx.glob_ephys_files(self.session_path, ext="ch")
        efiles += spikeglx.glob_ephys_files(self.session_path, ext="meta")
        for efi in efiles:
            for typ in ("ap", "lf", "nidq"):
                bin_file = efi.get(typ)
                if not bin_file:
                    continue
                # suffix '.bin' (find('bin') == 1) may still need compressing;
                # anything else (.cbin/.ch/.meta) is registered as-is
                if bin_file.suffix.find("bin") == 1:
                    with spikeglx.Reader(bin_file) as sr:
                        if sr.is_mtscomp:
                            out_files.append(bin_file)
                        else:
                            _logger.info(f"Compressing binary file {bin_file}")
                            out_files.append(sr.compress_file(keep_original=False))
                            out_files.append(bin_file.with_suffix('.ch'))
                else:
                    out_files.append(bin_file)
        return out_files
class EphysDLC(tasks.Task):
    # DLC pose-estimation task: registered in the pipeline for job creation
    # only; the actual processing is presumably done elsewhere (GPU worker).
    gpu = 1
    cpu = 4
    io_charge = 90
    level = 2
    def _run(self):
        """empty placeholder for job creation only"""
        pass
class EphysPassive(tasks.Task):
    """Extraction of the passive stimulation protocol."""
    cpu = 1
    io_charge = 90
    level = 1
    signature = {'input_files': signatures.EPHYSPASSIVE, 'output_files': ()}

    def _run(self):
        """returns a list of pathlib.Paths. """
        extractor = ephys_passive.PassiveChoiceWorld(self.session_path)
        _, paths = extractor.extract(save=True)
        # a None entry means part of the extraction failed: flag the task
        if any(x is None for x in paths):
            self.status = -1
        # Register?
        return paths
class EphysExtractionPipeline(tasks.Pipeline):
    """Task dependency graph for the ephys rig extraction."""
    label = __name__

    def __init__(self, session_path=None, **kwargs):
        super(EphysExtractionPipeline, self).__init__(session_path, **kwargs)
        self.session_path = session_path
        jobs = OrderedDict()  # renamed from `tasks` to avoid shadowing the module
        # level 0: no dependency
        jobs["EphysRegisterRaw"] = EphysRegisterRaw(self.session_path)
        jobs["EphysPulses"] = EphysPulses(self.session_path)
        jobs["EphysRawQC"] = RawEphysQC(self.session_path)
        jobs["EphysAudio"] = EphysAudio(self.session_path)
        jobs["EphysMtscomp"] = EphysMtscomp(self.session_path)
        # level 1: everything downstream of the sync pulses
        jobs["SpikeSorting"] = SpikeSorting(
            self.session_path, parents=[jobs["EphysMtscomp"], jobs["EphysPulses"]])
        jobs["EphysVideoCompress"] = EphysVideoCompress(
            self.session_path, parents=[jobs["EphysPulses"]])
        jobs["EphysTrials"] = EphysTrials(self.session_path, parents=[jobs["EphysPulses"]])
        jobs["EphysPassive"] = EphysPassive(self.session_path, parents=[jobs["EphysPulses"]])
        # level 2: downstream of spike sorting / video compression
        jobs["EphysCellsQc"] = EphysCellsQc(self.session_path, parents=[jobs["SpikeSorting"]])
        jobs["EphysDLC"] = EphysDLC(self.session_path, parents=[jobs["EphysVideoCompress"]])
        self.tasks = jobs
| [
"numpy.load",
"numpy.sum",
"ibllib.io.extractors.camera.extract_all",
"pathlib.Path.home",
"pandas.read_csv",
"one.alf.io.load_object",
"ibllib.qc.task_extractors.TaskQCExtractor",
"ibllib.io.spikeglx.glob_ephys_files",
"numpy.linalg.cond",
"pathlib.Path",
"numpy.arange",
"ibllib.misc.check_nv... | [((864, 891), 'logging.getLogger', 'logging.getLogger', (['"""ibllib"""'], {}), "('ibllib')\n", (881, 891), False, 'import logging\n'), ((1354, 1417), 'ibllib.io.extractors.ephys_fpga.extract_sync', 'ephys_fpga.extract_sync', (['self.session_path'], {'overwrite': 'overwrite'}), '(self.session_path, overwrite=overwrite)\n', (1377, 1417), False, 'from ibllib.io.extractors import ephys_fpga, ephys_passive, camera\n'), ((1544, 1579), 'ibllib.ephys.sync_probes.sync', 'sync_probes.sync', (['self.session_path'], {}), '(self.session_path)\n', (1560, 1579), False, 'from ibllib.ephys import ephysqc, spikes, sync_probes\n'), ((3568, 3636), 'ibllib.io.ffmpeg.compress', 'ffmpeg.compress', ([], {'file_in': 'file_in', 'file_out': 'file_out', 'command': 'command'}), '(file_in=file_in, file_out=file_out, command=command)\n', (3583, 3636), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((4337, 4380), 'ibllib.io.spikeglx._conversion_sample2v_from_meta', 'spikeglx._conversion_sample2v_from_meta', (['md'], {}), '(md)\n', (4376, 4380), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((5149, 5243), 'subprocess.Popen', 'subprocess.Popen', (['command2run'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), '(command2run, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n', (5165, 5243), False, 'import subprocess\n'), ((7726, 7747), 'ibllib.misc.check_nvidia_driver', 'check_nvidia_driver', ([], {}), '()\n', (7745, 7747), False, 'from ibllib.misc import check_nvidia_driver\n'), ((7866, 7984), 'subprocess.Popen', 'subprocess.Popen', (['command2run'], {'shell': '(True)', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'executable': '"""/bin/bash"""'}), "(command2run, shell=True, stdout=subprocess.PIPE, stderr=\n subprocess.PIPE, executable='/bin/bash')\n", (7882, 7984), False, 'import subprocess\n'), ((8696, 8739), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {'ignore_errors': '(True)'}), '(temp_dir, 
ignore_errors=True)\n', (8709, 8739), False, 'import shutil\n'), ((9175, 9219), 'ibllib.io.spikeglx.glob_ephys_files', 'spikeglx.glob_ephys_files', (['self.session_path'], {}), '(self.session_path)\n', (9200, 9219), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((11126, 11184), 'ibllib.ephys.spikes.probes_description', 'spikes.probes_description', (['self.session_path'], {'one': 'self.one'}), '(self.session_path, one=self.one)\n', (11151, 11184), False, 'from ibllib.ephys import ephysqc, spikes, sync_probes\n'), ((11534, 11593), 'ibllib.io.ffmpeg.iblrig_video_compression', 'ffmpeg.iblrig_video_compression', (['self.session_path', 'command'], {}), '(self.session_path, command)\n', (11565, 11593), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((11856, 11919), 'ibllib.io.extractors.camera.extract_all', 'camera.extract_all', (['self.session_path'], {'save': '(True)', 'labels': 'labels'}), '(self.session_path, save=True, labels=labels)\n', (11874, 11919), False, 'from ibllib.io.extractors import ephys_fpga, ephys_passive, camera\n'), ((11983, 12058), 'ibllib.qc.camera.run_all_qc', 'run_camera_qc', (['self.session_path'], {'update': '(True)', 'one': 'self.one', 'cameras': 'labels'}), '(self.session_path, update=True, one=self.one, cameras=labels)\n', (11996, 12058), True, 'from ibllib.qc.camera import run_all_qc as run_camera_qc\n'), ((12909, 12961), 'ibllib.io.extractors.ephys_fpga.extract_all', 'ephys_fpga.extract_all', (['self.session_path'], {'save': '(True)'}), '(self.session_path, save=True)\n', (12931, 12961), False, 'from ibllib.io.extractors import ephys_fpga, ephys_passive, camera\n'), ((13113, 13165), 'ibllib.qc.task_metrics.TaskQC', 'TaskQC', (['self.session_path'], {'one': 'self.one', 'log': '_logger'}), '(self.session_path, one=self.one, log=_logger)\n', (13119, 13165), False, 'from ibllib.qc.task_metrics import TaskQC\n'), ((13189, 13246), 'ibllib.qc.task_extractors.TaskQCExtractor', 'TaskQCExtractor', (['self.session_path'], {'lazy': '(True)', 
'one': 'qc.one'}), '(self.session_path, lazy=True, one=qc.one)\n', (13204, 13246), False, 'from ibllib.qc.task_extractors import TaskQCExtractor\n'), ((13834, 13875), 'one.alf.io.load_object', 'alfio.load_object', (['folder_probe', '"""spikes"""'], {}), "(folder_probe, 'spikes')\n", (13851, 13875), True, 'import one.alf.io as alfio\n'), ((13895, 13938), 'one.alf.io.load_object', 'alfio.load_object', (['folder_probe', '"""clusters"""'], {}), "(folder_probe, 'clusters')\n", (13912, 13938), True, 'import one.alf.io as alfio\n'), ((17841, 17885), 'ibllib.io.spikeglx.glob_ephys_files', 'spikeglx.glob_ephys_files', (['self.session_path'], {}), '(self.session_path)\n', (17866, 17885), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((17909, 17963), 'ibllib.io.spikeglx.glob_ephys_files', 'spikeglx.glob_ephys_files', (['self.session_path'], {'ext': '"""ch"""'}), "(self.session_path, ext='ch')\n", (17934, 17963), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((17987, 18043), 'ibllib.io.spikeglx.glob_ephys_files', 'spikeglx.glob_ephys_files', (['self.session_path'], {'ext': '"""meta"""'}), "(self.session_path, ext='meta')\n", (18012, 18043), False, 'from ibllib.io import ffmpeg, spikeglx\n'), ((19618, 19631), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19629, 19631), False, 'from collections import OrderedDict\n'), ((19727, 19762), 'ibllib.pipes.training_preprocessing.TrainingRegisterRaw', 'EphysRegisterRaw', (['self.session_path'], {}), '(self.session_path)\n', (19743, 19762), True, 'from ibllib.pipes.training_preprocessing import TrainingRegisterRaw as EphysRegisterRaw\n'), ((3897, 3908), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (3906, 3908), False, 'from pathlib import Path\n'), ((4066, 4077), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (4075, 4077), False, 'from pathlib import Path\n'), ((6859, 6895), 're.search', 're.search', (['"""\\\\=(.*?)(\\\\#|\\\\n)"""', 'line'], {}), "('\\\\=(.*?)(\\\\#|\\\\n)', line)\n", (6868, 6895), 
False, 'import re\n'), ((7621, 7664), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {'ignore_errors': '(True)'}), '(temp_dir, ignore_errors=True)\n', (7634, 7664), False, 'import shutil\n'), ((11754, 11772), 'ibllib.io.video.label_from_path', 'label_from_path', (['x'], {}), '(x)\n', (11769, 11772), False, 'from ibllib.io.video import label_from_path\n'), ((14320, 14354), 'pandas.read_csv', 'pd.read_csv', (['file_labels'], {'sep': '"""\t"""'}), "(file_labels, sep='\\t')\n", (14331, 14354), True, 'import pandas as pd\n'), ((15673, 15710), 'numpy.max', 'np.max', (["df_units['firing_rate'][isok]"], {}), "(df_units['firing_rate'][isok])\n", (15679, 15710), True, 'import numpy as np\n'), ((15752, 15792), 'numpy.median', 'np.median', (["df_units['firing_rate'][isok]"], {}), "(df_units['firing_rate'][isok])\n", (15761, 15792), True, 'import numpy as np\n'), ((15992, 16014), 'ibllib.dsp.rms', 'rms', (["drift['drift_um']"], {}), "(drift['drift_um'])\n", (15995, 16014), False, 'from ibllib.dsp import rms\n'), ((16156, 16172), 'numpy.load', 'np.load', (['file_wm'], {}), '(file_wm)\n', (16163, 16172), True, 'import numpy as np\n'), ((16227, 16245), 'numpy.linalg.cond', 'np.linalg.cond', (['wm'], {}), '(wm)\n', (16241, 16245), True, 'import numpy as np\n'), ((2555, 2621), 'ibllib.ephys.ephysqc.EphysQC', 'ephysqc.EphysQC', (['pid'], {'session_path': 'self.session_path', 'one': 'self.one'}), '(pid, session_path=self.session_path, one=self.one)\n', (2570, 2621), False, 'from ibllib.ephys import ephysqc, spikes, sync_probes\n'), ((4494, 4509), 'pathlib.Path', 'Path', (['repo_path'], {}), '(repo_path)\n', (4498, 4509), False, 'from pathlib import Path\n'), ((9688, 9737), 'shutil.rmtree', 'shutil.rmtree', (['probe_out_path'], {'ignore_errors': '(True)'}), '(probe_out_path, ignore_errors=True)\n', (9701, 9737), False, 'import shutil\n'), ((10363, 10430), 'ibllib.ephys.spikes.sync_spike_sorting', 'spikes.sync_spike_sorting', ([], {'ap_file': 'ap_file', 'out_path': 'probe_out_path'}), 
'(ap_file=ap_file, out_path=probe_out_path)\n', (10388, 10430), False, 'from ibllib.ephys import ephysqc, spikes, sync_probes\n'), ((10884, 10919), 'ibllib.ephys.spikes.ks2_to_tar', 'spikes.ks2_to_tar', (['ks2_dir', 'tar_dir'], {}), '(ks2_dir, tar_dir)\n', (10901, 10919), False, 'from ibllib.ephys import ephysqc, spikes, sync_probes\n'), ((12610, 12651), 'brainbox.behavior.training.compute_performance_easy', 'training.compute_performance_easy', (['trials'], {}), '(trials)\n', (12643, 12651), False, 'from brainbox.behavior import training\n'), ((14091, 14124), 'numpy.arange', 'np.arange', (['clusters.channels.size'], {}), '(clusters.channels.size)\n', (14100, 14124), True, 'import numpy as np\n'), ((15621, 15633), 'numpy.sum', 'np.sum', (['isok'], {}), '(isok)\n', (15627, 15633), True, 'import numpy as np\n'), ((15832, 15865), 'numpy.max', 'np.max', (["df_units['amp_max'][isok]"], {}), "(df_units['amp_max'][isok])\n", (15838, 15865), True, 'import numpy as np\n'), ((15914, 15950), 'numpy.max', 'np.max', (["df_units['amp_median'][isok]"], {}), "(df_units['amp_median'][isok])\n", (15920, 15950), True, 'import numpy as np\n'), ((19215, 19266), 'ibllib.io.extractors.ephys_passive.PassiveChoiceWorld', 'ephys_passive.PassiveChoiceWorld', (['self.session_path'], {}), '(self.session_path)\n', (19247, 19266), False, 'from ibllib.io.extractors import ephys_fpga, ephys_passive, camera\n'), ((2406, 2467), 'ibllib.pipes.misc.create_alyx_probe_insertions', 'create_alyx_probe_insertions', (['self.session_path'], {'one': 'self.one'}), '(self.session_path, one=self.one)\n', (2434, 2467), False, 'from ibllib.pipes.misc import create_alyx_probe_insertions\n'), ((16532, 16554), 'numpy.round', 'np.round', (['qcdict[k]', '(2)'], {}), '(qcdict[k], 2)\n', (16540, 16554), True, 'import numpy as np\n'), ((11022, 11044), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (11042, 11044), False, 'import traceback\n'), ((15189, 15207), 'pathlib.Path', 'Path', (['folder_probe'], 
{}), '(folder_probe)\n', (15193, 15207), False, 'from pathlib import Path\n'), ((16868, 16891), 'pathlib.Path', 'Path', (['self.session_path'], {}), '(self.session_path)\n', (16872, 16891), False, 'from pathlib import Path\n'), ((17323, 17345), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (17343, 17345), False, 'import traceback\n'), ((18300, 18325), 'ibllib.io.spikeglx.Reader', 'spikeglx.Reader', (['bin_file'], {}), '(bin_file)\n', (18315, 18325), False, 'from ibllib.io import ffmpeg, spikeglx\n')] |
"""
Module wraps some legacy code to construct a series of vtu files with 2D
CFD data on unstructured mesh from structured mesh in numpy format.
Code is not very general and likely only works for exact flow past cylinder
dataset used in this project. Note this code is meant to be a wrapper for
legacy code that is intended to not be used used very often or in a
critical/production setting. Therefore sustainability may be lacking.
"""
import vtktools
import numpy as np
from utils import get_grid_end_points
import os
import sys
if sys.version_info[0] < 3:
import u2r # noqa
else:
import u2rpy3 # noqa
u2r = u2rpy3
__author__ = " <NAME>, <NAME>"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def get_clean_vtk_file(filename):
    """Return a copy of the vtu in ``filename`` stripped of all point fields
    and cell-data arrays, keeping only coordinates/connectivity information.
    """
    source = vtktools.vtu(filename)
    clean_vtu = vtktools.vtu()
    clean_vtu.ugrid.DeepCopy(source.ugrid)
    # drop every point field from the copy
    for name in clean_vtu.GetFieldNames():
        clean_vtu.RemoveField(name)
    clean_vtu.GetFieldNames()
    # drop every cell-data array as well
    cell_data = clean_vtu.ugrid.GetCellData()
    names = [
        cell_data.GetArrayName(i) for i in range(cell_data.GetNumberOfArrays())
    ]
    for name in names:
        cell_data.RemoveArray(name)
    return clean_vtu
def create_vtu_file(
    path, nNodes, value_mesh_twice_interp, filename, orig_vel, iTime, nDim=2
):
    """Write one reconstructed timestep as ``{path}recon_{iTime}.vtu``.

    Attaches the reconstructed velocity, the original velocity and their
    normalized difference as fields on a clean copy of ``filename``.
    """
    recon = np.zeros((nNodes, 3))
    recon[:, 0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim, :])
    # streamwise component only
    diff = np.zeros((nNodes, 3))
    diff[:, 0:nDim] = np.transpose(value_mesh_twice_interp[0:nDim, :]) - orig_vel
    # streamwise component only; normalize by the peak reconstructed velocity
    diff = diff / np.max(recon)
    template = get_clean_vtk_file(filename)
    new_vtu = vtktools.vtu()
    new_vtu.ugrid.DeepCopy(template.ugrid)
    new_vtu.filename = path + "recon_" + str(iTime) + ".vtu"
    new_vtu.AddField("Velocity", recon)
    new_vtu.AddField("Original", orig_vel)
    new_vtu.AddField("Velocity_diff", diff)
    new_vtu.Write()
    return
def reconstruct(
    snapshot_data_location="./../../data/FPC_Re3900_2D_CG_new/",
    snapshot_file_base="fpc_",
    reconstructed_file="reconstruction_test.npy",  # POD coefficients
    nGrids=4,
    xlength=2.2,
    ylength=0.41,
    nTime=300,
    field_names=None,
    offset=0
):
    """
    Requires data in format (ngrids, nscalar, nx, ny, ntime)
    Args:
        snapshot_data_location (str, optional): location of sample vtu file.
                                                Defaults to
                                                `./../../data/FPC_Re3900_2D_CG_new/`.
        snapshot_file_base (str, optional): file base of sample vtu file.
                                            Defaults to `fpc_`.
        reconstructed_file (str, optional): reconstruction data file. Defaults
                                            to `reconstruction_test.npy`.
        nGrids (int, optional): number of structured sub-grids the domain was
                                split into; only 1 and 4 are supported.
        xlength (float, optional): length in x direction. Defaults to 2.2.
        ylength (float, optional): length in y direction. Defaults to 0.41.
        nTime (int, optional): number of timesteps. Defaults to 300.
        field_names (list, optional): names of fields in vtu file. Defaults to
                                      ["Velocity"].
        offset (int, optional): starting timestep. Defaults to 0.
    Raises:
        ValueError: if nGrids is not 1 or 4 (grid sizes unknown).
    """
    # avoid a mutable default argument; None stands in for ["Velocity"]
    if field_names is None:
        field_names = ["Velocity"]
    nFields = len(field_names)
    # get a vtu file (any will do as the mesh is not adapted)
    filename = snapshot_data_location + snapshot_file_base + "0.vtu"
    representative_vtu = vtktools.vtu(filename)
    coordinates = representative_vtu.GetLocations()
    nNodes = coordinates.shape[0]  # vtu_data.ugrid.GetNumberOfPoints()
    nEl = representative_vtu.ugrid.GetNumberOfCells()
    nScalar = 2  # dimension of fields
    nDim = 2  # dimension of problem (no need to interpolate in dim no 3)
    nloc = 3  # number of local nodes, ie three nodes per element (in 2D)
    # get global node numbers (1-based, as expected by the u2r routines)
    x_ndgln = np.zeros((nEl * nloc), dtype=int)
    for iEl in range(nEl):
        n = representative_vtu.GetCellPoints(iEl) + 1
        x_ndgln[iEl * nloc: (iEl + 1) * nloc] = n
    # set grid size
    if nGrids == 4:
        nx = 55
        ny = 42
        nz = 1  # nz = 1 for 2D problems
    elif nGrids == 1:
        nx = 221
        ny = 42
        nz = 1  # nz = 1 for 2D problems
    else:
        print("nx, ny, nz not known for ", nGrids, "grids")
        # fail fast: nx and ny would otherwise be undefined below (NameError)
        raise ValueError("unsupported number of grids: %d" % nGrids)
    x_all = np.transpose(coordinates[:, 0:nDim])
    ddx = np.array((xlength / (nGrids * (nx - 1)), ylength / (ny - 1)))
    grid_origin = [0.0, 0.0]
    grid_width = [xlength / nGrids, 0.0]
    # -------------------------------------------------------------------------------------------------
    # find node duplications when superposing results
    my_field = representative_vtu.GetField(field_names[0])[:, 0]
    # overwrite with a constant 1 on every node: interpolating this constant
    # mesh -> grid -> mesh counts how many sub-grids cover each node
    my_field = 1
    nScalar_test = 1
    # for one timestep
    # for one field
    value_mesh = np.zeros((nScalar_test, nNodes, 1))  # nTime=1
    value_mesh[:, :, 0] = np.transpose(my_field)
    superposed_grids = np.zeros((nNodes))
    for iGrid in range(nGrids):
        block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
        zeros_on_mesh = 0
        value_grid = u2r.simple_interpolate_from_mesh_to_grid(
            value_mesh,
            x_all,
            x_ndgln,
            ddx,
            block_x_start,
            nx,
            ny,
            nz,
            zeros_on_mesh,
            nEl,
            nloc,
            nNodes,
            nScalar_test,
            nDim,
            1,
        )
        zeros_on_grid = 1
        value_back_on_mesh = u2r.interpolate_from_grid_to_mesh(
            value_grid,
            block_x_start,
            ddx,
            x_all,
            zeros_on_grid,
            nScalar_test,
            nx,
            ny,
            nz,
            nNodes,
            nDim,
            1,
        )
        superposed_grids = superposed_grids + np.rint(
            np.squeeze(value_back_on_mesh)
        )
    superposed_grids = np.array(superposed_grids, dtype="int")
    duplicated_nodal_values = []
    for iNode in range(nNodes):
        if superposed_grids[iNode] == 0:
            # this is bad news - the node hasn't appeared in any grid
            print("zero:", iNode)
        elif superposed_grids[iNode] == 2:
            print("two:", iNode)
            # the node appears in two grids - deal with this later
            duplicated_nodal_values.append(iNode)
        elif superposed_grids[iNode] != 1:
            # most of the nodes will appear in one grid
            print("unknown:", iNode, superposed_grids[iNode])
    # map the reconstruction from each structured grid back onto the mesh,
    # accumulating the contributions per timestep
    reconstruction_on_mesh = np.zeros((nScalar * nTime, nNodes))
    reconstructed = np.load(reconstructed_file)
    for iGrid in range(nGrids):
        reconstruction_grid = reconstructed[iGrid, :, :, :, :]
        # reconstruction_grid here has the shape of (nScalar, nx, ny, nTime)
        block_x_start = get_grid_end_points(grid_origin, grid_width, iGrid)
        for iTime in range(nTime):
            zeros_beyond_grid = 1  # 0 extrapolate solution; 1 gives zeros
            reconstruction_on_mesh_from_one_grid = (
                u2r.interpolate_from_grid_to_mesh(
                    reconstruction_grid[:, :, :, iTime],
                    block_x_start,
                    ddx,
                    x_all,
                    zeros_beyond_grid,
                    nScalar,
                    nx,
                    ny,
                    nz,
                    nNodes,
                    nDim,
                    1,
                )
            )
            reconstruction_on_mesh[
                nScalar * iTime: nScalar * (iTime + 1), :
            ] = reconstruction_on_mesh[
                nScalar * iTime: nScalar * (iTime + 1), :
            ] + np.squeeze(
                reconstruction_on_mesh_from_one_grid
            )
    # nodes covered by two grids received two contributions: average them
    reconstruction_on_mesh[:, duplicated_nodal_values] = (
        0.5 * reconstruction_on_mesh[:, duplicated_nodal_values]
    )
    # for ifield in range(nFields):
    # nDoF = nNodes  # could be different value per field
    # original_data.append(np.zeros((nNodes, nDim*nTime)))
    # load the original snapshots so they can be compared in the output files
    original = np.zeros((nNodes, nDim * nTime))
    for iTime in range(nTime):
        filename = (
            snapshot_data_location
            + snapshot_file_base
            + str(offset + iTime)
            + ".vtu"
        )
        vtu_data = vtktools.vtu(filename)
        # NOTE(review): with several fields each iteration overwrites the same
        # columns, so only the last field is kept - confirm if multi-field
        # data is ever used
        for iField in range(nFields):
            my_field = vtu_data.GetField(field_names[iField])[:, 0:nDim]
            original[:, iTime * nDim: (iTime + 1) * nDim] = my_field
    # make diretory for results
    path_to_reconstructed_results = "reconstructed_results/"
    if not os.path.isdir(path_to_reconstructed_results):
        os.mkdir(path_to_reconstructed_results)
    template_vtu = snapshot_data_location + snapshot_file_base + "0.vtu"
    for iTime in range(nTime):
        create_vtu_file(
            path_to_reconstructed_results,
            nNodes,
            reconstruction_on_mesh[iTime * nScalar: (iTime + 1) * nScalar, :],
            template_vtu,
            original[:, iTime * nDim: (iTime + 1) * nDim],
            iTime,
        )
if __name__ == "__main__":
reconstruct()
| [
"os.mkdir",
"numpy.load",
"utils.get_grid_end_points",
"u2r.interpolate_from_grid_to_mesh",
"os.path.isdir",
"numpy.transpose",
"numpy.zeros",
"vtktools.vtu",
"numpy.max",
"numpy.array",
"numpy.squeeze",
"u2r.simple_interpolate_from_mesh_to_grid"
] | [((974, 996), 'vtktools.vtu', 'vtktools.vtu', (['filename'], {}), '(filename)\n', (986, 996), False, 'import vtktools\n'), ((1013, 1027), 'vtktools.vtu', 'vtktools.vtu', ([], {}), '()\n', (1025, 1027), False, 'import vtktools\n'), ((1648, 1669), 'numpy.zeros', 'np.zeros', (['(nNodes, 3)'], {}), '((nNodes, 3))\n', (1656, 1669), True, 'import numpy as np\n'), ((1702, 1750), 'numpy.transpose', 'np.transpose', (['value_mesh_twice_interp[0:nDim, :]'], {}), '(value_mesh_twice_interp[0:nDim, :])\n', (1714, 1750), True, 'import numpy as np\n'), ((1815, 1836), 'numpy.zeros', 'np.zeros', (['(nNodes, 3)'], {}), '((nNodes, 3))\n', (1823, 1836), True, 'import numpy as np\n'), ((2087, 2101), 'vtktools.vtu', 'vtktools.vtu', ([], {}), '()\n', (2099, 2101), False, 'import vtktools\n'), ((3883, 3905), 'vtktools.vtu', 'vtktools.vtu', (['filename'], {}), '(filename)\n', (3895, 3905), False, 'import vtktools\n'), ((4317, 4348), 'numpy.zeros', 'np.zeros', (['(nEl * nloc)'], {'dtype': 'int'}), '(nEl * nloc, dtype=int)\n', (4325, 4348), True, 'import numpy as np\n'), ((4775, 4811), 'numpy.transpose', 'np.transpose', (['coordinates[:, 0:nDim]'], {}), '(coordinates[:, 0:nDim])\n', (4787, 4811), True, 'import numpy as np\n'), ((4823, 4884), 'numpy.array', 'np.array', (['(xlength / (nGrids * (nx - 1)), ylength / (ny - 1))'], {}), '((xlength / (nGrids * (nx - 1)), ylength / (ny - 1)))\n', (4831, 4884), True, 'import numpy as np\n'), ((5278, 5313), 'numpy.zeros', 'np.zeros', (['(nScalar_test, nNodes, 1)'], {}), '((nScalar_test, nNodes, 1))\n', (5286, 5313), True, 'import numpy as np\n'), ((5351, 5373), 'numpy.transpose', 'np.transpose', (['my_field'], {}), '(my_field)\n', (5363, 5373), True, 'import numpy as np\n'), ((5397, 5413), 'numpy.zeros', 'np.zeros', (['nNodes'], {}), '(nNodes)\n', (5405, 5413), True, 'import numpy as np\n'), ((6396, 6435), 'numpy.array', 'np.array', (['superposed_grids'], {'dtype': '"""int"""'}), "(superposed_grids, dtype='int')\n", (6404, 6435), True, 'import numpy 
as np\n'), ((7030, 7065), 'numpy.zeros', 'np.zeros', (['(nScalar * nTime, nNodes)'], {}), '((nScalar * nTime, nNodes))\n', (7038, 7065), True, 'import numpy as np\n'), ((7087, 7114), 'numpy.load', 'np.load', (['reconstructed_file'], {}), '(reconstructed_file)\n', (7094, 7114), True, 'import numpy as np\n'), ((8568, 8600), 'numpy.zeros', 'np.zeros', (['(nNodes, nDim * nTime)'], {}), '((nNodes, nDim * nTime))\n', (8576, 8600), True, 'import numpy as np\n'), ((1875, 1923), 'numpy.transpose', 'np.transpose', (['value_mesh_twice_interp[0:nDim, :]'], {}), '(value_mesh_twice_interp[0:nDim, :])\n', (1887, 1923), True, 'import numpy as np\n'), ((2004, 2026), 'numpy.max', 'np.max', (['velocity_field'], {}), '(velocity_field)\n', (2010, 2026), True, 'import numpy as np\n'), ((5472, 5523), 'utils.get_grid_end_points', 'get_grid_end_points', (['grid_origin', 'grid_width', 'iGrid'], {}), '(grid_origin, grid_width, iGrid)\n', (5491, 5523), False, 'from utils import get_grid_end_points\n'), ((5572, 5737), 'u2r.simple_interpolate_from_mesh_to_grid', 'u2r.simple_interpolate_from_mesh_to_grid', (['value_mesh', 'x_all', 'x_ndgln', 'ddx', 'block_x_start', 'nx', 'ny', 'nz', 'zeros_on_mesh', 'nEl', 'nloc', 'nNodes', 'nScalar_test', 'nDim', '(1)'], {}), '(value_mesh, x_all, x_ndgln, ddx,\n block_x_start, nx, ny, nz, zeros_on_mesh, nEl, nloc, nNodes,\n nScalar_test, nDim, 1)\n', (5612, 5737), False, 'import u2r\n'), ((5977, 6111), 'u2r.interpolate_from_grid_to_mesh', 'u2r.interpolate_from_grid_to_mesh', (['value_grid', 'block_x_start', 'ddx', 'x_all', 'zeros_on_grid', 'nScalar_test', 'nx', 'ny', 'nz', 'nNodes', 'nDim', '(1)'], {}), '(value_grid, block_x_start, ddx, x_all,\n zeros_on_grid, nScalar_test, nx, ny, nz, nNodes, nDim, 1)\n', (6010, 6111), False, 'import u2r\n'), ((7314, 7365), 'utils.get_grid_end_points', 'get_grid_end_points', (['grid_origin', 'grid_width', 'iGrid'], {}), '(grid_origin, grid_width, iGrid)\n', (7333, 7365), False, 'from utils import get_grid_end_points\n'), 
((8805, 8827), 'vtktools.vtu', 'vtktools.vtu', (['filename'], {}), '(filename)\n', (8817, 8827), False, 'import vtktools\n'), ((9114, 9158), 'os.path.isdir', 'os.path.isdir', (['path_to_reconstructed_results'], {}), '(path_to_reconstructed_results)\n', (9127, 9158), False, 'import os\n'), ((9168, 9207), 'os.mkdir', 'os.mkdir', (['path_to_reconstructed_results'], {}), '(path_to_reconstructed_results)\n', (9176, 9207), False, 'import os\n'), ((7547, 7709), 'u2r.interpolate_from_grid_to_mesh', 'u2r.interpolate_from_grid_to_mesh', (['reconstruction_grid[:, :, :, iTime]', 'block_x_start', 'ddx', 'x_all', 'zeros_beyond_grid', 'nScalar', 'nx', 'ny', 'nz', 'nNodes', 'nDim', '(1)'], {}), '(reconstruction_grid[:, :, :, iTime],\n block_x_start, ddx, x_all, zeros_beyond_grid, nScalar, nx, ny, nz,\n nNodes, nDim, 1)\n', (7580, 7709), False, 'import u2r\n'), ((6331, 6361), 'numpy.squeeze', 'np.squeeze', (['value_back_on_mesh'], {}), '(value_back_on_mesh)\n', (6341, 6361), True, 'import numpy as np\n'), ((8184, 8232), 'numpy.squeeze', 'np.squeeze', (['reconstruction_on_mesh_from_one_grid'], {}), '(reconstruction_on_mesh_from_one_grid)\n', (8194, 8232), True, 'import numpy as np\n')] |
"""
Tests for IRSA implementation. Since it is hard to find a real benchmark, the tests are basically
just visual inspections, reproducing some plots from the original IRSA article.
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import unittest
import os
import itertools
import json
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
import numpy as np
import src.irsa as irsa
class IrsaTestFixed(unittest.TestCase):
COLORS = itertools.cycle(["r", "b", "g", "m", "k"])
MARKERS = itertools.cycle(["s", "o", "^", "v", "<"])
@classmethod
def setUpClass(cls):
# directory to store the test plots
try:
os.mkdir("../tests")
except FileExistsError:
# do nothing all good
pass
plt.style.use('classic')
def test_varying_frame_size_fixed(self):
"""
We use visual benchmark by plotting the values...
If more reliable benchmark values available, use assertAlmostEqual
:return:
"""
params = {"save_to": "",
"sim_duration": 100,
"max_iter": 20,
"traffic_type": "bernoulli",
"degree_distr": [0, 0, 0.5, 0.28, 0, 0, 0, 0, 0.22]}
load_range = [0.1 * x for x in range(1, 11)]
plt.figure()
# benchmark values taken from the IRSA paper (just visual reading from the plot)
values = {50: [0.1, 0.2, 0.3, 0.4, 0.5, 0.59, 0.65, 0.6, 0.37, 0.19],
200: [0.1, 0.2, 0.3, 0.4, 0.5, 0.59, 0.69, 0.76, 0.47, 0.19],
1000: [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.79, 0.57, 0.19]}
# determined arbitrary
tolerance = 0.01
results_to_store = {}
for m in [50, 200, 1000]:
color = next(IrsaTestFixed.COLORS)
marker = next(IrsaTestFixed.MARKERS)
thr = []
thr_c = []
params["num_resources"] = m
for load_idx, load in enumerate(load_range):
params["num_ues"] = int(m*load)
params["act_prob"] = 1
res = irsa.irsa_run(**params)
t = np.mean(res.throughput_normalized)
tc = irsa.mean_confidence_interval(res.throughput_normalized)
thr.append(t)
thr_c.append(tc)
# FIXME it will be certainly valuable to check whether confidence interval is not too high
self.assertAlmostEqual(values[m][load_idx], t, delta=tc+tolerance)
results_to_store[str(m)] = thr
results_to_store[str(m) + "_c"] = thr_c
plt.errorbar(load_range, thr, linestyle="--", color=color, yerr=thr_c, label=r"$m=%d$" % m)
plt.plot(load_range, values[m], linestyle="", color=color, markeredgecolor=color,
marker=marker, label=r"IRSA, $m=%d$" % m, markerfacecolor="None")
with open("../tests/varying_frame_size.json", "w") as f:
json.dump(results_to_store, f)
plt.ylabel("Normalized throughput")
plt.xlabel("Offered Load")
plt.legend(loc=0)
plt.grid(True)
plt.savefig("../tests/varying_frame_size_fixed.pdf")
def test_packet_loss_fixed(self):
    """
    We use visual benchmark by plotting the values...
    If more reliable benchmark values available, use assertAlmostEqual
    :return:
    """
    # Base simulation parameters; "num_ues" is filled in per load point below.
    params = {"save_to": "",
              "sim_duration": 100000,  # long simulations needed to capture packet loss
              "num_resources": 200,
              "traffic_type": "bernoulli",
              "max_iter": 20}
    # Offered load sweep: 0.1 .. 1.0 in steps of 0.1.
    load_range = [0.1 * x for x in range(1, 11)]
    plt.figure()
    # One curve per repetition-degree distribution.
    degree_distrs = [[0, 1],  # slotted aloha
                     [0, 0, 1],  # 2-regular CRDSA
                     [0, 0, 0, 0, 1],  # 4-regular CRDSA
                     [0, 0, 0.5, 0.28, 0, 0, 0, 0, 0.22],
                     [0, 0, 0.25, 0.6, 0, 0, 0, 0, 0.15]]
    degree_distr_labels = ["s-aloha",
                           "2-CRDSA",
                           "4-CRDSA",
                           r"$\Lambda_3$",
                           r"$\Lambda_4$"]
    results_to_store = {}
    for label, degree_distr in zip(degree_distr_labels, degree_distrs):
        # next() on the shared class-level cycles keeps colors/markers
        # distinct between successive curves.
        color=next(IrsaTestFixed.COLORS)
        marker=next(IrsaTestFixed.MARKERS)
        params["degree_distr"] = degree_distr
        pktl = []
        for load_idx, load in enumerate(load_range):
            params["num_ues"] = int(params["num_resources"]*load)
            params["act_prob"] = 1
            res = irsa.irsa_run(**params)
            mean_pktl = np.mean(res.packet_loss)
            pktl.append(mean_pktl)
            # FIXME it will be certainly valuable to check whether confidence interval is not too high
            # self.assertAlmostEqual(values[m][load_idx], t, delta=tc)
        results_to_store[label] = pktl
        plt.plot(load_range, pktl, "-"+color+marker, markeredgecolor=color,
                 markerfacecolor="None", label=label)
    # Persist the raw numbers next to the plot for later inspection.
    with open("../tests/pkt_loss.json", "w") as f:
        json.dump(results_to_store, f)
    plt.ylabel("Packet loss")
    plt.xlabel("Offered Load")
    # Packet loss spans orders of magnitude, hence the log scale.
    plt.yscale("log")
    plt.ylim((1e-4, 1e0))
    plt.legend(loc=0)
    plt.grid(True)
    plt.savefig("../tests/packet_loss_fixed.pdf")
if __name__ == "__main__":
    # unittest.makeSuite() is deprecated since Python 3.11 and removed in
    # 3.13; TestLoader.loadTestsFromTestCase is the supported replacement.
    suite = unittest.TestSuite()
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(IrsaTestFixed))
    runner = unittest.TextTestRunner()
    runner.run(suite)
    # unittest.main()
| [
"os.mkdir",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.figure",
"numpy.mean",
"itertools.cycle",
"src.irsa.mean_confidence_interval",
"unittest.makeSuite",
"matplotlib.pyplot.errorbar",
"json.dump",
"unittest.TestSuite",
"matplotlib.pyplot.ylim",
"matplotli... | [((305, 328), 'matplotlib.use', 'matplotlib.use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (319, 328), False, 'import matplotlib\n'), ((460, 502), 'itertools.cycle', 'itertools.cycle', (["['r', 'b', 'g', 'm', 'k']"], {}), "(['r', 'b', 'g', 'm', 'k'])\n", (475, 502), False, 'import itertools\n'), ((517, 559), 'itertools.cycle', 'itertools.cycle', (["['s', 'o', '^', 'v', '<']"], {}), "(['s', 'o', '^', 'v', '<'])\n", (532, 559), False, 'import itertools\n'), ((5587, 5607), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (5605, 5607), False, 'import unittest\n'), ((5676, 5701), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (5699, 5701), False, 'import unittest\n'), ((784, 808), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""classic"""'], {}), "('classic')\n", (797, 808), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1329), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1327, 1329), True, 'import matplotlib.pyplot as plt\n'), ((3040, 3075), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized throughput"""'], {}), "('Normalized throughput')\n", (3050, 3075), True, 'import matplotlib.pyplot as plt\n'), ((3084, 3110), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Offered Load"""'], {}), "('Offered Load')\n", (3094, 3110), True, 'import matplotlib.pyplot as plt\n'), ((3119, 3136), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (3129, 3136), True, 'import matplotlib.pyplot as plt\n'), ((3145, 3159), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3153, 3159), True, 'import matplotlib.pyplot as plt\n'), ((3168, 3220), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../tests/varying_frame_size_fixed.pdf"""'], {}), "('../tests/varying_frame_size_fixed.pdf')\n", (3179, 3220), True, 'import matplotlib.pyplot as plt\n'), ((3744, 3756), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3754, 3756), True, 'import matplotlib.pyplot 
as plt\n'), ((5326, 5351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Packet loss"""'], {}), "('Packet loss')\n", (5336, 5351), True, 'import matplotlib.pyplot as plt\n'), ((5360, 5386), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Offered Load"""'], {}), "('Offered Load')\n", (5370, 5386), True, 'import matplotlib.pyplot as plt\n'), ((5395, 5412), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (5405, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5421, 5444), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0001, 1.0)'], {}), '((0.0001, 1.0))\n', (5429, 5444), True, 'import matplotlib.pyplot as plt\n'), ((5451, 5468), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0)'}), '(loc=0)\n', (5461, 5468), True, 'import matplotlib.pyplot as plt\n'), ((5477, 5491), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (5485, 5491), True, 'import matplotlib.pyplot as plt\n'), ((5500, 5545), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../tests/packet_loss_fixed.pdf"""'], {}), "('../tests/packet_loss_fixed.pdf')\n", (5511, 5545), True, 'import matplotlib.pyplot as plt\n'), ((5627, 5660), 'unittest.makeSuite', 'unittest.makeSuite', (['IrsaTestFixed'], {}), '(IrsaTestFixed)\n', (5645, 5660), False, 'import unittest\n'), ((672, 692), 'os.mkdir', 'os.mkdir', (['"""../tests"""'], {}), "('../tests')\n", (680, 692), False, 'import os\n'), ((2649, 2743), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['load_range', 'thr'], {'linestyle': '"""--"""', 'color': 'color', 'yerr': 'thr_c', 'label': "('$m=%d$' % m)"}), "(load_range, thr, linestyle='--', color=color, yerr=thr_c,\n label='$m=%d$' % m)\n", (2661, 2743), True, 'import matplotlib.pyplot as plt\n'), ((2753, 2904), 'matplotlib.pyplot.plot', 'plt.plot', (['load_range', 'values[m]'], {'linestyle': '""""""', 'color': 'color', 'markeredgecolor': 'color', 'marker': 'marker', 'label': "('IRSA, $m=%d$' % m)", 'markerfacecolor': '"""None"""'}), "(load_range, values[m], 
linestyle='', color=color, markeredgecolor=\n color, marker=marker, label='IRSA, $m=%d$' % m, markerfacecolor='None')\n", (2761, 2904), True, 'import matplotlib.pyplot as plt\n'), ((3000, 3030), 'json.dump', 'json.dump', (['results_to_store', 'f'], {}), '(results_to_store, f)\n', (3009, 3030), False, 'import json\n'), ((5092, 5204), 'matplotlib.pyplot.plot', 'plt.plot', (['load_range', 'pktl', "('-' + color + marker)"], {'markeredgecolor': 'color', 'markerfacecolor': '"""None"""', 'label': 'label'}), "(load_range, pktl, '-' + color + marker, markeredgecolor=color,\n markerfacecolor='None', label=label)\n", (5100, 5204), True, 'import matplotlib.pyplot as plt\n'), ((5286, 5316), 'json.dump', 'json.dump', (['results_to_store', 'f'], {}), '(results_to_store, f)\n', (5295, 5316), False, 'import json\n'), ((2129, 2152), 'src.irsa.irsa_run', 'irsa.irsa_run', ([], {}), '(**params)\n', (2142, 2152), True, 'import src.irsa as irsa\n'), ((2173, 2207), 'numpy.mean', 'np.mean', (['res.throughput_normalized'], {}), '(res.throughput_normalized)\n', (2180, 2207), True, 'import numpy as np\n'), ((2229, 2285), 'src.irsa.mean_confidence_interval', 'irsa.mean_confidence_interval', (['res.throughput_normalized'], {}), '(res.throughput_normalized)\n', (2258, 2285), True, 'import src.irsa as irsa\n'), ((4738, 4761), 'src.irsa.irsa_run', 'irsa.irsa_run', ([], {}), '(**params)\n', (4751, 4761), True, 'import src.irsa as irsa\n'), ((4790, 4814), 'numpy.mean', 'np.mean', (['res.packet_loss'], {}), '(res.packet_loss)\n', (4797, 4814), True, 'import numpy as np\n')] |
"""
Example of using Automatic Mixed Precision (AMP) with PyTorch
This example shows the change needed to incorporate AMP in a
PyTorch model. In general the benefits are
1. Faster computations due to the introduction of half-precision
floats and tensor core operations with e.g. V100 GPUs
2. Larger batch size as loss, cache, and gradients can be
saved at a lower precision.
This is just an example of using AMP. The solution can be computed
more easily using linear least-squares and we use this for
validating the results.
<NAME>
Dec 2020
<EMAIL>
"""
import torch
import numpy as np
from apex import amp
# Select the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Using device:', device)
def compute(amp_type='none', iterations=5000, verbose=False):
    """Fit a cubic polynomial to sin(x) with RMSprop, optionally under AMP.

    Parameters
    ----------
    amp_type : str
        'apex'   : use AMP from the NVIDIA Apex package
        'native' : use AMP from the torch.cuda.amp package
        'none'   : do not use AMP (default)
    iterations : int
        Number of optimization steps.
    verbose : bool
        If True, print the loss every 100 iterations.

    Returns
    -------
    (torch.nn.Linear, float)
        The fitted linear layer and the final MSE loss.

    Raises
    ------
    ValueError
        If ``amp_type`` is not one of the three supported modes.
    """
    # Fail fast on an unknown mode instead of erroring mid-training.
    # (The original default 'None' -- note the capital -- always fell
    # through to the ValueError branch on the first backward pass.)
    if amp_type not in ('apex', 'native', 'none'):
        raise ValueError(f'No such option amp_type={amp_type}')

    # Create Tensors to hold input and outputs.
    x = torch.linspace(-np.pi, np.pi, 2000).to(device)
    y = torch.sin(x).to(device)

    # Prepare the input tensor (x, x^2, x^3).
    p = torch.tensor([1, 2, 3]).to(device)
    xx = x.unsqueeze(-1).pow(p)

    # Use the nn package to define our model and loss function.
    model = torch.nn.Sequential(
        torch.nn.Linear(3, 1),
        torch.nn.Flatten(0, 1)
    )
    model.to(device)
    loss_fn = torch.nn.MSELoss(reduction='sum')

    # Create optimizer
    optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-3)

    if amp_type == 'apex':
        # Make model and optimizer AMP models and optimizers
        model, optimizer = amp.initialize(model, optimizer)
    elif amp_type == 'native':
        scaler = torch.cuda.amp.GradScaler()

    for t in range(iterations):
        # Forward pass: compute predicted y by passing x to the model.
        if amp_type == 'native':
            with torch.cuda.amp.autocast():
                y_pred = model(xx)
                loss = loss_fn(y_pred, y)
        else:
            y_pred = model(xx)
            loss = loss_fn(y_pred, y)

        # Compute and print loss.
        if verbose and t % 100 == 99:
            print("t={:4}, loss={:4}".format(t, loss.item()))

        optimizer.zero_grad()

        # Backward pass: compute gradient of the loss with respect to model
        # parameters using AMP. Substitutes loss.backward() in other models
        if amp_type == 'apex':
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()
        elif amp_type == 'native':
            # Scale the loss before backward, then unscale inside step().
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()

    return model[0], loss.item()
def computeLS():
    """Closed-form cubic least-squares fit of sin(x) on [-pi, pi].

    Returns
    -------
    (ndarray, float)
        Polynomial coefficients in ascending-power order and the summed
        squared residual; serves as the reference for the trained models.
    """
    sample_x = np.linspace(-np.pi, np.pi, 2000)
    sample_y = np.sin(sample_x)
    fit = np.polyfit(sample_x, sample_y, deg=3, full=True)
    # polyfit returns highest power first; reverse to ascending order.
    coefficients = fit[0][::-1]
    residual_sum = fit[1][0]
    return coefficients, residual_sum
def display(model_name, loss, p):
    """Print a model's MSE loss and its fitted cubic polynomial."""
    print(f'{model_name}: MSE loss = {loss:.2e}')
    polynomial = f'{p[0]:.2e} + {p[1]:.2e} x + {p[2]:.2e} x^2 + {p[3]:.2e} x^3'
    print(f'{model_name}: y = {polynomial}')
# Run all three training variants plus the least-squares reference solution.
without_amp, without_amp_loss = compute(amp_type='none')
with_amp_native, with_amp_native_loss = compute(amp_type='native')
with_amp_apex, with_amp_apex_loss = compute(amp_type='apex')
ls, ls_loss = computeLS()

# Report each fit as coefficients of y = a0 + a1 x + a2 x^2 + a3 x^3;
# bias is a0, the three linear-layer weights are a1..a3.
display("Torch with amp apex ", with_amp_apex_loss, [with_amp_apex.bias.item(), with_amp_apex.weight[:, 0].item(),
                                                     with_amp_apex.weight[:, 1].item(), with_amp_apex.weight[:, 2].item()])
display("Torch with amp native", with_amp_native_loss, [with_amp_native.bias.item(), with_amp_native.weight[:, 0].item(),
                                                        with_amp_native.weight[:, 1].item(), with_amp_native.weight[:, 2].item()])
display("Torch without amp ", without_amp_loss, [without_amp.bias.item(), without_amp.weight[:, 0].item(),
                                                 without_amp.weight[:, 1].item(), without_amp.weight[:, 2].item()])
display("LS model ", ls_loss, ls)
| [
"torch.cuda.amp.autocast",
"torch.nn.MSELoss",
"apex.amp.initialize",
"numpy.polyfit",
"numpy.sin",
"torch.cuda.is_available",
"apex.amp.scale_loss",
"numpy.linspace",
"torch.nn.Linear",
"torch.cuda.amp.GradScaler",
"torch.linspace",
"torch.sin",
"torch.tensor",
"torch.nn.Flatten"
] | [((1402, 1435), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (1418, 1435), False, 'import torch\n'), ((3012, 3044), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(2000)'], {}), '(-np.pi, np.pi, 2000)\n', (3023, 3044), True, 'import numpy as np\n'), ((3053, 3062), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (3059, 3062), True, 'import numpy as np\n'), ((3106, 3140), 'numpy.polyfit', 'np.polyfit', (['x', 'y'], {'deg': '(3)', 'full': '(True)'}), '(x, y, deg=3, full=True)\n', (3116, 3140), True, 'import numpy as np\n'), ((650, 675), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (673, 675), False, 'import torch\n'), ((1306, 1327), 'torch.nn.Linear', 'torch.nn.Linear', (['(3)', '(1)'], {}), '(3, 1)\n', (1321, 1327), False, 'import torch\n'), ((1337, 1359), 'torch.nn.Flatten', 'torch.nn.Flatten', (['(0)', '(1)'], {}), '(0, 1)\n', (1353, 1359), False, 'import torch\n'), ((1643, 1675), 'apex.amp.initialize', 'amp.initialize', (['model', 'optimizer'], {}), '(model, optimizer)\n', (1657, 1675), False, 'from apex import amp\n'), ((995, 1030), 'torch.linspace', 'torch.linspace', (['(-np.pi)', 'np.pi', '(2000)'], {}), '(-np.pi, np.pi, 2000)\n', (1009, 1030), False, 'import torch\n'), ((1050, 1062), 'torch.sin', 'torch.sin', (['x'], {}), '(x)\n', (1059, 1062), False, 'import torch\n'), ((1133, 1156), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1145, 1156), False, 'import torch\n'), ((1724, 1751), 'torch.cuda.amp.GradScaler', 'torch.cuda.amp.GradScaler', ([], {}), '()\n', (1749, 1751), False, 'import torch\n'), ((1914, 1939), 'torch.cuda.amp.autocast', 'torch.cuda.amp.autocast', ([], {}), '()\n', (1937, 1939), False, 'import torch\n'), ((2492, 2523), 'apex.amp.scale_loss', 'amp.scale_loss', (['loss', 'optimizer'], {}), '(loss, optimizer)\n', (2506, 2523), False, 'from apex import amp\n')] |
#!/usr/bin/env python
# pylint: disable=invalid-name
"""rts_smooth.py: Smooth raster time series."""
from __future__ import absolute_import, division, print_function
import argparse
from array import array
import os
from os.path import basename
try:
from pathlib2 import Path
except ImportError:
from pathlib import Path
import sys
import time
import numpy as np
try:
import gdal
except ImportError:
from osgeo import gdal
from modape.utils import dtype_GDNP
from modape.whittaker import ws2d, ws2doptv, ws2doptvp # pylint: disable=no-name-in-module
def init_gdal(x, path, fn=None, dt=None):
    """Initializes empty GeoTIFF based on template.

    Args:
        x (str): Path to template raster
        path (Path): Target directory
        fn (str): Output filename (optional, defaults to the template's name)
        dt (str): Output datatype (optional, defaults to the template's)
    """
    if not fn:
        fn = basename(x)  # same filename if none is supplied
    ds = gdal.Open(x)
    driver = ds.GetDriver()

    if not dt:
        dt_new = ds.GetRasterBand(1).DataType  # same datatype if none is supplied
    else:
        dt_new = dtype_GDNP(dt)[0]  # Parse datatype

    # Create empty copy with identical extent, projection and band count
    ds_new = driver.Create(path.joinpath(fn).as_posix(), ds.RasterXSize, ds.RasterYSize, ds.RasterCount, dt_new)
    ds_new.SetGeoTransform(ds.GetGeoTransform())
    ds_new.SetProjection(ds.GetProjection())

    nodata = ds.GetRasterBand(1).GetNoDataValue()
    # GetNoDataValue() returns None when the template defines no nodata
    # value; SetNoDataValue(None) raises a TypeError, so only set it when
    # the template actually has one.
    if nodata is not None:
        ds_new.GetRasterBand(1).SetNoDataValue(nodata)

    # Dropping the references flushes and closes the datasets (GDAL idiom)
    ds = None
    driver = None
    ds_new = None
def iterate_blocks(rows, cols, n):
    """Generator for blockwise iteration over a 2D array.

    Args:
        rows (int): Number of rows
        cols (int): Number of columns
        n (int): Side length of block

    Yields:
        tuple: (start row, number of rows, start column, number of columns)
    """
    # Walk the grid row-major; edge blocks are clipped to the array bounds.
    for row_off in range(0, rows, n):
        row_len = min(n, rows - row_off)
        for col_off in range(0, cols, n):
            yield row_off, row_len, col_off, min(n, cols - col_off)
class RTS(object):
    """Class for raster timeseries for smoothing.

    All rasters in the series must share dimensions and datatype; blocks of
    bsize x bsize pixels are smoothed along the time axis and written to a
    subdirectory of targetdir.
    """

    def __init__(self, files, targetdir, bsize=256, nodata=None):
        """Creates instance of raster timeseries class.

        The metadata for the timeseries is extracted from the first file in
        the directory.

        Args:
            files ([str]): List or filepaths to process
            targetdir (str): Target directory for smoothed files
            bsize (int): Side length of processing blocks (default = 256)
            nodata (int): Nodata value (default is read from reference file)
        """
        # Select reference file and sort
        self.files = files
        self.files.sort()
        self.ref_file = self.files[0]
        self.nfiles = len(self.files)
        self.bsize = bsize

        # Raster dimensions come from the reference file; all other files
        # are assumed to match.
        ds = gdal.Open(self.ref_file)
        self.nrows = ds.RasterYSize
        self.ncols = ds.RasterXSize

        if nodata:
            self.nodata = nodata
        else:
            self.nodata = ds.GetRasterBand(1).GetNoDataValue()  # nodata from file
        if not self.nodata:
            self.nodata = 0  # Set to 0 if read fails
            print('Failed to read NoData value from files. NoData set to 0.')
        ds = None  # close the GDAL dataset
        self.targetdir = Path(targetdir)

    def init_rasters(self, tdir):
        """Intitialize empty rasters for smoothed data.

        Args:
            tdir (Path): Target directory
        """
        # Iterate over files
        for f in self.files:
            try:
                init_gdal(f, tdir)  # Initialize empty copy
            except AttributeError:
                # gdal.Open returns None for unreadable files, which surfaces
                # as an AttributeError inside init_gdal
                print('Error initializing {}! Please check data'.format(f))
                raise

    def ws2d(self, s):
        """Apply whittaker smoother with fixed s-value to data.

        Args:
            s (float): log10 value of s
        """
        # NOTE(review): the bare name ws2d below refers to the imported
        # modape.whittaker.ws2d function, not this method. It applies 10**s;
        # main() also converts via 10**float(svalue) before calling this
        # method, which looks like a double exponentiation -- verify.
        tdir = self.targetdir.joinpath('filt0')
        # Create full path filenames for smoothed rasters
        outfiles = [tdir.joinpath(Path(x).name).as_posix() for x in self.files]

        if not tdir.exists():
            try:
                tdir.mkdir(parents=True)
            except:
                print('Issues creating subdirectory {}'.format(tdir.as_posix()))
                raise

        self.init_rasters(tdir)  # Initialize rasters

        # Iterate over blocks
        for yo, yd, xo, xd in iterate_blocks(self.nrows, self.ncols, self.bsize):
            arr = np.zeros((yd*xd, self.nfiles), dtype='double')  # values array
            wts = arr.copy()  # weights array
            # arr_helper shares memory with arr: writing smoothed rows into
            # arr makes them visible here for the per-file writes below.
            arr_helper = arr.view()  # helper
            arr_helper.shape = (yd, xd, self.nfiles)

            # Iterate files to read data
            for fix in range(self.nfiles):
                ds = gdal.Open(self.files[fix])
                arr_helper[..., fix] = ds.ReadAsArray(xoff=xo, xsize=xd, yoff=yo, ysize=yd)
                ds = None

            # Data which is not nodata gets weight 1, others 0
            wts[...] = (arr != self.nodata) * 1

            # Pixels (rows) with at least one valid observation
            ndix = np.sum(arr != self.nodata, 1) > 0
            map_index = np.where(ndix)[0]

            if map_index.size == 0:
                continue  # skip bc no data in block

            arr[np.logical_not(ndix), :] = self.nodata

            for ix in map_index:
                arr[ix, ...] = ws2d(arr[ix, ...], 10**s, wts[ix, ...])

            # Write smoothed data to disk
            for fix in range(self.nfiles):
                ds = gdal.Open(outfiles[fix], gdal.GA_Update)
                ds_b = ds.GetRasterBand(1)
                ds_b.WriteArray(arr_helper[..., fix].round(), xo, yo)
                ds_b.FlushCache()
                ds_b = None
                ds = None

        # Write config text file to disk with processing parameters and info
        with open(tdir.joinpath('filt0_config.txt').as_posix(), 'w') as thefile:
            thefile.write('Running whittaker smoother with fixed s value\n')
            thefile.write('\n')
            thefile.write('Timestamp: {}\n'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
            thefile.write('\n')
            thefile.write('Sopt: {}\n'.format(s))
            thefile.write('log10(Sopt): {}\n'.format(np.log10(s)))
            thefile.write('Nodata value: {}\n'.format(self.nodata))
            thefile.write('\n')

    def ws2dopt(self, srange, p=None):
        """Apply whittaker smoother with V-curve optimization of s to data.

        If a p-value is supplied, the asymmetric whittaker smoother will be
        applied.

        Args:
            srange (arr): array of s-values to apply
            p (float): P-value for percentile
        """
        # The compiled smoother expects a contiguous double buffer
        srange_arr = array('d', srange)

        if p:
            tdir = self.targetdir.joinpath('filtoptvp')
        else:
            tdir = self.targetdir.joinpath('filtoptv')

        outfiles = [tdir.joinpath(Path(x).name).as_posix() for x in self.files]

        if not tdir.exists():
            try:
                tdir.mkdir(parents=True)
            except:
                print('Issues creating subdirectory {}'.format(tdir.as_posix()))
                raise

        self.sgrid = tdir.joinpath('sgrid.tif').as_posix()  # Path to s-grid
        self.init_rasters(tdir)

        # S-grid needs to be initialized separately
        init_gdal(self.ref_file, tdir, 'sgrid.tif', dt='float32')

        for yo, yd, xo, xd in iterate_blocks(self.nrows, self.ncols, self.bsize):
            arr = np.zeros((yd*xd, self.nfiles), dtype='double')
            wts = arr.copy()
            # Per-pixel optimized s values for this block
            sarr = np.zeros((yd*xd), dtype='double')
            arr_helper = arr.view()
            arr_helper.shape = (yd, xd, self.nfiles)

            for fix in range(self.nfiles):
                ds = gdal.Open(self.files[fix])
                arr_helper[..., fix] = ds.ReadAsArray(xoff=xo, xsize=xd, yoff=yo, ysize=yd)
                ds = None

            wts[...] = (arr != self.nodata)*1

            ndix = np.sum(arr != self.nodata, 1) > 0 #70
            map_index = np.where(ndix)[0]

            if map_index.size == 0:
                continue  # skip bc no data in block

            arr[np.logical_not(ndix), :] = self.nodata

            for ix in map_index:
                if p:
                    arr[ix, ...], sarr[ix] = ws2doptvp(arr[ix, ...], wts[ix, ...], srange_arr, p)
                else:
                    arr[ix, ...], sarr[ix] = ws2doptv(arr[ix, ...], wts[ix, ...], srange_arr)

            for fix in range(self.nfiles):
                ds = gdal.Open(outfiles[fix], gdal.GA_Update)
                ds_b = ds.GetRasterBand(1)
                ds_b.WriteArray(arr_helper[..., fix].round(), xo, yo)
                ds_b.FlushCache()
                ds_b = None
                ds = None

            # Convert s values in grid to log10(s)
            sarr[sarr > 0] = np.log10(sarr[sarr > 0])

            # Write s-values to grid
            ds = gdal.Open(self.sgrid, gdal.GA_Update)
            ds_b = ds.GetRasterBand(1)
            ds_b.WriteArray(sarr.reshape(yd, xd), xo, yo)
            ds_b.FlushCache()
            ds_b = None
            ds = None

        # Write config text file to disk with processing parameters and info
        if p:
            with open(tdir.joinpath('filtoptvp_config.txt').as_posix(), 'w') as thefile:
                thefile.write('Running asymmetric whittaker smoother with V-curve optimization\n')
                thefile.write('\n')
                thefile.write('Timestamp: {}\n'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
                thefile.write('\n')
                thefile.write('Sgrid: {}\n'.format(self.sgrid))
                thefile.write('P value: {}\n'.format(p))
                thefile.write('Nodata value: {}\n'.format(self.nodata))
                thefile.write('\n')
        else:
            with open(tdir.joinpath('filtoptv_config.txt').as_posix(), 'w') as thefile:
                thefile.write('Running whittaker smoother with V-curve optimization\n')
                thefile.write('\n')
                thefile.write('Timestamp: {}\n'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
                thefile.write('\n')
                thefile.write('Sgrid: {}\n'.format(self.sgrid))
                thefile.write('Nodata value: {}\n'.format(self.nodata))
                thefile.write('\n')
def main():
    """Apply whittaker smoother to a timeseries of local raster files.

    Raster files in path are combined to a timeseries and smoothed using the
    whittaker smoother, optionally with V-curve optimization of s.

    The user needs to make sure the raster files to be smoothed are identical
    in dimensions and type.

    Parallel processing is (currently) not implemented, so big timeseries
    might take some time!
    """
    parser = argparse.ArgumentParser(description='Extract a window from MODIS products')
    parser.add_argument('path', help='Path containing raster files')
    parser.add_argument('-P', '--pattern', help='Pattern to filter file names', default='*', metavar='')
    parser.add_argument('-d', '--targetdir', help='Target directory for GeoTIFFs (default current directory)', default=os.getcwd(), metavar='')
    parser.add_argument('-s', '--svalue', help='S value for smoothing (has to be log10(s))', metavar='', type=float)
    parser.add_argument('-S', '--srange', help='S range for V-curve (float log10(s) values as smin smax sstep - default 0.0 4.0 0.1)', nargs='+', metavar='', type=float)
    parser.add_argument('-p', '--pvalue', help='Value for asymmetric smoothing (float required)', metavar='', type=float)
    parser.add_argument('-b', '--blocksize', help='Processing block side length (default 256)', default=256, metavar='', type=int)
    parser.add_argument('--nodata', help='NoData value', metavar='', type=float)
    parser.add_argument('--soptimize', help='Use V-curve (with p if supplied) for s value optimization', action='store_true')

    # fail and print help if no arguments supplied
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(0)

    args = parser.parse_args()
    print('\n[{}]: Starting smoothRTS.py ... \n'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))

    input_dir = Path(args.path)
    if not input_dir.exists():
        raise SystemExit('directory PATH does not exist!')

    # Find files in path
    fls = [x.as_posix() for x in input_dir.glob(args.pattern) if x.is_file()]
    if not fls:
        raise ValueError('No files found in {} with pattern {}, please check input.'.format(args.path, args.pattern))

    # Create raster timeseries object
    rts = RTS(files=fls,
              targetdir=args.targetdir,
              bsize=args.blocksize,
              nodata=args.nodata)

    # V-curve optimization is triggered by either supplying the soptimize flag or a s-range
    if args.soptimize:
        # Parse s-range or use default
        if args.srange:
            try:
                assert len(args.srange) == 3
                srange = np.arange(float(args.srange[0]),
                                   float(args.srange[1]) + float(args.srange[2]),
                                   float(args.srange[2])).round(2)
            except (IndexError, TypeError, AssertionError):
                raise SystemExit('Error with s value array values. Expected three values of float log10(s) - smin smax sstep !')
        else:
            srange = np.arange(0, 4.1, 0.1).round(2)

        if args.pvalue:
            print('\nRunning asymmetric whittaker smoother with V-curve optimization ... \n')
            rts.ws2dopt(srange=srange, p=args.pvalue)
        else:
            print('\nRunning whittaker smoother with V-curve optimization ... \n')
            rts.ws2dopt(srange=srange)
    else:
        ## insert if-clause for s value if grid option is needed (see smoothMODIS)
        try:
            # NOTE(review): RTS.ws2d applies 10**s internally as well, so
            # converting here may double-apply the exponent -- verify.
            s = 10**float(args.svalue)  # Convert s value from log10(s)
        except (TypeError, ValueError):
            # Narrowed from a bare except: float() raises TypeError when
            # --svalue was omitted (None) or ValueError for a non-number;
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            raise SystemExit('Error with s value. Expected float log10(s)!')

        print('\nRunning whittaker smoother with fixed s value ... \n')
        rts.ws2d(s=s)

    # Closing message now names this script (was 'smoothMODIS.py', which
    # contradicted the start-up message above).
    print('\n[{}]: smoothRTS.py finished successfully.\n'.format(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())))
# Script entry point: run the command-line interface.
if __name__ == '__main__':
    main()
| [
"modape.whittaker.ws2doptv",
"numpy.sum",
"argparse.ArgumentParser",
"os.path.basename",
"os.getcwd",
"numpy.logical_not",
"numpy.zeros",
"modape.whittaker.ws2doptvp",
"modape.utils.dtype_GDNP",
"pathlib.Path",
"numpy.where",
"time.localtime",
"array.array",
"modape.whittaker.ws2d",
"num... | [((905, 917), 'osgeo.gdal.Open', 'gdal.Open', (['x'], {}), '(x)\n', (914, 917), False, 'from osgeo import gdal\n'), ((10837, 10912), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract a window from MODIS products"""'}), "(description='Extract a window from MODIS products')\n", (10860, 10912), False, 'import argparse\n'), ((12275, 12290), 'pathlib.Path', 'Path', (['args.path'], {}), '(args.path)\n', (12279, 12290), False, 'from pathlib import Path\n'), ((848, 859), 'os.path.basename', 'basename', (['x'], {}), '(x)\n', (856, 859), False, 'from os.path import basename\n'), ((2820, 2844), 'osgeo.gdal.Open', 'gdal.Open', (['self.ref_file'], {}), '(self.ref_file)\n', (2829, 2844), False, 'from osgeo import gdal\n'), ((3280, 3295), 'pathlib.Path', 'Path', (['targetdir'], {}), '(targetdir)\n', (3284, 3295), False, 'from pathlib import Path\n'), ((6705, 6723), 'array.array', 'array', (['"""d"""', 'srange'], {}), "('d', srange)\n", (6710, 6723), False, 'from array import array\n'), ((12103, 12114), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (12111, 12114), False, 'import sys\n'), ((1070, 1084), 'modape.utils.dtype_GDNP', 'dtype_GDNP', (['dt'], {}), '(dt)\n', (1080, 1084), False, 'from modape.utils import dtype_GDNP\n'), ((4461, 4509), 'numpy.zeros', 'np.zeros', (['(yd * xd, self.nfiles)'], {'dtype': '"""double"""'}), "((yd * xd, self.nfiles), dtype='double')\n", (4469, 4509), True, 'import numpy as np\n'), ((7484, 7532), 'numpy.zeros', 'np.zeros', (['(yd * xd, self.nfiles)'], {'dtype': '"""double"""'}), "((yd * xd, self.nfiles), dtype='double')\n", (7492, 7532), True, 'import numpy as np\n'), ((7579, 7612), 'numpy.zeros', 'np.zeros', (['(yd * xd)'], {'dtype': '"""double"""'}), "(yd * xd, dtype='double')\n", (7587, 7612), True, 'import numpy as np\n'), ((8861, 8885), 'numpy.log10', 'np.log10', (['sarr[sarr > 0]'], {}), '(sarr[sarr > 0])\n', (8869, 8885), True, 'import numpy as np\n'), ((8941, 8978), 'osgeo.gdal.Open', 
'gdal.Open', (['self.sgrid', 'gdal.GA_Update'], {}), '(self.sgrid, gdal.GA_Update)\n', (8950, 8978), False, 'from osgeo import gdal\n'), ((11206, 11217), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11215, 11217), False, 'import os\n'), ((4772, 4798), 'osgeo.gdal.Open', 'gdal.Open', (['self.files[fix]'], {}), '(self.files[fix])\n', (4781, 4798), False, 'from osgeo import gdal\n'), ((5048, 5077), 'numpy.sum', 'np.sum', (['(arr != self.nodata)', '(1)'], {}), '(arr != self.nodata, 1)\n', (5054, 5077), True, 'import numpy as np\n'), ((5106, 5120), 'numpy.where', 'np.where', (['ndix'], {}), '(ndix)\n', (5114, 5120), True, 'import numpy as np\n'), ((5334, 5375), 'modape.whittaker.ws2d', 'ws2d', (['arr[ix, ...]', '(10 ** s)', 'wts[ix, ...]'], {}), '(arr[ix, ...], 10 ** s, wts[ix, ...])\n', (5338, 5375), False, 'from modape.whittaker import ws2d, ws2doptv, ws2doptvp\n'), ((5481, 5521), 'osgeo.gdal.Open', 'gdal.Open', (['outfiles[fix]', 'gdal.GA_Update'], {}), '(outfiles[fix], gdal.GA_Update)\n', (5490, 5521), False, 'from osgeo import gdal\n'), ((7767, 7793), 'osgeo.gdal.Open', 'gdal.Open', (['self.files[fix]'], {}), '(self.files[fix])\n', (7776, 7793), False, 'from osgeo import gdal\n'), ((7978, 8007), 'numpy.sum', 'np.sum', (['(arr != self.nodata)', '(1)'], {}), '(arr != self.nodata, 1)\n', (7984, 8007), True, 'import numpy as np\n'), ((8040, 8054), 'numpy.where', 'np.where', (['ndix'], {}), '(ndix)\n', (8048, 8054), True, 'import numpy as np\n'), ((8538, 8578), 'osgeo.gdal.Open', 'gdal.Open', (['outfiles[fix]', 'gdal.GA_Update'], {}), '(outfiles[fix], gdal.GA_Update)\n', (8547, 8578), False, 'from osgeo import gdal\n'), ((12238, 12254), 'time.localtime', 'time.localtime', ([], {}), '()\n', (12252, 12254), False, 'import time\n'), ((14276, 14292), 'time.localtime', 'time.localtime', ([], {}), '()\n', (14290, 14292), False, 'import time\n'), ((5230, 5250), 'numpy.logical_not', 'np.logical_not', (['ndix'], {}), '(ndix)\n', (5244, 5250), True, 'import numpy as np\n'), 
((6233, 6244), 'numpy.log10', 'np.log10', (['s'], {}), '(s)\n', (6241, 6244), True, 'import numpy as np\n'), ((8164, 8184), 'numpy.logical_not', 'np.logical_not', (['ndix'], {}), '(ndix)\n', (8178, 8184), True, 'import numpy as np\n'), ((8304, 8356), 'modape.whittaker.ws2doptvp', 'ws2doptvp', (['arr[ix, ...]', 'wts[ix, ...]', 'srange_arr', 'p'], {}), '(arr[ix, ...], wts[ix, ...], srange_arr, p)\n', (8313, 8356), False, 'from modape.whittaker import ws2d, ws2doptv, ws2doptvp\n'), ((8424, 8472), 'modape.whittaker.ws2doptv', 'ws2doptv', (['arr[ix, ...]', 'wts[ix, ...]', 'srange_arr'], {}), '(arr[ix, ...], wts[ix, ...], srange_arr)\n', (8432, 8472), False, 'from modape.whittaker import ws2d, ws2doptv, ws2doptvp\n'), ((13468, 13490), 'numpy.arange', 'np.arange', (['(0)', '(4.1)', '(0.1)'], {}), '(0, 4.1, 0.1)\n', (13477, 13490), True, 'import numpy as np\n'), ((6078, 6094), 'time.localtime', 'time.localtime', ([], {}), '()\n', (6092, 6094), False, 'import time\n'), ((4020, 4027), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (4024, 4027), False, 'from pathlib import Path\n'), ((6897, 6904), 'pathlib.Path', 'Path', (['x'], {}), '(x)\n', (6901, 6904), False, 'from pathlib import Path\n'), ((9501, 9517), 'time.localtime', 'time.localtime', ([], {}), '()\n', (9515, 9517), False, 'import time\n'), ((10142, 10158), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10156, 10158), False, 'import time\n')] |
# -*- coding:Utf-8 -*-
"""
.. currentmodule:: pylayers.antprop.channelc
VectChannel Class
=================
.. autosummary::
:toctree: generated/
VectChannel.__init__
VectChannel.show3_old
VectChannel.show3
ScalChannel Class
=================
.. autosummary::
:toctree: generated/
ScalChannel.__init__
ScalChannel.info
ScalChannel.imshow
ScalChannel.apply
ScalChannel.applywavC
ScalChannel.applywavB
ScalChannel.applywavA
ScalChannel.doddoa
ScalChannel.wavefig
ScalChannel.rayfig
VectLOS Class
=============
.. autosummary::
:toctree: generated/
VectLOS.__init__
VectLOS.cir
"""
import doctest
import pdb
import numpy as np
import scipy as sp
import pylab as plt
import struct as stru
from pylayers.antprop.channel import *
import pylayers.util.pyutil as pyu
import pylayers.signal.bsignal as bs
import pylayers.util.geomutil as geu
from pylayers.antprop.raysc import GrRay3D
from pylayers.util.project import *
class VectChannel(Ctilde):
    """ container for a vector representation of the propagation channel

    Attributes
    -----------
    Ctt FUsignal (Nray x Nf channel)
    Cpp
    Cpt
    Ctp
    built in vec2scal1
    Frt Fusignal (Nray x Nf antenna )
    Frp
    Ftt
    Ftp
    fGHz : frequency
    tauk : delay
    tang : dod
    rang : doa

    Methods
    -------
    init(S,itx,irx)
        S is a simulation object, itx and irx are index of tx and rx
    show(display=False,mode='linear')
        display vect channel
    doadod()
        scatter plot DoA - DoD
    vec2scal(fGHz)
        build scal channel without antenna
    vec2scal1(fGHz)
        build scal channel with antenna
    """
    def __init__(self, S, itx, irx, transpose=False):
        """ Load the propagation channel of link (itx, irx) from simulation S.

        Parameters
        ----------
        S
            Simulation
        itx
            tx number
        irx
            rx number
        transpose
            antenna transposition indicator (forwarded to self.load)
        """
        # .. todo::
        #
        #  to verify -ravel-
        self.fail = False
        # resolve the four per-link data files (.field, .tauk, .tang, .rang)
        # from the simulation object's dictionaries
        _filefield = S.dfield[itx][irx]
        filefield = pyu.getlong(_filefield,pstruc['DIRTUD'])
        _filetauk = S.dtauk[itx][irx]
        filetauk = pyu.getlong(_filetauk,pstruc['DIRTUD'])
        _filetang = S.dtang[itx][irx]
        filetang = pyu.getlong(_filetang,pstruc['DIRTUD'])
        _filerang = S.drang[itx][irx]
        filerang = pyu.getlong(_filerang,pstruc['DIRTUD'])
        """
        .. todo::
            Revoir Freq
        """
        # old version
        #freq = S.freq()
        #self.freq = freq
        self.fGHz = S.fGHz
        #
        # show3 of gr needs filetra and the layout (self.L)
        # not pretty
        #
        self.filetra = S.dtra[itx][irx]
        self.L = S.L
        # NOTE(review): only filefield is actually read below; the tauk/tang/rang
        # decoding is legacy code kept commented out (Python 2 prints).
        #try:
        #    fo = open(filetauk, "rb")
        #except:
        #    self.fail=True
        #    print "file ",filetauk, " is unreachable"
        # decode filetauk
        #if not self.fail:
        #    nray_tauk = unpack('i',fo.read(4))[0]
        #    print "nb rayons dans .tauk : ",nray_tauk
        #    buf = fo.read()
        #    fo.close()
        #    nray = len(buf)/8
        #    print "nb rayons 2: ",nray
        #    self.tauk = ndarray(shape=nray,buffer=buf)
        #    if nray_tauk != nray:
        #        print itx , irx
        #        print nray_tauk - nray
        #self.tauk = self.tauk
        Ctilde.__init__(self)
        self.load(filefield, transpose)
        # decode the angular files (.tang and .rang)
        # #try:
        #     fo = open(filetang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filetang, " is unreachable"
        # if not self.fail:
        #     nray_tang = unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # corrective for evalfield bug
        #     tmp = ndarray(shape=(nray_tang,2),buffer=buf)
        #     self.tang = tmp[0:nray,:]
        # try:
        #     fo = open(filerang, "rb")
        # except:
        #     self.fail=True
        #     print "file ",filerang, " is unreachable"
        #
        # if not self.fail:
        #     nray_rang = stru.unpack('i',fo.read(4))[0]
        #     buf = fo.read()
        #     fo.close()
        #     # corrective for evalfield bug
        #     tmp = ndarray(shape=(nray_rang,2),buffer=buf)
        #     self.rang = tmp[0:nray,:]
        #sh = shape(self.Ctt.y)
        """
        .. todo::
            Express Ftt and Ftp in global frame from Tt and ant_tx
            Express Frt and Frp in global frame from Tt and ant_tx
        """
        #self.Ftt = FUsignal(fGHz,np.ones(sh))
        #self.Ftp = FUsignal(fGHz,np.zeros(sh))
        #self.Frt = FUsignal(fGHz,np.ones(sh))
        #self.Frp = FUsignal(fGHz,np.zeros(sh))
    def show3_old(self, id=0):
        """ geomview visualization old version

        This function provides a complete ray tracing visualization
        of the channel structure. The rays are color coded as a function
        of their energy.

        Parameters
        ----------
        id : int
            index of filetra
        """
        # total per-ray energy across the four polarization terms
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(id) + "_col.list",pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        # NOTE(review): Emax = Es[0] is a scalar, so len(Emax) raises
        # TypeError — this legacy method looks unrunnable as written; confirm.
        rayset = len(Emax)
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([1, 0, 0])  # red
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        rayset = len(where((Es >= 0.1 * Emax) & (Es < 0.5 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 0, 1])  # blue
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        rayset = len(where((Es >= 0.01 * Emax) & (Es < 0.1 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 1])  # cyan
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        rayset = len(where((Es >= 0.001 * Emax) & (Es < 0.01 * Emax))[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([0, 1, 0])  # green
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        k = i + 1
        rayset = len(where(Es < 0.001 * Emax)[0])
        for i in range(rayset):
            j = v[i + k]
            r = gr.ray3d[j]
            col = np.array([1, 1, 0])  # yellow
            fileray = r.show3(False, False, col, j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        chaine = "geomview " + filename + " 2>/dev/null &"
        os.system(chaine)
    def show3(self, seuildb=100):
        """ geomview visualization

        This function provides a complete ray tracing visualization
        of the radio channel. Rays are color coded as a function of
        their energy.

        Parameters
        ----------
        seuildb : float
            energy threshold in dB, default 100
        """
        # total per-ray energy across the four polarization terms
        E = self.Ctt.energy() + self.Ctp.energy() + \
            self.Cpt.energy() + self.Cpp.energy()
        u = argsort(E)
        v = u[-1::-1]
        Es = E[v]
        gr = GrRay3D()
        gr.load(self.filetra, self.L)
        filename = pyu.getlong("grRay" + str(seuildb) + "_col.list", pstruc['DIRGEOM'])
        fo = open(filename, "w")
        fo.write("LIST\n")
        fo.write("{<strucTxRx.off}\n")
        Emax = Es[0]
        rayset = len(v)
        # grayscale: strong rays dark, rays below -seuildb dB clipped to white
        db = 20 * np.log10(Es)
        c = 1 - (db > -seuildb) * (db + seuildb) / seuildb
        # NOTE(review): round() on a numpy array raises TypeError on Python 3
        # — presumably np.round was intended; confirm before relying on this.
        app = round(np.log10(Es / Emax))
        lw = app - min(app)
        for i in range(rayset):
            j = v[i]
            r = gr.ray3d[j]
            col = np.array([c[i], c[i], c[i]])
            l = int(lw[i])
            fileray = r.show3(False, False, col, j, l)
            #fileray =r.show3(False,False,col,j)
            fo.write("{< " + fileray + " }\n")
        fo.close()
        chaine = "geomview -nopanel -b 1 1 1 " + filename + " 2>/dev/null &"
        os.system(chaine)
class ScalChannel(object):
    """
    DEPRECATED

    ScalChannel Class :
    The ScalChannel is obtained from combination of the propagation
    channel and the antenna transfer function from both transmitting
    and receiving antennas

    Members
    -------
    H : FUDSignal
        ray transfer functions (nray,nfreq)
    dod :
        direction of departure (rad) [theta_t,phi_t] nray x 2
    doa :
        direction of arrival (rad) [theta_r,phi_r] nray x 2
    tauk :
        delay of ray k in ns
    """
    def __init__(self, VC, Ftt, Ftp, Frt, Frp):
        # antenna transfer functions (t = transmit, r = receive; second
        # letter is the polarization component)
        self.Ftt = Ftt
        self.Ftp = Ftp
        self.Frt = Frt
        self.Frp = Frp
        # combine the 2x2 polarimetric channel with both antenna patterns
        t1 = VC.Ctt * Frt + VC.Cpt * Frp
        t2 = VC.Ctp * Frt + VC.Cpp * Frp
        t3 = t1 * Ftt + t2 * Ftp
        self.dod = VC.tang
        self.doa = VC.rang
        self.tau = VC.tauk
        self.H = bs.FUDsignal(t3.x, t3.y, VC.tauk)
        # thresholding of rays: keep only energetically significant ones
        if (VC.nray > 1):
            indices = self.H.enthrsh()
            self.dod = self.dod[indices, :]
            self.doa = self.doa[indices, :]
            self.tau = self.tau[indices, :]
    def info(self):
        """ display information about delays and the transfer function H
        """
        #print 'Ftt,Ftp,Frt,Frp'
        #print 'dod,doa,tau'
        #print 'H - FUDsignal '
        print ('tau min , tau max :', min(self.tau), max(self.tau))
        self.H.info()
    def imshow(self):
        """ imshow visualization of |H| (rays x frequencies)
        """
        self.H
        # NOTE(review): sh and itau are computed but never used
        sh = np.shape(self.H.y)
        itau = np.arange(len(self.tau))
        plt.imshow(abs(self.H.y))
        plt.show()
    def apply(self, W):
        """ Apply a FUsignal W to the ScalChannel.

        Parameters
        ----------
        W : Bsignal.FUsignal

        It exploits multigrid convolution from Bsignal.

        Notes
        -----
        + W may have a more important number of points and a smaller frequency band.
        + If the frequency band of the waveform exceeds the one of the ScalChannel, a warning is sent.
        + W is a FUsignal whose shape doesn't need to be homogeneous with FUDsignal H
        """
        H = self.H
        U = H * W
        V = bs.FUDsignal(U.x, U.y, H.tau0)
        return(V)
    def applywavC(self, w, dxw):
        """ apply waveform method C

        Parameters
        ----------
        w :
            waveform (time domain)
        dxw :
            time step of the waveform

        Notes
        -----
        The overall received signal is built in time domain
        w is applied on the overall CIR
        """
        H = self.H
        h = H.ft1(500, 1)
        dxh = h.dx()
        # resample the coarser of the two signals so both share the same step
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                f = interp1d(w.x, w.y)
                x_new = arange(w.x[0], w.x[-1], dxh)[0:-1]
                y_new = f(x_new)
                w = TUsignal(x_new, y_new)
            else:
                # reinterpolate h
                f = interp1d(h.x, h.y)
                x_new = arange(h.x[0], h.x[-1], dxw)[0:-1]
                y_new = f(x_new)
                h = TUsignal(x_new, y_new)
        ri = h.convolve(w)
        return(ri)
    def applywavB(self, Wgam):
        """ apply waveform method B (time domain )

        Parameters
        ----------
        Wgam :
            waveform including gamma factor

        Returns
        -------
        ri : TUDsignal
            impulse response for each ray separately

        Notes
        ------
        The overall received signal is built in time domain
        Wgam is applied on each Ray Transfer function

        See Also
        --------
        pylayers.signal.bsignal.TUDsignal.ft1
        """
        #
        # return a FUDsignal
        #
        Y = self.apply(Wgam)
        #ri = Y.ft1(500,0)
        # fftshift is enabled
        ri = Y.ft1(500, 1)
        return(ri)
    def applywavA(self, Wgam, Tw):
        """ apply waveform method A

        Parameters
        ----------
        Wgam :
            waveform including gamma factor
        Tw :
            time shift applied to the result

        The overall received signal is built in frequency domain
        """
        Hab = self.H.ft2(0.001)
        HabW = Hab * Wgam
        RI = HabW.symHz(10000)
        ri = RI.ifft(0, 'natural')
        ri.translate(-Tw)
        return(ri)
    def doddoa(self):
        """ doddoa() : DoD / DoA diagram, colored by per-ray energy in dB
        """
        dod = self.dod
        doa = self.doa
        #
        #col = 1 - (10*np.log10(Etot)-Emin)/(Emax-Emin)
        Etot = self.H.energy()
        Etot = Etot / max(Etot)
        al = 180 / np.pi  # rad -> degree conversion
        col = 10 * np.log10(Etot)
        print (len(dod[:, 0]), len(dod[:, 1]), len(col[:]))
        plt.subplot(121)
        plt.scatter(dod[:, 0] * al, dod[:, 1] * al, s=15, c=col,
                    cmap=plt.cm.gray_r, edgecolors='none')
        # NOTE(review): colorbar() and title() below are unqualified —
        # presumably re-exported by the star imports; verify.
        a = colorbar()
        #a.set_label('dB')
        plt.xlabel("$\\theta_t(\degree)$", fontsize=18)
        plt.ylabel('$\phi_t(\degree)$', fontsize=18)
        title('DoD')
        plt.subplot(122)
        plt.scatter(doa[:, 0] * al, doa[:, 1] * al, s=15, c=col,
                    cmap=plt.cm.gray_r, edgecolors='none')
        b = colorbar()
        b.set_label('dB')
        plt.title('DoA')
        plt.xlabel("$\\theta_r(\degree)$", fontsize=18)
        plt.ylabel("$\phi_r (\degree)$", fontsize=18)
        plt.show()
    def wavefig(self, w, Nray=5):
        """ display the channel response to waveform w, ray by ray

        Parameters
        ----------
        w : waveform
        Nray : int
            number of rays to be displayed
        """
        # Build W
        W = w.ft()
        # Apply W
        Y = self.apply(W)
        #r.require('graphics')
        #r.postscript('fig.eps')
        #r('par(mfrow=c(2,2))')
        #Y.fig(Nray)
        y = Y.iftd(100, 0, 50, 0)
        y.fig(Nray)
        #r.dev_off()
        #os.system("gv fig.eps ")
        #y.fidec()
        # On the returned FUsignal
        # Left: display the signal on each ray
        # Right: the same signal delayed
        # Bottom right: the resulting signal
    def rayfig(self, k, W, col='red'):
        """ build a figure with rays

        Parameters
        ----------
        k : ray index
        W : waveform (FUsignal)

        Notes
        -----
        W is applied on the k-th ray and the received signal is built in time domain
        """
        # get the kth Ray Transfer function
        Hk = bs.FUDsignal(self.H.x, self.H.y[k, :])
        dxh = Hk.dx()
        dxw = W.dx()
        w0 = W.x[0]  # fmin W
        hk0 = Hk.x[0]  # fmin Hk
        # ensure hk0 equals w0 (or hk0 slightly below w0) by zero-padding Hk
        if w0 < hk0:
            # NOTE(review): this local `np` shadows the numpy module name
            # for the rest of the method — likely unintentional; confirm.
            np = ceil((hk0 - w0) / dxh)
            hk0_new = hk0 - np * dxh
            x = arange(hk0_new, hk0 + dxh, dxh)[0:-1]
            Hk.x = hstack((x, Hk.x))
            Hk.y = hstack((zeros(np), Hk.y))
        if (abs(dxh - dxw) > 1e-10):
            if (dxh < dxw):
                # reinterpolate w
                print (" resampling w")
                x_new = arange(W.x[0], W.x[-1] + dxh, dxh)[0:-1]
                Wk = W.resample(x_new)
                dx = dxh
            else:
                # reinterpolate h
                print (" resampling h")
                x_new = arange(Hk.x[0], Hk.x[-1] + dxw, dxw)[0:-1]
                Hk = Hk.resample(x_new)
                dx = dxw
                # NOTE(review): if the outer `if` is False, Wk is never
                # assigned and `Hk * Wk` below raises NameError — confirm.
                Wk = W
        # make sure Hk.x[0]==Wk.x[0]
        # if Wk.x[0]!=Hk.x[0]:
        #     x=arange(Wk.x[0],Hk.x[0],dx)
        #     if Hk.x[0]!=x[0]:
        #         Hk.x=hstack((x,Hk.x[1:]))
        #         nz=len(x)
        #         Hk.y=hstack((zeros(nz),Hk.y))
        #     else:
        #         Hk.x=hstack((x,Hk.x[0:]))
        #         nz=len(x)
        #         Hk.y=hstack((zeros(nz),Hk.y))
        #
        self.Hk = Hk
        self.Wk = Wk
        Rk = Hk * Wk
        self.Rk = Rk
        rk = Rk.iftshift()
        plot(rk.x, rk.y, col)
        return(rk)
class VectLOS(Ctilde):
    """ Line-of-sight-only vector channel at distance d.

    Builds a single-ray Ctilde whose co-polar transfer functions have a
    flat 1/d amplitude over [fmin, fmax] GHz and whose delay is d/0.3 ns,
    with zero cross-polar terms.
    """
    def __init__(self, d, fmin=2, fmax=11, Nf=180):
        """
        Parameters
        ----------
        d : float
            Tx/Rx distance in meters
        fmin : float
            lower frequency in GHz (default 2)
        fmax : float
            upper frequency in GHz (default 11)
        Nf : int
            number of frequency points (default 180)
        """
        # propagation delay in ns (light travels ~0.3 m per ns)
        self.tauk = np.array([d / 0.3])
        fGHz = np.linspace(fmin, fmax, Nf)
        # co-polar amplitude 1/d, cross-polar terms zero
        c1 = 1.0 / d * np.ones(len(fGHz))
        c2 = np.zeros(len(fGHz))
        # bugfix: the original did `self.freq = freq` where `freq` was an
        # undefined name (NameError); the frequency axis is fGHz.
        # (The original also called c1.reshape(1, Nf) / c2.reshape(1, Nf)
        # and discarded the results — no-ops removed.)
        self.freq = fGHz
        self.Ctt = bs.FUsignal(fGHz, c1)
        self.Ctp = bs.FUsignal(fGHz, c2)
        self.Cpt = bs.FUsignal(fGHz, c2)
        self.Cpp = bs.FUsignal(fGHz, c1)
        # single direct ray: dummy zero angles for departure/arrival
        self.tang = np.array([0])
        self.rang = np.array([0])
        self.nray = 1
    def cir(self, wav):
        """ Channel Impulse Response

        Parameters
        ----------
        wav :
            waveform object exposing an `sfg` attribute (waveform with
            gamma factor), forwarded to ScalChannel.applywavB

        Returns
        -------
        ciro : impulse response produced by applywavB
        """
        SCO = self.vec2scal()
        ciro = SCO.applywavB(wav.sfg)
        return(ciro)
| [
"pylayers.signal.bsignal.FUsignal",
"pylayers.util.pyutil.getlong",
"pylab.show",
"pylab.title",
"pylab.ylabel",
"numpy.shape",
"pylab.subplot",
"pylab.scatter",
"numpy.array",
"pylab.xlabel",
"numpy.linspace",
"pylayers.signal.bsignal.FUDsignal",
"numpy.log10",
"pylayers.antprop.raysc.GrR... | [((2122, 2163), 'pylayers.util.pyutil.getlong', 'pyu.getlong', (['_filefield', "pstruc['DIRTUD']"], {}), "(_filefield, pstruc['DIRTUD'])\n", (2133, 2163), True, 'import pylayers.util.pyutil as pyu\n'), ((2220, 2260), 'pylayers.util.pyutil.getlong', 'pyu.getlong', (['_filetauk', "pstruc['DIRTUD']"], {}), "(_filetauk, pstruc['DIRTUD'])\n", (2231, 2260), True, 'import pylayers.util.pyutil as pyu\n'), ((2317, 2357), 'pylayers.util.pyutil.getlong', 'pyu.getlong', (['_filetang', "pstruc['DIRTUD']"], {}), "(_filetang, pstruc['DIRTUD'])\n", (2328, 2357), True, 'import pylayers.util.pyutil as pyu\n'), ((2414, 2454), 'pylayers.util.pyutil.getlong', 'pyu.getlong', (['_filerang', "pstruc['DIRTUD']"], {}), "(_filerang, pstruc['DIRTUD'])\n", (2425, 2454), True, 'import pylayers.util.pyutil as pyu\n'), ((5396, 5405), 'pylayers.antprop.raysc.GrRay3D', 'GrRay3D', ([], {}), '()\n', (5403, 5405), False, 'from pylayers.antprop.raysc import GrRay3D\n'), ((7796, 7805), 'pylayers.antprop.raysc.GrRay3D', 'GrRay3D', ([], {}), '()\n', (7803, 7805), False, 'from pylayers.antprop.raysc import GrRay3D\n'), ((9556, 9589), 'pylayers.signal.bsignal.FUDsignal', 'bs.FUDsignal', (['t3.x', 't3.y', 'VC.tauk'], {}), '(t3.x, t3.y, VC.tauk)\n', (9568, 9589), True, 'import pylayers.signal.bsignal as bs\n'), ((10171, 10189), 'numpy.shape', 'np.shape', (['self.H.y'], {}), '(self.H.y)\n', (10179, 10189), True, 'import numpy as np\n'), ((10272, 10282), 'pylab.show', 'plt.show', ([], {}), '()\n', (10280, 10282), True, 'import pylab as plt\n'), ((10863, 10893), 'pylayers.signal.bsignal.FUDsignal', 'bs.FUDsignal', (['U.x', 'U.y', 'H.tau0'], {}), '(U.x, U.y, H.tau0)\n', (10875, 10893), True, 'import pylayers.signal.bsignal as bs\n'), ((13323, 13339), 'pylab.subplot', 'plt.subplot', (['(121)'], {}), '(121)\n', (13334, 13339), True, 'import pylab as plt\n'), ((13348, 13447), 'pylab.scatter', 'plt.scatter', (['(dod[:, 0] * al)', '(dod[:, 1] * al)'], {'s': '(15)', 'c': 'col', 'cmap': 
'plt.cm.gray_r', 'edgecolors': '"""none"""'}), "(dod[:, 0] * al, dod[:, 1] * al, s=15, c=col, cmap=plt.cm.gray_r,\n edgecolors='none')\n", (13359, 13447), True, 'import pylab as plt\n'), ((13522, 13570), 'pylab.xlabel', 'plt.xlabel', (['"""$\\\\theta_t(\\\\degree)$"""'], {'fontsize': '(18)'}), "('$\\\\theta_t(\\\\degree)$', fontsize=18)\n", (13532, 13570), True, 'import pylab as plt\n'), ((13578, 13624), 'pylab.ylabel', 'plt.ylabel', (['"""$\\\\phi_t(\\\\degree)$"""'], {'fontsize': '(18)'}), "('$\\\\phi_t(\\\\degree)$', fontsize=18)\n", (13588, 13624), True, 'import pylab as plt\n'), ((13652, 13668), 'pylab.subplot', 'plt.subplot', (['(122)'], {}), '(122)\n', (13663, 13668), True, 'import pylab as plt\n'), ((13677, 13776), 'pylab.scatter', 'plt.scatter', (['(doa[:, 0] * al)', '(doa[:, 1] * al)'], {'s': '(15)', 'c': 'col', 'cmap': 'plt.cm.gray_r', 'edgecolors': '"""none"""'}), "(doa[:, 0] * al, doa[:, 1] * al, s=15, c=col, cmap=plt.cm.gray_r,\n edgecolors='none')\n", (13688, 13776), True, 'import pylab as plt\n'), ((13850, 13866), 'pylab.title', 'plt.title', (['"""DoA"""'], {}), "('DoA')\n", (13859, 13866), True, 'import pylab as plt\n'), ((13875, 13923), 'pylab.xlabel', 'plt.xlabel', (['"""$\\\\theta_r(\\\\degree)$"""'], {'fontsize': '(18)'}), "('$\\\\theta_r(\\\\degree)$', fontsize=18)\n", (13885, 13923), True, 'import pylab as plt\n'), ((13931, 13978), 'pylab.ylabel', 'plt.ylabel', (['"""$\\\\phi_r (\\\\degree)$"""'], {'fontsize': '(18)'}), "('$\\\\phi_r (\\\\degree)$', fontsize=18)\n", (13941, 13978), True, 'import pylab as plt\n'), ((13985, 13995), 'pylab.show', 'plt.show', ([], {}), '()\n', (13993, 13995), True, 'import pylab as plt\n'), ((15064, 15102), 'pylayers.signal.bsignal.FUDsignal', 'bs.FUDsignal', (['self.H.x', 'self.H.y[k, :]'], {}), '(self.H.x, self.H.y[k, :])\n', (15076, 15102), True, 'import pylayers.signal.bsignal as bs\n'), ((16728, 16747), 'numpy.array', 'np.array', (['[d / 0.3]'], {}), '([d / 0.3])\n', (16736, 16747), True, 'import numpy as 
np\n'), ((16763, 16790), 'numpy.linspace', 'np.linspace', (['fmin', 'fmax', 'Nf'], {}), '(fmin, fmax, Nf)\n', (16774, 16790), True, 'import numpy as np\n'), ((16959, 16980), 'pylayers.signal.bsignal.FUsignal', 'bs.FUsignal', (['fGHz', 'c1'], {}), '(fGHz, c1)\n', (16970, 16980), True, 'import pylayers.signal.bsignal as bs\n'), ((17000, 17021), 'pylayers.signal.bsignal.FUsignal', 'bs.FUsignal', (['fGHz', 'c2'], {}), '(fGHz, c2)\n', (17011, 17021), True, 'import pylayers.signal.bsignal as bs\n'), ((17041, 17062), 'pylayers.signal.bsignal.FUsignal', 'bs.FUsignal', (['fGHz', 'c2'], {}), '(fGHz, c2)\n', (17052, 17062), True, 'import pylayers.signal.bsignal as bs\n'), ((17082, 17103), 'pylayers.signal.bsignal.FUsignal', 'bs.FUsignal', (['fGHz', 'c1'], {}), '(fGHz, c1)\n', (17093, 17103), True, 'import pylayers.signal.bsignal as bs\n'), ((5775, 5794), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (5783, 5794), True, 'import numpy as np\n'), ((6096, 6115), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (6104, 6115), True, 'import numpy as np\n'), ((6419, 6438), 'numpy.array', 'np.array', (['[0, 1, 1]'], {}), '([0, 1, 1])\n', (6427, 6438), True, 'import numpy as np\n'), ((6744, 6763), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (6752, 6763), True, 'import numpy as np\n'), ((7046, 7065), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (7054, 7065), True, 'import numpy as np\n'), ((8097, 8109), 'numpy.log10', 'np.log10', (['Es'], {}), '(Es)\n', (8105, 8109), True, 'import numpy as np\n'), ((8189, 8208), 'numpy.log10', 'np.log10', (['(Es / Emax)'], {}), '(Es / Emax)\n', (8197, 8208), True, 'import numpy as np\n'), ((8338, 8366), 'numpy.array', 'np.array', (['[c[i], c[i], c[i]]'], {}), '([c[i], c[i], c[i]])\n', (8346, 8366), True, 'import numpy as np\n'), ((13240, 13254), 'numpy.log10', 'np.log10', (['Etot'], {}), '(Etot)\n', (13248, 13254), True, 'import numpy as np\n')] |
import torch
import numpy as np
import torchvision.transforms.functional as F
from PIL import Image
class Resize(object):
    """Sample transform: resize the 'image' entry to a fixed size.

    The image is converted to PIL, resized via torchvision's functional
    API, and returned as a numpy array; the label passes through untouched.
    """

    def __init__(self, size):
        # Target size understood by torchvision.transforms.functional.resize.
        self.size = size

    def __call__(self, sample):
        label = sample['label']
        # PIL round-trip: functional resize operates on PIL images.
        pil_img = F.to_pil_image(sample['image'])
        resized = F.resize(pil_img, self.size)
        return {'image': np.array(resized), 'label': label}
class Normalize(object):
    """Sample transform: scale the 'image' entry from [0, 255] to [0, 1]."""

    def __call__(self, sample):
        # true_divide always produces a floating-point result,
        # even when the input image holds integers.
        scaled = np.true_divide(sample['image'], 255)
        return {'image': scaled, 'label': sample['label']}
class Gray2RGB(object):
    """Sample transform: replicate a single channel 3x along axis 0.

    Turns a (1, H, W) grayscale image into a (3, H, W) pseudo-RGB image.
    """

    def __call__(self, sample):
        rgb = np.repeat(sample['image'], 3, axis=0)
        return {'image': rgb, 'label': sample['label']}
class ToTensor(object):
def __call__(self, sample):
image, label = sample['image'], sample['label']
return {'image': torch.from_numpy(image),
'label': torch.from_numpy(np.array([label]))} | [
"torch.from_numpy",
"numpy.true_divide",
"torchvision.transforms.functional.resize",
"torchvision.transforms.functional.to_pil_image",
"numpy.array",
"numpy.repeat"
] | [((287, 308), 'torchvision.transforms.functional.to_pil_image', 'F.to_pil_image', (['image'], {}), '(image)\n', (301, 308), True, 'import torchvision.transforms.functional as F\n'), ((446, 472), 'torchvision.transforms.functional.resize', 'F.resize', (['image', 'self.size'], {}), '(image, self.size)\n', (454, 472), True, 'import torchvision.transforms.functional as F\n'), ((662, 688), 'numpy.true_divide', 'np.true_divide', (['image', '(255)'], {}), '(image, 255)\n', (676, 688), True, 'import numpy as np\n'), ((867, 894), 'numpy.repeat', 'np.repeat', (['image', '(3)'], {'axis': '(0)'}), '(image, 3, axis=0)\n', (876, 894), True, 'import numpy as np\n'), ((499, 514), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (507, 514), True, 'import numpy as np\n'), ((1083, 1106), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (1099, 1106), False, 'import torch\n'), ((1150, 1167), 'numpy.array', 'np.array', (['[label]'], {}), '([label])\n', (1158, 1167), True, 'import numpy as np\n')] |
import h5py
import numpy as np
import scipy as sp
import pandas as pd
import scipy.interpolate as intp
import scipy.signal as sg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from tqdm import tqdm
from sklearn import manifold
from scipy.optimize import least_squares
from dechorate import constants
from dechorate.dataset import DechorateDataset, SyntheticDataset
from dechorate.calibration_and_mds import *
from dechorate.utils.file_utils import save_to_matlab, load_from_pickle, save_to_pickle
from dechorate.utils.dsp_utils import envelope, normalize
from dechorate.utils.mds_utils import edm
from risotto import deconvolution as deconv
# Global experiment constants pulled from the dechorate package configuration.
ox, oy, oz = constants['offset_beacon']
Fs = constants['Fs'] # Sampling frequency
recording_offset = constants['recording_offset']
Rx, Ry, Rz = constants['room_size']
speed_of_sound = constants['speed_of_sound'] # speed of sound
# Tolerance for counting an estimated echo as "accurate".
ACC_ECHO_THR_SAMLPS = 5
# NOTE(review): the sample-derived value is immediately overridden by the
# hard-coded 0.05 ms on the next line — presumably intentional tuning; confirm.
ACC_ECHO_THR_SECNDS = ACC_ECHO_THR_SAMLPS/Fs
ACC_ECHO_THR_SECNDS = 0.05e-3
ACC_ECHO_THR_METERS = ACC_ECHO_THR_SECNDS/speed_of_sound # meters
dataset_dir = './data/dECHORATE/'
path_to_processed = './data/processed/'
def load_rirs(path_to_dataset_rir, dataset, K, dataset_id, mics_pos, srcs_pos):
    """ Load measured RIRs and simulate first-order echo annotation.

    Parameters
    ----------
    path_to_dataset_rir : str
        path to the HDF5 file holding the measured RIRs.
    dataset : pandas.DataFrame
        database rows already filtered to the session of interest.
    K : int
        NOTE(review): unused inside this function — kept for interface
        symmetry with iterative_calibration; confirm.
    dataset_id : str
        session identifier forwarded to SyntheticDataset.
    mics_pos, srcs_pos : (3, I)/(3, J) arrays or None
        current position estimates; when None (or a column is all-zero)
        positions are taken from the manual annotation in `dataset`.

    Returns
    -------
    rirs : (L, I, J) array of |RIR|, each normalized to unit max
    toa_sym : (7, I, J) simulated times of arrival (seconds)
    mics_pos, srcs_pos : possibly-updated position arrays
    """
    f_rir = h5py.File(path_to_dataset_rir, 'r')
    # mic/src ids present in the session (drop NaN rows)
    all_src_ids = np.unique(dataset['src_id'])
    all_src_ids = all_src_ids[~np.isnan(all_src_ids)]
    all_mic_ids = np.unique(dataset['mic_id'])
    all_mic_ids = all_mic_ids[~np.isnan(all_mic_ids)]
    I = len(all_mic_ids)
    J = len(all_src_ids)
    if mics_pos is None:
        mics_pos = np.zeros([3, I])
    if srcs_pos is None:
        srcs_pos = np.zeros([3, J])
    # 7 = direct path + 6 wall reflections
    # NOTE(review): amp_sym, ord_sym and wal_sym are filled but never
    # returned — dead outputs as written.
    toa_sym = np.zeros([7, I, J])
    amp_sym = np.zeros([7, I, J])
    ord_sym = np.zeros([7, I, J])
    wal_sym = np.chararray([7, I, J])
    L = int(0.5*Fs) # max length of the filter
    rirs = np.zeros([L, I, J])
    for j in tqdm(range(J)):
        for i in range(I):
            # find recording correspondent to mic i and src j
            entry = dataset.loc[(dataset['src_id'] == j+1) & (dataset['mic_id'] == i+1)]
            assert len(entry) == 1
            wavefile = entry['filename'].values[0]
            rir = f_rir['rir/%s/%d' % (wavefile, i)][()].squeeze()
            # normalize each RIR to unit peak; only the magnitude is kept
            rir = rir/np.max(np.abs(rir))
            rirs[:, i, j] = np.abs(rir)
            # positions still at the zero placeholder -> read them from
            # the manual annotation and apply the beacon offset
            if np.allclose(mics_pos[:, i], 0):
                print('mic', i, 'from manual annotation')
                mic_pos = [entry['mic_pos_x'].values, entry['mic_pos_y'].values, entry['mic_pos_z'].values]
                # apply offset
                mic_pos[0] = mic_pos[0] + constants['offset_beacon'][0]
                mic_pos[1] = mic_pos[1] + constants['offset_beacon'][1]
                mic_pos[2] = mic_pos[2] + constants['offset_beacon'][2]
                mics_pos[:, i] = np.array(mic_pos).squeeze()
            if np.allclose(srcs_pos[:, j], 0):
                print('src', j, 'from manual annotation')
                src_pos = [entry['src_pos_x'].values, entry['src_pos_y'].values, entry['src_pos_z'].values]
                # apply offset
                src_pos[0] = src_pos[0] + constants['offset_beacon'][0]
                src_pos[1] = src_pos[1] + constants['offset_beacon'][1]
                src_pos[2] = src_pos[2] + constants['offset_beacon'][2]
                srcs_pos[:, j] = np.array(src_pos).squeeze()
            # simulate first-order image-source echoes for this mic/src pair
            synth_dset = SyntheticDataset()
            synth_dset.set_room_size(constants['room_size'])
            synth_dset.set_dataset(dataset_id, absb=1, refl=0)
            synth_dset.set_c(speed_of_sound)
            synth_dset.set_k_order(1)
            synth_dset.set_k_reflc(7)
            synth_dset.set_mic(mics_pos[0, i], mics_pos[1, i], mics_pos[2, i])
            synth_dset.set_src(srcs_pos[0, j], srcs_pos[1, j], srcs_pos[2, j])
            tau, amp = synth_dset.get_note()
            toa_sym[:, i, j] = tau
            amp_sym[:, i, j] = amp
            wal_sym[:, i, j] = 0 #FIXME
            ord_sym[:, i, j] = 0 #FIXME
    return rirs, toa_sym, mics_pos, srcs_pos
def iterative_calibration(dataset_id, mics_pos, srcs_pos, K, toa_peak):
    """ One pass of echo-aided microphone/source position calibration.

    Parameters
    ----------
    dataset_id : str
        6-character 0/1 string encoding reflective surfaces
        (floor, ceiling, west, east, north, south), e.g. '011111'.
    mics_pos, srcs_pos : (3, I)/(3, J) arrays or None
        current estimates; None on the first call (positions then come
        from the manual annotation inside load_rirs).
    K : int
        number of wall echoes used on top of the direct path (0 or 1;
        K == 2 support is only sketched / commented out below).
    toa_peak : (7, I, J) array
        hand-picked times of arrival in seconds.

    Returns
    -------
    mics_pos_est, srcs_pos_est, mics_pos, srcs_pos, toa_sym, rirs
    """
    refl_order = constants['refl_order_pyroom']
    curr_reflectors = constants['refl_order_calibr'][:K+1]
    # NOTE(review): d is unused below.
    d = constants['datasets'].index(dataset_id)
    path_to_dataset_rir = path_to_processed + '%s_rir_data.hdf5' % dataset_id
    path_to_database = dataset_dir + 'annotations/dECHORATE_database.csv'
    dataset = pd.read_csv(path_to_database)
    # select dataset with entries according to session_id
    f, c, w, e, n, s = [int(i) for i in list(dataset_id)]
    dataset = dataset.loc[
        (dataset['room_rfl_floor'] == f)
        & (dataset['room_rfl_ceiling'] == c)
        & (dataset['room_rfl_west'] == w)
        & (dataset['room_rfl_east'] == e)
        & (dataset['room_rfl_north'] == n)
        & (dataset['room_rfl_south'] == s)
        & (dataset['room_fornitures'] == False)
        & (dataset['src_signal'] == 'chirp')
        & (dataset['src_id'] < 5)
    ]
    # LOAD MEASURED RIRs
    # and COMPUTED PYROOM ANNOTATION
    rirs, toa_sym, mics_pos, srcs_pos = load_rirs(path_to_dataset_rir, dataset, K, dataset_id, mics_pos, srcs_pos)
    # fraction of echoes whose simulated/picked TOA agree within threshold
    acc = np.sum(np.abs(toa_sym[:7, :, :] - toa_peak[:7, :, :])<ACC_ECHO_THR_SECNDS)/np.size(toa_peak[:7, :, :])
    print('---- iter %d -----', K)
    print('ACCURACY input', acc)
    print('--------------------')
    print(toa_sym[:7, 0, 0])
    print(toa_peak[:7, 0, 0])
    print(toa_peak.shape, toa_sym.shape)
    assert toa_peak.shape == toa_sym.shape
    assert toa_peak.shape[1] == mics_pos.shape[1]
    assert toa_peak.shape[2] == srcs_pos.shape[1]
    L, I, J = rirs.shape
    D, I = mics_pos.shape
    D, J = srcs_pos.shape
    # "skyline" view: stack all RIRs side by side, grouped by source
    rirs_skyline = rirs.transpose([0, 2, 1]).reshape([L, I*J])
    plt.imshow(rirs_skyline, extent=[0, I*J, 0, L], aspect='auto')
    for j in range(J):
        plt.axvline(j*30, color='C7')
    plt.axhline(y=L-recording_offset, label='Time of Emission')
    # overlay picked vs simulated TOAs for each reflector in use
    for k in range(K+1):
        print(curr_reflectors)
        wall = curr_reflectors[k]
        r = refl_order.index(wall)
        plt.scatter(np.arange(I*J)+0.5, L - recording_offset - toa_peak[r,:,:].T.flatten()*Fs,
                    c='C%d'%(k+2), marker='x', label='Peak Picking')
        plt.scatter(np.arange(I*J)+0.5, L - recording_offset - toa_sym[r, :, :].T.flatten()*Fs,
                    marker='o', facecolors='none', edgecolors='C%d'%(k+2), label='Pyroom')
    plt.tight_layout()
    plt.legend()
    plt.title('RIR SKYLINE K = %d' % K)
    plt.savefig('./reports/figures/rir_skyline.pdf')
    acc = np.sum(np.abs(toa_sym[:7, :, :] - toa_peak[:7, :, :])<ACC_ECHO_THR_SECNDS)/np.size(toa_peak[:7, :, :])
    print('ACCURACY', acc)
    plt.show()
    # plt.close()
    # ## MULTIDIMENSIONAL SCALING
    # select sub set of microphones and sources
    # # nonlinear least square problem with good initialization
    X = mics_pos
    A = srcs_pos
    # Euclidean distance matrix from the current geometry
    Dgeo = edm(X, A)
    if K == 0:
        curr_refl_name = 'd'
        r = refl_order.index(curr_refl_name)
        Dtoa = toa_peak[r, :I, :J] * speed_of_sound
        Dsym = toa_sym[r, :I, :J] * speed_of_sound
    if K == 1:
        Dtoa = toa_peak[0, :I, :J] * speed_of_sound
        Dsym = toa_sym[0, :I, :J] * speed_of_sound
        wall = curr_reflectors[1]
        r = refl_order.index(wall)
        # echo distances via the ceiling image source
        De_c = toa_peak[r, :I, :J] * speed_of_sound
    # if K == 2:
    #     Dobs = toa_peak[0, :I, :J] * speed_of_sound
    #     Dsym = toa_sym[0, :I, :J] * speed_of_sound
    #     wall = curr_reflectors[1]
    #     r = refl_order.index(wall)
    #     print(wall, r)
    #     De_c = toa_peak[r, :I, :J] * speed_of_sound
    #     wall = curr_reflectors[2]
    #     r = refl_order.index(wall)
    #     print(wall, r)
    #     De_f = toa_peak[r, :I, :J] * speed_of_sound
    assert np.allclose(Dgeo, Dsym)
    # plt.subplot(141)
    # plt.imshow(Dgeo, aspect='auto')
    # plt.title('Geometry init')
    # plt.subplot(142)
    # plt.imshow(Dsym, aspect='auto')
    # plt.title('Pyroom init')
    # plt.subplot(143)
    # plt.imshow(Dobs, aspect='auto')
    # plt.title('Peak picking')
    # plt.subplot(144)
    # plt.imshow(np.abs(Dobs - Dsym), aspect='auto')
    # plt.title('Diff')
    # plt.show()
    rmse = lambda x, y : np.sqrt(np.mean(np.abs(x - y)**2))
    print('# GEOM/SIGNAL MISSMATCH')
    Dgeo = Dgeo.copy()
    Dsgn = Dtoa.copy()
    Dcal = Dtoa.copy()
    print('## Before unfolding')
    print('### calib vs geo')
    me_i = np.max(np.abs(Dcal - Dgeo))
    mae_i = np.mean(np.abs(Dcal - Dgeo))
    rmse_i = rmse(Dcal, Dgeo)
    std_i = np.std(np.abs(Dcal - Dgeo))
    print(np.size(Dcal))
    print(np.shape(Dcal))
    print('- input ME', me_i)
    print('- input MAE', mae_i)
    print('- input RMSE', rmse_i)
    print('- input std', std_i)
    print('### calib vs sig')
    me_i = np.max(np.abs(Dcal - Dsgn))
    mae_i = np.mean(np.abs(Dcal - Dsgn))
    rmse_i = rmse(Dcal, Dsgn)
    std_i = np.std(np.abs(Dcal - Dsgn))
    print('- input ME', me_i)
    print('- input MAE', mae_i)
    print('- input RMSE', rmse_i)
    print('- input std', std_i)
    print('UNFOLDING')
    # nonlinear least-squares multidimensional unfolding, with the extra
    # ceiling/floor echo distances as additional constraints when available
    if K == 0:
        X_est, A_est = nlls_mds_array(Dsgn, X, A)
        # X_est, A_est = nlls_mds(Dsgn, X, A)
    elif K == 1:
        X_est, A_est = nlls_mds_array_ceiling(Dsgn, De_c, X, A)
        # X_est, A_est = nlls_mds_ceiling(Dsgn, De_c, X, A)
    elif K == 2:
        # NOTE(review): De_f is only defined in the commented-out K == 2
        # block above, so this branch would raise NameError as written.
        X_est, A_est = nlls_mds_array_images(Dsgn, De_c, De_f, X, A)
        # X_est, A_est = nlls_mds_images(Dsgn, De_c, De_f, X, A)
    else:
        pass
    # mics_pos_est, srcs_pos_est = nlls_mds_array(Dtof, X, A)
    # mics_pos_est, srcs_pos_est = crcc_mds(D, init={'X': X, 'A': A}
    Dcal = edm(X_est, A_est)
    print('## AFTER unfolding')
    print('### calib vs geo')
    me_o = np.max(np.abs(Dgeo - Dcal))
    mae_o = np.mean(np.abs(Dgeo - Dcal))
    rmse_o = rmse(Dgeo, Dcal)
    std_o = np.std(np.abs(Dgeo - Dcal))
    print('- output ME', me_o)
    print('- output MAE', mae_o)
    print('- output RMSE', rmse_o)
    print('- output std', std_o)
    print('### calib vs sig')
    me_o = np.max(np.abs(Dsgn - Dcal))
    mae_o = np.mean(np.abs(Dsgn - Dcal))
    rmse_o = rmse(Dsgn, Dcal)
    std_o = np.std(np.abs(Dsgn - Dcal))
    print('- output ME', me_o)
    print('- output MAE', mae_o)
    print('- output RMSE', rmse_o)
    print('- output std', std_o)
    mics_pos_est = X_est
    srcs_pos_est = A_est
    return mics_pos_est, srcs_pos_est, mics_pos, srcs_pos, toa_sym, rirs
if __name__ == "__main__":
    # Calibration driver: refine mic/src positions with the direct path
    # (K=0), then with the ceiling echo (K=1), save results, and plot the
    # final RIR skyline with all seven annotated echoes.
    datasets = constants['datasets']
    ## INITIALIZATION
    mics_pos_est = None
    srcs_pos_est = None
    dataset_id = '011111'
    # LOAD MANUAL ANNOTATION
    # (the first filename is intentionally overridden by the second)
    annotation_file = '20200508_16h29_gui_annotation.pkl'
    annotation_file = '20200508_18h34_gui_annotation.pkl' # good one, but the following problems:
    # src 0: bad south, west
    # src 1: bad north, east
    # src 2: bad south, west, east
    path_to_manual_annotation = './data/processed/rirs_manual_annotation/' + annotation_file
    manual_note = load_from_pickle(path_to_manual_annotation)
    # we select only the reflection of order 0
    # namely one reflection coming from each wall
    toa_peak = manual_note['toa'][:7, :, :4, 0]
    ## K = 0: direct path estimation
    K = 0
    mics_pos_est, srcs_pos_est, mics_pos, srcs_pos, toa_sym, rirs \
        = iterative_calibration(dataset_id, mics_pos_est, srcs_pos_est, K, toa_peak)
    print(mics_pos.shape)
    print(mics_pos_est.shape)
    # 3D scatter of annotated vs estimated positions after the K=0 pass
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(mics_pos[0, :], mics_pos[1, :], mics_pos[2, :], marker='o', label='mics init')
    ax.scatter(srcs_pos[0, :], srcs_pos[1, :], srcs_pos[2, :], marker='o', label='srcs init')
    ax.scatter(mics_pos_est[0, :], mics_pos_est[1, :], mics_pos_est[2, :], marker='x', label='mics est')
    ax.scatter(srcs_pos_est[0, :], srcs_pos_est[1, :], srcs_pos_est[2, :], marker='x', label='srcs est')
    ax.set_xlim([0, Rx])
    ax.set_ylim([0, Ry])
    ax.set_zlim([0, Rz])
    plt.legend()
    plt.savefig('./reports/figures/cal_positioning3D_k0.pdf')
    plt.show()
    ## K = 1: echo 1 -- from the ceiling
    K = 1
    mics_pos_est, srcs_pos_est, mics_pos, srcs_pos, toa_sym, rirs \
        = iterative_calibration(dataset_id, mics_pos_est, srcs_pos_est, K, toa_peak)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(mics_pos[0, :], mics_pos[1, :], mics_pos[2, :], marker='o', label='mics init')
    ax.scatter(srcs_pos[0, :], srcs_pos[1, :], srcs_pos[2, :], marker='o', label='srcs init')
    ax.scatter(mics_pos_est[0, :], mics_pos_est[1, :], mics_pos_est[2, :], marker='x', label='mics est')
    ax.scatter(srcs_pos_est[0, :], srcs_pos_est[1, :], srcs_pos_est[2, :], marker='x', label='srcs est')
    ax.set_xlim([0, Rx])
    ax.set_ylim([0, Ry])
    ax.set_zlim([0, Rz])
    plt.legend()
    plt.savefig('./reports/figures/cal_positioning3D_k1.pdf')
    plt.show()
    # persist calibrated geometry together with both TOA annotations
    calib_res = {
        'mics': mics_pos_est,
        'srcs': srcs_pos_est,
        'toa_pck' : toa_peak,
        'toa_sym' : toa_sym,
        'rirs' : rirs,
    }
    save_to_pickle('./data/processed/post2_calibration/calib_output_mics_srcs_pos.pkl', calib_res)
    # FINALLY, RIR SKYLINE WITH ALL THE ECHOES
    curr_reflectors = constants['refl_order_calibr'][:7]
    refl_order = constants['refl_order_pyroom']
    L, I, J = rirs.shape
    rirs_skyline = rirs.transpose([0, 2, 1]).reshape([L, I*J])
    plt.imshow(rirs_skyline, extent=[0, I*J, 0, L], aspect='auto')
    # plot srcs boundaries
    for j in range(J):
        plt.axvline(j*30, color='C7')
    # plot time of emission
    # plt.axhline(y=L-recording_offset, label='Time of Emission')
    for k in range(7):
        print(curr_reflectors)
        wall = curr_reflectors[k]
        r = refl_order.index(wall)
        print(r)
        # plot peak annotation
        plt.scatter(np.arange(I*J)+0.5, L - recording_offset - toa_peak[r, :, :].T.flatten()*Fs,
                    c='C%d' % (k+2), marker='x', label='%s Picking' % wall)
        # plot simulated peak
        plt.scatter(np.arange(I*J)+0.5, L - recording_offset - toa_sym[r, :, :].T.flatten()*Fs,
                    marker='o', facecolors='none', edgecolors='C%d' % (k+2), label='%s Pyroom' % wall)
    # plt.xticks(ticks=[0, J, 2*J, 3*J], lables=['source #1', 'source #2', 'source #3', 'source #4'])
    plt.xlabel('microphone index per source')
    plt.ylabel('Time [samples]')
    plt.tight_layout()
    plt.legend(ncol=7)
    # plt.title('RIR SKYLINE K = %d' % K)
    plt.savefig('./reports/figures/rir_skyline_final.pdf')
    plt.show()
    ## save here for the GUI later
    new_manual_note = manual_note.copy()
    new_manual_note['toa'][:7, :, :4, 0] = toa_sym[:7, :, :4]
    save_to_pickle('./data/processed/post2_calibration/calib_output_toa_pck.pkl', new_manual_note)
    new_manual_note['toa'][:7, :, :4, 0] = toa_peak[:7, :, :4]
    save_to_pickle('./data/processed/post2_calibration/calib_output_toa_sym.pkl', new_manual_note)
    # ## K = 2: echo 1,2 -- from the ceiling and the floor
    # K = 2
    # NOTE(review): K is still 1 here, so this final call repeats the K=1
    # stage — looks like a leftover from the disabled K=2 experiment; confirm.
    print('Here K=', K)
    mics_pos_est, srcs_pos_est, mics_pos, srcs_pos, toa_sym, rirs \
        = iterative_calibration(dataset_id, mics_pos_est, srcs_pos_est, K, toa_peak)
    pass
| [
"matplotlib.pyplot.title",
"numpy.abs",
"pandas.read_csv",
"numpy.allclose",
"numpy.isnan",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.imshow",
"dec... | [((1242, 1277), 'h5py.File', 'h5py.File', (['path_to_dataset_rir', '"""r"""'], {}), "(path_to_dataset_rir, 'r')\n", (1251, 1277), False, 'import h5py\n'), ((1297, 1325), 'numpy.unique', 'np.unique', (["dataset['src_id']"], {}), "(dataset['src_id'])\n", (1306, 1325), True, 'import numpy as np\n'), ((1398, 1426), 'numpy.unique', 'np.unique', (["dataset['mic_id']"], {}), "(dataset['mic_id'])\n", (1407, 1426), True, 'import numpy as np\n'), ((1671, 1690), 'numpy.zeros', 'np.zeros', (['[7, I, J]'], {}), '([7, I, J])\n', (1679, 1690), True, 'import numpy as np\n'), ((1706, 1725), 'numpy.zeros', 'np.zeros', (['[7, I, J]'], {}), '([7, I, J])\n', (1714, 1725), True, 'import numpy as np\n'), ((1740, 1759), 'numpy.zeros', 'np.zeros', (['[7, I, J]'], {}), '([7, I, J])\n', (1748, 1759), True, 'import numpy as np\n'), ((1774, 1797), 'numpy.chararray', 'np.chararray', (['[7, I, J]'], {}), '([7, I, J])\n', (1786, 1797), True, 'import numpy as np\n'), ((1857, 1876), 'numpy.zeros', 'np.zeros', (['[L, I, J]'], {}), '([L, I, J])\n', (1865, 1876), True, 'import numpy as np\n'), ((4504, 4533), 'pandas.read_csv', 'pd.read_csv', (['path_to_database'], {}), '(path_to_database)\n', (4515, 4533), True, 'import pandas as pd\n'), ((5853, 5917), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rirs_skyline'], {'extent': '[0, I * J, 0, L]', 'aspect': '"""auto"""'}), "(rirs_skyline, extent=[0, I * J, 0, L], aspect='auto')\n", (5863, 5917), True, 'import matplotlib.pyplot as plt\n'), ((5985, 6046), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(L - recording_offset)', 'label': '"""Time of Emission"""'}), "(y=L - recording_offset, label='Time of Emission')\n", (5996, 6046), True, 'import matplotlib.pyplot as plt\n'), ((6527, 6545), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6543, 6545), True, 'import matplotlib.pyplot as plt\n'), ((6550, 6562), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6560, 6562), True, 'import matplotlib.pyplot as 
plt\n'), ((6567, 6602), 'matplotlib.pyplot.title', 'plt.title', (["('RIR SKYLINE K = %d' % K)"], {}), "('RIR SKYLINE K = %d' % K)\n", (6576, 6602), True, 'import matplotlib.pyplot as plt\n'), ((6607, 6655), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./reports/figures/rir_skyline.pdf"""'], {}), "('./reports/figures/rir_skyline.pdf')\n", (6618, 6655), True, 'import matplotlib.pyplot as plt\n'), ((6802, 6812), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6810, 6812), True, 'import matplotlib.pyplot as plt\n'), ((7025, 7034), 'dechorate.utils.mds_utils.edm', 'edm', (['X', 'A'], {}), '(X, A)\n', (7028, 7034), False, 'from dechorate.utils.mds_utils import edm\n'), ((7906, 7929), 'numpy.allclose', 'np.allclose', (['Dgeo', 'Dsym'], {}), '(Dgeo, Dsym)\n', (7917, 7929), True, 'import numpy as np\n'), ((9793, 9810), 'dechorate.utils.mds_utils.edm', 'edm', (['X_est', 'A_est'], {}), '(X_est, A_est)\n', (9796, 9810), False, 'from dechorate.utils.mds_utils import edm\n'), ((11149, 11192), 'dechorate.utils.file_utils.load_from_pickle', 'load_from_pickle', (['path_to_manual_annotation'], {}), '(path_to_manual_annotation)\n', (11165, 11192), False, 'from dechorate.utils.file_utils import save_to_matlab, load_from_pickle, save_to_pickle\n'), ((11607, 11619), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11617, 11619), True, 'import matplotlib.pyplot as plt\n'), ((12144, 12156), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12154, 12156), True, 'import matplotlib.pyplot as plt\n'), ((12161, 12218), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./reports/figures/cal_positioning3D_k0.pdf"""'], {}), "('./reports/figures/cal_positioning3D_k0.pdf')\n", (12172, 12218), True, 'import matplotlib.pyplot as plt\n'), ((12223, 12233), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12231, 12233), True, 'import matplotlib.pyplot as plt\n'), ((12450, 12462), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12460, 12462), 
True, 'import matplotlib.pyplot as plt\n'), ((12987, 12999), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (12997, 12999), True, 'import matplotlib.pyplot as plt\n'), ((13004, 13061), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./reports/figures/cal_positioning3D_k1.pdf"""'], {}), "('./reports/figures/cal_positioning3D_k1.pdf')\n", (13015, 13061), True, 'import matplotlib.pyplot as plt\n'), ((13066, 13076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13074, 13076), True, 'import matplotlib.pyplot as plt\n'), ((13250, 13353), 'dechorate.utils.file_utils.save_to_pickle', 'save_to_pickle', (['"""./data/processed/post2_calibration/calib_output_mics_srcs_pos.pkl"""', 'calib_res'], {}), "(\n './data/processed/post2_calibration/calib_output_mics_srcs_pos.pkl',\n calib_res)\n", (13264, 13353), False, 'from dechorate.utils.file_utils import save_to_matlab, load_from_pickle, save_to_pickle\n'), ((13591, 13655), 'matplotlib.pyplot.imshow', 'plt.imshow', (['rirs_skyline'], {'extent': '[0, I * J, 0, L]', 'aspect': '"""auto"""'}), "(rirs_skyline, extent=[0, I * J, 0, L], aspect='auto')\n", (13601, 13655), True, 'import matplotlib.pyplot as plt\n'), ((14523, 14564), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""microphone index per source"""'], {}), "('microphone index per source')\n", (14533, 14564), True, 'import matplotlib.pyplot as plt\n'), ((14569, 14597), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Time [samples]"""'], {}), "('Time [samples]')\n", (14579, 14597), True, 'import matplotlib.pyplot as plt\n'), ((14602, 14620), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14618, 14620), True, 'import matplotlib.pyplot as plt\n'), ((14625, 14643), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'ncol': '(7)'}), '(ncol=7)\n', (14635, 14643), True, 'import matplotlib.pyplot as plt\n'), ((14690, 14744), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./reports/figures/rir_skyline_final.pdf"""'], {}), 
"('./reports/figures/rir_skyline_final.pdf')\n", (14701, 14744), True, 'import matplotlib.pyplot as plt\n'), ((14749, 14759), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14757, 14759), True, 'import matplotlib.pyplot as plt\n'), ((14903, 15001), 'dechorate.utils.file_utils.save_to_pickle', 'save_to_pickle', (['"""./data/processed/post2_calibration/calib_output_toa_pck.pkl"""', 'new_manual_note'], {}), "('./data/processed/post2_calibration/calib_output_toa_pck.pkl',\n new_manual_note)\n", (14917, 15001), False, 'from dechorate.utils.file_utils import save_to_matlab, load_from_pickle, save_to_pickle\n'), ((15065, 15163), 'dechorate.utils.file_utils.save_to_pickle', 'save_to_pickle', (['"""./data/processed/post2_calibration/calib_output_toa_sym.pkl"""', 'new_manual_note'], {}), "('./data/processed/post2_calibration/calib_output_toa_sym.pkl',\n new_manual_note)\n", (15079, 15163), False, 'from dechorate.utils.file_utils import save_to_matlab, load_from_pickle, save_to_pickle\n'), ((1577, 1593), 'numpy.zeros', 'np.zeros', (['[3, I]'], {}), '([3, I])\n', (1585, 1593), True, 'import numpy as np\n'), ((1638, 1654), 'numpy.zeros', 'np.zeros', (['[3, J]'], {}), '([3, J])\n', (1646, 1654), True, 'import numpy as np\n'), ((5332, 5359), 'numpy.size', 'np.size', (['toa_peak[:7, :, :]'], {}), '(toa_peak[:7, :, :])\n', (5339, 5359), True, 'import numpy as np\n'), ((5951, 5982), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(j * 30)'], {'color': '"""C7"""'}), "(j * 30, color='C7')\n", (5962, 5982), True, 'import matplotlib.pyplot as plt\n'), ((6742, 6769), 'numpy.size', 'np.size', (['toa_peak[:7, :, :]'], {}), '(toa_peak[:7, :, :])\n', (6749, 6769), True, 'import numpy as np\n'), ((8578, 8597), 'numpy.abs', 'np.abs', (['(Dcal - Dgeo)'], {}), '(Dcal - Dgeo)\n', (8584, 8597), True, 'import numpy as np\n'), ((8619, 8638), 'numpy.abs', 'np.abs', (['(Dcal - Dgeo)'], {}), '(Dcal - Dgeo)\n', (8625, 8638), True, 'import numpy as np\n'), ((8689, 8708), 'numpy.abs', 'np.abs', 
(['(Dcal - Dgeo)'], {}), '(Dcal - Dgeo)\n', (8695, 8708), True, 'import numpy as np\n'), ((8720, 8733), 'numpy.size', 'np.size', (['Dcal'], {}), '(Dcal)\n', (8727, 8733), True, 'import numpy as np\n'), ((8745, 8759), 'numpy.shape', 'np.shape', (['Dcal'], {}), '(Dcal)\n', (8753, 8759), True, 'import numpy as np\n'), ((8938, 8957), 'numpy.abs', 'np.abs', (['(Dcal - Dsgn)'], {}), '(Dcal - Dsgn)\n', (8944, 8957), True, 'import numpy as np\n'), ((8979, 8998), 'numpy.abs', 'np.abs', (['(Dcal - Dsgn)'], {}), '(Dcal - Dsgn)\n', (8985, 8998), True, 'import numpy as np\n'), ((9049, 9068), 'numpy.abs', 'np.abs', (['(Dcal - Dsgn)'], {}), '(Dcal - Dsgn)\n', (9055, 9068), True, 'import numpy as np\n'), ((9892, 9911), 'numpy.abs', 'np.abs', (['(Dgeo - Dcal)'], {}), '(Dgeo - Dcal)\n', (9898, 9911), True, 'import numpy as np\n'), ((9933, 9952), 'numpy.abs', 'np.abs', (['(Dgeo - Dcal)'], {}), '(Dgeo - Dcal)\n', (9939, 9952), True, 'import numpy as np\n'), ((10003, 10022), 'numpy.abs', 'np.abs', (['(Dgeo - Dcal)'], {}), '(Dgeo - Dcal)\n', (10009, 10022), True, 'import numpy as np\n'), ((10205, 10224), 'numpy.abs', 'np.abs', (['(Dsgn - Dcal)'], {}), '(Dsgn - Dcal)\n', (10211, 10224), True, 'import numpy as np\n'), ((10246, 10265), 'numpy.abs', 'np.abs', (['(Dsgn - Dcal)'], {}), '(Dsgn - Dcal)\n', (10252, 10265), True, 'import numpy as np\n'), ((10316, 10335), 'numpy.abs', 'np.abs', (['(Dsgn - Dcal)'], {}), '(Dsgn - Dcal)\n', (10322, 10335), True, 'import numpy as np\n'), ((13717, 13748), 'matplotlib.pyplot.axvline', 'plt.axvline', (['(j * 30)'], {'color': '"""C7"""'}), "(j * 30, color='C7')\n", (13728, 13748), True, 'import matplotlib.pyplot as plt\n'), ((1357, 1378), 'numpy.isnan', 'np.isnan', (['all_src_ids'], {}), '(all_src_ids)\n', (1365, 1378), True, 'import numpy as np\n'), ((1458, 1479), 'numpy.isnan', 'np.isnan', (['all_mic_ids'], {}), '(all_mic_ids)\n', (1466, 1479), True, 'import numpy as np\n'), ((2313, 2324), 'numpy.abs', 'np.abs', (['rir'], {}), '(rir)\n', (2319, 2324), 
True, 'import numpy as np\n'), ((2388, 2418), 'numpy.allclose', 'np.allclose', (['mics_pos[:, i]', '(0)'], {}), '(mics_pos[:, i], 0)\n', (2399, 2418), True, 'import numpy as np\n'), ((2911, 2941), 'numpy.allclose', 'np.allclose', (['srcs_pos[:, j]', '(0)'], {}), '(srcs_pos[:, j], 0)\n', (2922, 2941), True, 'import numpy as np\n'), ((3443, 3461), 'dechorate.dataset.SyntheticDataset', 'SyntheticDataset', ([], {}), '()\n', (3459, 3461), False, 'from dechorate.dataset import DechorateDataset, SyntheticDataset\n'), ((5264, 5310), 'numpy.abs', 'np.abs', (['(toa_sym[:7, :, :] - toa_peak[:7, :, :])'], {}), '(toa_sym[:7, :, :] - toa_peak[:7, :, :])\n', (5270, 5310), True, 'import numpy as np\n'), ((6191, 6207), 'numpy.arange', 'np.arange', (['(I * J)'], {}), '(I * J)\n', (6200, 6207), True, 'import numpy as np\n'), ((6355, 6371), 'numpy.arange', 'np.arange', (['(I * J)'], {}), '(I * J)\n', (6364, 6371), True, 'import numpy as np\n'), ((6674, 6720), 'numpy.abs', 'np.abs', (['(toa_sym[:7, :, :] - toa_peak[:7, :, :])'], {}), '(toa_sym[:7, :, :] - toa_peak[:7, :, :])\n', (6680, 6720), True, 'import numpy as np\n'), ((14034, 14050), 'numpy.arange', 'np.arange', (['(I * J)'], {}), '(I * J)\n', (14043, 14050), True, 'import numpy as np\n'), ((14237, 14253), 'numpy.arange', 'np.arange', (['(I * J)'], {}), '(I * J)\n', (14246, 14253), True, 'import numpy as np\n'), ((2272, 2283), 'numpy.abs', 'np.abs', (['rir'], {}), '(rir)\n', (2278, 2283), True, 'import numpy as np\n'), ((8369, 8382), 'numpy.abs', 'np.abs', (['(x - y)'], {}), '(x - y)\n', (8375, 8382), True, 'import numpy as np\n'), ((2867, 2884), 'numpy.array', 'np.array', (['mic_pos'], {}), '(mic_pos)\n', (2875, 2884), True, 'import numpy as np\n'), ((3389, 3406), 'numpy.array', 'np.array', (['src_pos'], {}), '(src_pos)\n', (3397, 3406), True, 'import numpy as np\n')] |
import numpy as np
def transform_data(m, theta):
    """Resample a 2D power spectrum onto a polar (radius x angle) grid.

    :param m: square 2D array (power spectrum, DC component at the center)
    :param theta: angular resolution; the half-circle [0, pi) is split into
        ``theta`` angular steps
    :return: array of shape (center+1, theta) with ``m`` sampled along rays
        from the image center
    """
    step = np.pi / theta
    center = int(m.shape[0] // 2)
    # The tiny epsilon keeps the last sample inside arange despite float rounding.
    angle_axis = np.arange(0, np.pi, step + 1e-5)
    radius_axis = np.arange(0, center + 1e-5)
    ang, rad = np.meshgrid(angle_axis, radius_axis)
    # Polar -> Cartesian pixel coordinates, shifted to the image center.
    col = rad * np.cos(ang) + center
    row = rad * np.sin(ang) + center
    return m[row.astype(int) - 1, col.astype(int) - 1]
class FeatRPS:
    """Feature extraction for Radial Power Spectrum"""

    def __init__(self, blk_size=32, foreground_ratio=0.8):
        """Initialize

        :param blk_size: Size of individual blocks
        :param foreground_ratio: Ratio of minimal mask pixels to determine foreground
        """
        self.rad = 10            # number of radial bins in the final profile
        self.theta = 100         # angular resolution of the polar transform
        self.fmin = 0.06         # lower normalized-frequency bound of the ROI
        self.fmax = 0.18         # upper normalized-frequency bound of the ROI
        self.blk_size = blk_size
        self.foreground_ratio = foreground_ratio

    def rps(self, image):
        """Compute the radial-power-spectrum quality score of a fingerprint image.

        :param image: Input fingerprint image (2D array)
        :return: maximum of the radial power-spectrum profile (scalar)
        """
        rows, cols = image.shape
        if rows == cols:
            padded = image
        else:
            # Pad the image into a square with mid-gray (127) background.
            side = max(rows, cols)
            padded = np.ones(shape=(side, side), dtype=np.uint8) * 127
            col_off = int(np.fix(side / 2 - cols / 2))
            row_off = int(np.fix(side / 2 - rows / 2))
            padded[row_off:row_off + rows, col_off:col_off + cols] = image
        dim = max(padded.shape)
        # Apply a 2D Blackman window before the FFT to suppress edge leakage.
        win = np.blackman(padded.shape[0])
        win2d = np.outer(win, win)
        spectrum = np.fft.fftshift(np.fft.fft2(padded * win2d))
        power = np.log(1 + np.abs(spectrum))
        # Frequency band of interest, expressed in (shifted) spectrum rows.
        lo = np.max([np.floor(dim * self.fmin), 1])
        hi = np.min([np.ceil(dim * self.fmax), power.shape[0]])
        polar = transform_data(power, self.theta)
        band = polar[int(lo):int(hi) + 1]
        profile = band.sum(axis=1)
        leftover = len(profile) % self.rad
        if len(profile) <= self.rad:
            radial_ps = profile
        else:
            # Trim so the band splits evenly into self.rad groups, then pool each group.
            trimmed = profile[0:len(profile) - leftover]
            radial_ps = np.reshape(trimmed, newshape=(self.rad, int((len(profile) - leftover) / self.rad))).sum(axis=1)
        return radial_ps.max()
| [
"numpy.meshgrid",
"numpy.abs",
"numpy.ceil",
"numpy.fix",
"numpy.floor",
"numpy.expand_dims",
"numpy.ones",
"numpy.fft.fftshift",
"numpy.arange",
"numpy.fft.fft2",
"numpy.cos",
"numpy.sin",
"numpy.blackman"
] | [((199, 238), 'numpy.arange', 'np.arange', (['(0)', 'np.pi', '(thetadelta + 1e-05)'], {}), '(0, np.pi, thetadelta + 1e-05)\n', (208, 238), True, 'import numpy as np\n'), ((251, 282), 'numpy.arange', 'np.arange', (['(0)', '(im_center + 1e-05)'], {}), '(0, im_center + 1e-05)\n', (260, 282), True, 'import numpy as np\n'), ((292, 321), 'numpy.meshgrid', 'np.meshgrid', (['angles', 'radiuses'], {}), '(angles, radiuses)\n', (303, 321), True, 'import numpy as np\n'), ((1616, 1649), 'numpy.blackman', 'np.blackman', (['imagepadded.shape[0]'], {}), '(imagepadded.shape[0])\n', (1627, 1649), True, 'import numpy as np\n'), ((1666, 1691), 'numpy.expand_dims', 'np.expand_dims', (['h'], {'axis': '(1)'}), '(h, axis=1)\n', (1680, 1691), True, 'import numpy as np\n'), ((1787, 1813), 'numpy.fft.fft2', 'np.fft.fft2', (['imagewindowed'], {}), '(imagewindowed)\n', (1798, 1813), True, 'import numpy as np\n'), ((1836, 1863), 'numpy.fft.fftshift', 'np.fft.fftshift', (['f_response'], {}), '(f_response)\n', (1851, 1863), True, 'import numpy as np\n'), ((336, 345), 'numpy.cos', 'np.cos', (['A'], {}), '(A)\n', (342, 345), True, 'import numpy as np\n'), ((372, 381), 'numpy.sin', 'np.sin', (['A'], {}), '(A)\n', (378, 381), True, 'import numpy as np\n'), ((1326, 1363), 'numpy.ones', 'np.ones', ([], {'shape': '(d, d)', 'dtype': 'np.uint8'}), '(shape=(d, d), dtype=np.uint8)\n', (1333, 1363), True, 'import numpy as np\n'), ((1391, 1412), 'numpy.fix', 'np.fix', (['(d / 2 - c / 2)'], {}), '(d / 2 - c / 2)\n', (1397, 1412), True, 'import numpy as np\n'), ((1435, 1456), 'numpy.fix', 'np.fix', (['(d / 2 - r / 2)'], {}), '(d / 2 - r / 2)\n', (1441, 1456), True, 'import numpy as np\n'), ((1892, 1911), 'numpy.abs', 'np.abs', (['fs_response'], {}), '(fs_response)\n', (1898, 1911), True, 'import numpy as np\n'), ((1938, 1971), 'numpy.floor', 'np.floor', (['(imdimension * self.fmin)'], {}), '(imdimension * self.fmin)\n', (1946, 1971), True, 'import numpy as np\n'), ((2001, 2033), 'numpy.ceil', 'np.ceil', 
(['(imdimension * self.fmax)'], {}), '(imdimension * self.fmax)\n', (2008, 2033), True, 'import numpy as np\n')] |
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import pandas as pd
from utility.sklearnbasemodel import BaseModel
import numpy as np
from utility.datafilepath import g_singletonDataFilePath
from preprocess.preparedata import HoldoutSplitMethod
from sklearn.metrics import mean_squared_error
from evaluation.sklearnmape import mean_absolute_percentage_error_xgboost
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
class XGBoostSklearnModel(BaseModel):
    """Gradient-boosted tree regressor (XGBoost) plugged into the project's
    sklearn-style BaseModel train/evaluate pipeline."""

    def __init__(self):
        """Delegate all setup to BaseModel; this subclass keeps no extra state."""
        BaseModel.__init__(self)

    def setClf(self):
        """Instantiate the underlying XGBoost regressor with fixed defaults."""
        self.clf = XGBRegressor(max_depth=7, learning_rate=0.01, n_estimators=100)

    def get_fit_params(self):
        """Extra keyword arguments for ``clf.fit``: watch both the train and the
        test split, score with MAPE, stop early after 3 stalled rounds."""
        watchlist = [(self.X_train, self.y_train), (self.X_test, self.y_test)]
        return {
            'eval_set': watchlist,
            'eval_metric': mean_absolute_percentage_error_xgboost,
            'early_stopping_rounds': 3,
            'verbose': True,
        }

    def after_test(self):
        """Post-test hook required by BaseModel; nothing to do for this model."""
        return

    def __get_intial_model_param(self):
        # Coarse first-pass grid (kept for reference; not used by the current tuning).
        return {'max_depth': [8], 'max_features': [9], 'subsample': [0.8], 'learning_rate': [0.1], 'n_estimators': np.arange(20, 81, 10)}

    def __get_model_param(self):
        # Full hyper-parameter grid explored during tuning.
        return {'max_depth': np.arange(3, 15, 1), 'subsample': np.linspace(0.5, 1.0, 6), 'learning_rate': [0.15, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01], 'n_estimators': [1000, 1300, 1500, 1800, 2000]}

    def getTunedParamterOptions(self):
        """Hyper-parameter grid handed to the tuner by BaseModel."""
        return self.__get_model_param()
if __name__ == "__main__":
    # Entry point: build the model and run the full train/evaluate flow.
    model = XGBoostSklearnModel()
    model.run()
"os.path.abspath",
"numpy.arange",
"xgboost.XGBRegressor",
"numpy.linspace",
"utility.sklearnbasemodel.BaseModel.__init__"
] | [((40, 61), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (55, 61), False, 'import os\n'), ((520, 544), 'utility.sklearnbasemodel.BaseModel.__init__', 'BaseModel.__init__', (['self'], {}), '(self)\n', (538, 544), False, 'from utility.sklearnbasemodel import BaseModel\n'), ((676, 739), 'xgboost.XGBRegressor', 'XGBRegressor', ([], {'max_depth': '(7)', 'learning_rate': '(0.01)', 'n_estimators': '(100)'}), '(max_depth=7, learning_rate=0.01, n_estimators=100)\n', (688, 739), False, 'from xgboost import XGBRegressor\n'), ((2317, 2338), 'numpy.arange', 'np.arange', (['(20)', '(81)', '(10)'], {}), '(20, 81, 10)\n', (2326, 2338), True, 'import numpy as np\n'), ((2402, 2421), 'numpy.arange', 'np.arange', (['(3)', '(15)', '(1)'], {}), '(3, 15, 1)\n', (2411, 2421), True, 'import numpy as np\n'), ((2433, 2457), 'numpy.linspace', 'np.linspace', (['(0.5)', '(1.0)', '(6)'], {}), '(0.5, 1.0, 6)\n', (2444, 2457), True, 'import numpy as np\n')] |
# This is the fastest python implementation of the ForceAtlas2 plugin from Gephi
# intended to be used with networkx, but is in theory independent of
# it since it only relies on the adjacency matrix. This
# implementation is based directly on the Gephi plugin:
#
# https://github.com/gephi/gephi/blob/master/modules/LayoutPlugin/src/main/java/org/gephi/layout/plugin/forceAtlas2/ForceAtlas2.java
#
# For simplicity and for keeping code in sync with upstream, I have
# reused as many of the variable/function names as possible, even when
# they are in a more java-like style (e.g. camelcase)
#
# I wrote this because I wanted an almost feature complete and fast implementation
# of ForceAtlas2 algorithm in python
#
# NOTES: Currently, this only works for weighted undirected graphs.
#
# Copyright (C) 2017 <NAME> <<EMAIL>>
#
# Available under the GPLv3
import random
import time
import numpy
import scipy
from tqdm import tqdm
from . import fa2util
class Timer:
    """Accumulating stopwatch: each start()/stop() pair adds its elapsed
    wall-clock time to a running total that display() prints."""

    def __init__(self, name="Timer"):
        self.name = name        # label printed by display()
        self.start_time = 0.0   # wall-clock time of the most recent start()
        self.total_time = 0.0   # seconds accumulated over all start/stop pairs

    def start(self):
        """Mark the beginning of a timed section."""
        self.start_time = time.time()

    def stop(self):
        """Add the time elapsed since the matching start() to the total."""
        self.total_time += time.time() - self.start_time

    def display(self):
        """Print the accumulated time, formatted to two decimal places."""
        print(self.name, " took ", "%.2f" % self.total_time, " seconds")
class ForceAtlas2:
    """Pure-Python ForceAtlas2 graph layout (port of the Gephi plugin).

    Operates on an adjacency matrix — a dense square numpy array or a scipy
    sparse matrix.  Only weighted undirected graphs are supported, so the
    matrix must be symmetric.  The node/edge structs and the force kernels
    live in the companion ``fa2util`` module.
    """

    def __init__(
            self,
            # Behavior alternatives
            outboundAttractionDistribution=False,  # Dissuade hubs
            linLogMode=False,  # NOT IMPLEMENTED
            adjustSizes=False,  # Prevent overlap (NOT IMPLEMENTED)
            edgeWeightInfluence=1.0,
            # Performance
            jitterTolerance=1.0,  # Tolerance
            barnesHutOptimize=True,
            barnesHutTheta=1.2,
            multiThreaded=False,  # NOT IMPLEMENTED
            # Tuning
            scalingRatio=2.0,
            strongGravityMode=False,
            gravity=1.0,
            # Log
            verbose=True):
        """Store the layout parameters (mirrors the Gephi plugin's settings).

        ``linLogMode``, ``adjustSizes`` and ``multiThreaded`` are accepted for
        API parity with Gephi but must remain False (not implemented here).
        """
        assert linLogMode == adjustSizes == multiThreaded == False, "You selected a feature that has not been implemented yet..."
        self.outboundAttractionDistribution = outboundAttractionDistribution
        self.linLogMode = linLogMode
        self.adjustSizes = adjustSizes
        self.edgeWeightInfluence = edgeWeightInfluence
        self.jitterTolerance = jitterTolerance
        self.barnesHutOptimize = barnesHutOptimize
        self.barnesHutTheta = barnesHutTheta
        self.scalingRatio = scalingRatio
        self.strongGravityMode = strongGravityMode
        self.gravity = gravity
        self.verbose = verbose

    def init(
            self,
            # a graph in 2D numpy ndarray format (or) scipy sparse matrix format
            G,
            pos=None  # Array of initial positions
    ):
        """Validate ``G``/``pos`` and build fa2util Node and Edge lists.

        Node mass is 1 + degree; positions default to uniform random values
        in [0, 1) when ``pos`` is None.  Only the upper triangle of ``G`` is
        turned into edges (undirected graph, duplicates skipped).
        Returns ``(nodes, edges)``.
        """
        isSparse = False
        if isinstance(G, numpy.ndarray):
            # Check our assumptions
            assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
            assert numpy.all(
                G.T == G
            ), "G is not symmetric.  Currently only undirected graphs are supported"
            assert isinstance(
                pos, numpy.ndarray) or (pos is None), "Invalid node positions"
        elif scipy.sparse.issparse(G):
            # Check our assumptions
            assert G.shape == (G.shape[0], G.shape[0]), "G is not 2D square"
            assert isinstance(
                pos, numpy.ndarray) or (pos is None), "Invalid node positions"
            G = G.tolil()
            isSparse = True
        else:
            assert False, "G is not numpy ndarray or scipy sparse matrix"
        # Put nodes into a data structure we can understand
        nodes = []
        for i in range(0, G.shape[0]):
            n = fa2util.Node()
            if isSparse:
                n.mass = 1 + len(G.rows[i])
            else:
                n.mass = 1 + numpy.count_nonzero(G[i])
            n.old_dx = 0
            n.old_dy = 0
            n.dx = 0
            n.dy = 0
            if pos is None:
                n.x = random.random()
                n.y = random.random()
            else:
                n.x = pos[i][0]
                n.y = pos[i][1]
            nodes.append(n)
        # Put edges into a data structure we can understand
        edges = []
        es = numpy.asarray(G.nonzero()).T
        for e in es:  # Iterate through edges
            if e[1] <= e[0]:
                continue  # Avoid duplicate edges
            edge = fa2util.Edge()
            edge.node1 = e[0]  # The index of the first node in `nodes`
            edge.node2 = e[1]  # The index of the second node in `nodes`
            edge.weight = G[tuple(e)]
            edges.append(edge)
        return nodes, edges

    # Given an adjacency matrix, this function computes the node positions
    # according to the ForceAtlas2 layout algorithm.  It takes the same
    # arguments that one would give to the ForceAtlas2 algorithm in Gephi.
    # Not all of them are implemented.  See below for a description of
    # each parameter and whether or not it has been implemented.
    #
    # This function will return a list of X-Y coordinate tuples, ordered
    # in the same way as the rows/columns in the input matrix.
    #
    # The only reason you would want to run this directly is if you don't
    # use networkx.  In this case, you'll likely need to convert the
    # output to a more usable format.  If you do use networkx, use the
    # "forceatlas2_networkx_layout" function below.
    #
    # Currently, only undirected graphs are supported so the adjacency matrix
    # should be symmetric.
    def forceatlas2(
            self,
            # a graph in 2D numpy ndarray format (or) scipy sparse matrix format
            G,
            pos=None,  # Array of initial positions
            iterations=30  # Number of times to iterate the main loop
    ):
        """Run the ForceAtlas2 simulation and return the final node positions
        as a list of (x, y) tuples, ordered like the rows of ``G``."""
        # Initializing, initAlgo()
        # ================================================================
        # speed and speedEfficiency describe a scaling factor of dx and dy
        # before x and y are adjusted.  These are modified as the
        # algorithm runs to help ensure convergence.
        speed = 1.0
        speedEfficiency = 1.0
        nodes, edges = self.init(G, pos)
        outboundAttCompensation = 1.0
        if self.outboundAttractionDistribution:
            outboundAttCompensation = numpy.mean([n.mass for n in nodes])
        # ================================================================
        # Main loop, i.e. goAlgo()
        # ================================================================
        barneshut_timer = Timer(name="BarnesHut Approximation")
        repulsion_timer = Timer(name="Repulsion forces")
        gravity_timer = Timer(name="Gravitational forces")
        attraction_timer = Timer(name="Attraction forces")
        applyforces_timer = Timer(name="AdjustSpeedAndApplyForces step")
        # Each iteration of this loop represents a call to goAlgo().
        niters = range(iterations)
        if self.verbose:
            niters = tqdm(niters)
        for _i in niters:
            # Reset per-iteration displacements, keeping the previous ones
            # so adjustSpeedAndApplyForces can estimate swinging/jitter.
            for n in nodes:
                n.old_dx = n.dx
                n.old_dy = n.dy
                n.dx = 0
                n.dy = 0
            # Barnes Hut optimization
            if self.barnesHutOptimize:
                barneshut_timer.start()
                rootRegion = fa2util.Region(nodes)
                rootRegion.buildSubRegions()
                barneshut_timer.stop()
            # Charge repulsion forces
            repulsion_timer.start()
            # parallelization should be implemented here
            if self.barnesHutOptimize:
                rootRegion.applyForceOnNodes(nodes, self.barnesHutTheta,
                                             self.scalingRatio)
            else:
                fa2util.apply_repulsion(nodes, self.scalingRatio)
            repulsion_timer.stop()
            # Gravitational forces
            gravity_timer.start()
            fa2util.apply_gravity(nodes,
                                  self.gravity,
                                  useStrongGravity=self.strongGravityMode)
            gravity_timer.stop()
            # If other forms of attraction were implemented they would be selected here.
            attraction_timer.start()
            fa2util.apply_attraction(nodes, edges,
                                     self.outboundAttractionDistribution,
                                     outboundAttCompensation,
                                     self.edgeWeightInfluence)
            attraction_timer.stop()
            # Adjust speeds and apply forces
            applyforces_timer.start()
            values = fa2util.adjustSpeedAndApplyForces(nodes, speed,
                                                      speedEfficiency,
                                                      self.jitterTolerance)
            speed = values['speed']
            speedEfficiency = values['speedEfficiency']
            applyforces_timer.stop()
        if self.verbose:
            if self.barnesHutOptimize:
                barneshut_timer.display()
            repulsion_timer.display()
            gravity_timer.display()
            attraction_timer.display()
            applyforces_timer.display()
        # ================================================================
        return [(n.x, n.y) for n in nodes]

    # A layout for NetworkX.
    #
    # This function returns a NetworkX layout, which is really just a
    # dictionary of node positions (2D X-Y tuples) indexed by the node name.
    def forceatlas2_networkx_layout(self, G, pos=None, iterations=100):
        """Lay out a networkx graph; returns {node_name: (x, y)}.

        ``pos``, if given, is a dict of initial positions keyed by node name.
        """
        import networkx
        assert isinstance(G,
                          networkx.classes.graph.Graph), "Not a networkx graph"
        assert isinstance(pos, dict) or (
            pos is
            None), "pos must be specified as a dictionary, as in networkx"
        M = networkx.to_scipy_sparse_matrix(G, dtype='f', format='lil')
        if pos is None:
            l = self.forceatlas2(M, pos=None, iterations=iterations)
        else:
            poslist = numpy.asarray([pos[i] for i in G.nodes()])
            l = self.forceatlas2(M, pos=poslist, iterations=iterations)
        return dict(zip(G.nodes(), l))

    # A layout for igraph.
    #
    # This function returns an igraph layout
    def forceatlas2_igraph_layout(self,
                                  G,
                                  pos=None,
                                  iterations=100,
                                  weight_attr=None):
        """Lay out an igraph graph; returns an igraph ``Layout`` object.

        ``weight_attr`` names the edge attribute holding weights (all edges
        weigh 1 when it is None); ``pos`` is an optional list/array of
        initial positions.
        """
        from scipy.sparse import csr_matrix
        import igraph

        def to_sparse(graph, weight_attr=None):
            # Convert an igraph graph to a CSR adjacency matrix, mirroring
            # each undirected edge so the matrix comes out symmetric.
            edges = graph.get_edgelist()
            if weight_attr is None:
                weights = [1] * len(edges)
            else:
                weights = graph.es[weight_attr]
            if not graph.is_directed():
                edges.extend([(v, u) for u, v in edges])
                weights.extend(weights)
            return csr_matrix((weights, zip(*edges)))

        assert isinstance(G, igraph.Graph), "Not a igraph graph"
        assert isinstance(pos, (list, numpy.ndarray)) or (
            pos is None), "pos must be a list or numpy array"
        if isinstance(pos, list):
            pos = numpy.array(pos)
        adj = to_sparse(G, weight_attr)
        coords = self.forceatlas2(adj, pos=pos, iterations=iterations)
        return igraph.layout.Layout(coords, 2)
| [
"networkx.to_scipy_sparse_matrix",
"tqdm.tqdm",
"numpy.count_nonzero",
"scipy.sparse.issparse",
"time.time",
"random.random",
"numpy.mean",
"numpy.array",
"igraph.layout.Layout",
"numpy.all"
] | [((1140, 1151), 'time.time', 'time.time', ([], {}), '()\n', (1149, 1151), False, 'import time\n'), ((9989, 10048), 'networkx.to_scipy_sparse_matrix', 'networkx.to_scipy_sparse_matrix', (['G'], {'dtype': '"""f"""', 'format': '"""lil"""'}), "(G, dtype='f', format='lil')\n", (10020, 10048), False, 'import networkx\n'), ((11515, 11546), 'igraph.layout.Layout', 'igraph.layout.Layout', (['coords', '(2)'], {}), '(coords, 2)\n', (11535, 11546), False, 'import igraph\n'), ((1201, 1212), 'time.time', 'time.time', ([], {}), '()\n', (1210, 1212), False, 'import time\n'), ((3012, 3031), 'numpy.all', 'numpy.all', (['(G.T == G)'], {}), '(G.T == G)\n', (3021, 3031), False, 'import numpy\n'), ((3256, 3280), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['G'], {}), '(G)\n', (3277, 3280), False, 'import scipy\n'), ((6417, 6452), 'numpy.mean', 'numpy.mean', (['[n.mass for n in nodes]'], {}), '([n.mass for n in nodes])\n', (6427, 6452), False, 'import numpy\n'), ((7103, 7115), 'tqdm.tqdm', 'tqdm', (['niters'], {}), '(niters)\n', (7107, 7115), False, 'from tqdm import tqdm\n'), ((11370, 11386), 'numpy.array', 'numpy.array', (['pos'], {}), '(pos)\n', (11381, 11386), False, 'import numpy\n'), ((4081, 4096), 'random.random', 'random.random', ([], {}), '()\n', (4094, 4096), False, 'import random\n'), ((4119, 4134), 'random.random', 'random.random', ([], {}), '()\n', (4132, 4134), False, 'import random\n'), ((3913, 3938), 'numpy.count_nonzero', 'numpy.count_nonzero', (['G[i]'], {}), '(G[i])\n', (3932, 3938), False, 'import numpy\n')] |
from .core import IPStructure
from .core import Subnet
from .core import Interface
from .core import IPAddress
from .core import bin_add
import numpy as np
class Encoder:
    """Encodes (field, value) mappings into IP-style binary interfaces on a
    fixed subnet."""

    def __init__(self, ip_structure, subnet):
        """Store the encoding layout and the target subnet.

        :param ip_structure: ip structure containing the fields and their # of bits
        :type ip_structure: IPStructure
        :param subnet: subnet of the specific encoder, e.g. Conv Layer encoder, Pooling Layer encoder
        :type subnet: Subnet
        """
        self.ip_structure = ip_structure
        self.subnet = subnet

    def encode_2_interface(self, field_values):
        """Encode the values of a list of fields into an interface including
        the IP and subnet.

        :param field_values: (field, value) dict
        :type field_values: dict
        :return: an interface including the IP and the subnet
        :rtype: Interface
        """
        fields = self.ip_structure.fields
        encoded = ''
        for name in fields:
            try:
                value = field_values[name]
            except KeyError:
                raise Exception('fields and field_values does not match')
            width = fields[name]
            bits = np.binary_repr(value)
            if len(bits) > width:
                raise Exception('field value is out of the allowed bound')
            # Left-pad each field to its allotted width before appending.
            encoded += bits.zfill(width)
        # Overlay the encoded bits onto the subnet's binary prefix.
        full_bits = bin_add(encoded, self.subnet.bin_ip)
        ip_addr = IPAddress(length=int(len(full_bits) / 8), bin_ip=full_bits)
        return Interface(ip=ip_addr, subnet=self.subnet, ip_structure=self.ip_structure)
| [
"numpy.binary_repr"
] | [((1191, 1208), 'numpy.binary_repr', 'np.binary_repr', (['v'], {}), '(v)\n', (1205, 1208), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from sklearn.utils import Bunch
def gen_random_spikes(N, T, firerate, framerate, seed=None):
    """Draw a boolean Bernoulli spike raster of shape (N, T).

    Each entry fires independently with probability ``firerate / framerate``;
    the global NumPy RNG is seeded first when ``seed`` is given.
    """
    if seed is not None:
        np.random.seed(seed)
    fire_prob = firerate / float(framerate)
    return np.random.rand(N, T) < fire_prob
def gen_sinusoidal_spikes(N, T, firerate, framerate, seed=None):
    """Draw a boolean spike raster whose firing rate oscillates over time.

    The per-frame firing probability is the base rate modulated by a cubed,
    piecewise-constant sinusoid (phase advances every 50 frames).
    """
    if seed is not None:
        np.random.seed(seed)
    modulation = np.sin(np.arange(T) // 50) ** 3 * 4
    threshold = firerate / float(framerate) * modulation
    return np.random.rand(N, T) < threshold
def make_calcium(true_spikes, g):
    """Convolve a spike raster with an AR(1) or AR(2) calcium kernel.

    ``g`` holds the autoregressive coefficients. The recursion is applied
    column-by-column starting at frame index 2 (matching the AR(2) case),
    on a float copy of ``true_spikes``.
    """
    traces = true_spikes.astype(float)
    is_ar2 = len(g) == 2
    for t in range(2, traces.shape[1]):
        # Keep the AR(2) sum grouped in one expression so float rounding
        # matches a single in-place addition.
        if is_ar2:
            traces[:, t] += g[0] * traces[:, t - 1] + g[1] * traces[:, t - 2]
        else:
            traces[:, t] += g[0] * traces[:, t - 1]
    return traces
def add_noise(truth, b, sn):
    """Return ``truth`` shifted by baseline ``b`` plus Gaussian noise of std ``sn``."""
    gaussian = np.random.randn(*truth.shape)
    return b + truth + sn * gaussian
def gen_data(g=[.95], sn=.3, T=3000, framerate=30, firerate=.5, b=0, N=20, seed=None):
    """
    Generate data from homogenous Poisson Process

    Parameters
    ----------
    g : array, shape (p,), optional, default=[.95]
        Parameter(s) of the AR(p) process that models the fluorescence impulse response.
    sn : float, optional, default .3
        Noise standard deviation.
    T : int, optional, default 3000
        Duration.
    framerate : int, optional, default 30
        Frame rate.
    firerate : float, optional, default .5
        Neural firing rate.
    b : float, optional, default 0
        Baseline.
    N : int, optional, default 20
        Number of generated traces.
    seed : int, optional, default None
        Seed of random number generator; if None the global RNG state is
        left untouched.

    Returns
    -------
    y : array, shape (N, T)
        Noisy fluorescence data.
    c : array, shape (N, T)
        Calcium traces (without sn).
    s : array, shape (N, T)
        Spike trains.
    """
    if seed is not None:
        np.random.seed(seed)
    # Pipeline: Bernoulli spikes -> AR(p) calcium convolution -> additive noise.
    true_spikes = gen_random_spikes(
        N=N,
        T=T,
        firerate=firerate,
        framerate=framerate,
    )
    true_calcium = make_calcium(true_spikes, g)
    observed = add_noise(true_calcium, b, sn)
    return observed, true_calcium, true_spikes
def gen_sinusoidal_data(
        g=(.95,),
        sn=.3,
        T=3000,
        framerate=30,
        firerate=.5,
        b=0,
        N=20,
        seed=None,
):
    """
    Generate data from inhomogenous Poisson Process with sinusoidal instantaneous activity

    Parameters
    ----------
    g : array, shape (p,), optional, default=(.95,)
        Parameter(s) of the AR(p) process that models the fluorescence impulse response.
    sn : float, optional, default .3
        Noise standard deviation.
    T : int, optional, default 3000
        Duration.
    framerate : int, optional, default 30
        Frame rate.
    firerate : float, optional, default .5
        Neural firing rate.
    b : float, optional, default 0
        Baseline.
    N : int, optional, default 20
        Number of generated traces.
    seed : int, optional, default None
        Seed of random number generator; if None the global RNG state is
        left untouched.

    Returns
    -------
    y : array, shape (N, T)
        Noisy fluorescence data.
    c : array, shape (N, T)
        Calcium traces (without sn).
    s : array, shape (N, T)
        Spike trains.
    """
    if seed is not None:
        np.random.seed(seed)
    # Same pipeline as gen_data, but with a sinusoidally modulated firing rate.
    true_spikes = gen_sinusoidal_spikes(
        N=N,
        T=T,
        firerate=firerate,
        framerate=framerate,
    )
    true_calcium = make_calcium(true_spikes, g)
    observed = add_noise(true_calcium, b, sn)
    return observed, true_calcium, true_spikes
def make_calcium_traces(
        neuron_ids=('a', 'b', 'c'),
        duration=60.0,
        sampling_rate=30.0,
        oscillation=True,
):
    """Simulate labelled calcium traces and spike trains.

    Generates one trace per entry of ``neuron_ids`` (oscillatory firing when
    ``oscillation`` is True, homogeneous otherwise) and returns a sklearn
    ``Bunch`` with ``traces`` and ``spikes`` DataFrames indexed by time in
    seconds and columned by the neuron ids.
    """
    generator = gen_sinusoidal_data if oscillation else gen_data
    raw_traces, _, raw_spikes = map(np.squeeze, generator(
        g=[.95],
        sn=.3,
        T=int(sampling_rate * duration),
        framerate=sampling_rate,
        firerate=.5,
        b=0,
        N=len(neuron_ids),
        seed=13,
    ))
    time = np.arange(0, raw_traces.shape[1] / sampling_rate, 1 / sampling_rate)
    return Bunch(
        traces=pd.DataFrame(raw_traces.T, index=time, columns=neuron_ids),
        spikes=pd.DataFrame(raw_spikes.T, index=time, columns=neuron_ids),
    )
| [
"pandas.DataFrame",
"numpy.random.seed",
"numpy.random.randn",
"sklearn.utils.Bunch",
"numpy.arange",
"numpy.random.rand"
] | [((4203, 4267), 'numpy.arange', 'np.arange', (['(0)', '(traces.shape[1] / sampling_rate)', '(1 / sampling_rate)'], {}), '(0, traces.shape[1] / sampling_rate, 1 / sampling_rate)\n', (4212, 4267), True, 'import numpy as np\n'), ((4278, 4332), 'pandas.DataFrame', 'pd.DataFrame', (['traces.T'], {'index': 'time', 'columns': 'neuron_ids'}), '(traces.T, index=time, columns=neuron_ids)\n', (4290, 4332), True, 'import pandas as pd\n'), ((4346, 4400), 'pandas.DataFrame', 'pd.DataFrame', (['spikes.T'], {'index': 'time', 'columns': 'neuron_ids'}), '(spikes.T, index=time, columns=neuron_ids)\n', (4358, 4400), True, 'import pandas as pd\n'), ((4413, 4448), 'sklearn.utils.Bunch', 'Bunch', ([], {'traces': 'traces', 'spikes': 'spikes'}), '(traces=traces, spikes=spikes)\n', (4418, 4448), False, 'from sklearn.utils import Bunch\n'), ((169, 189), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (183, 189), True, 'import numpy as np\n'), ((209, 229), 'numpy.random.rand', 'np.random.rand', (['N', 'T'], {}), '(N, T)\n', (223, 229), True, 'import numpy as np\n'), ((384, 404), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (398, 404), True, 'import numpy as np\n'), ((424, 444), 'numpy.random.rand', 'np.random.rand', (['N', 'T'], {}), '(N, T)\n', (438, 444), True, 'import numpy as np\n'), ((889, 918), 'numpy.random.randn', 'np.random.randn', (['*truth.shape'], {}), '(*truth.shape)\n', (904, 918), True, 'import numpy as np\n'), ((1965, 1985), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1979, 1985), True, 'import numpy as np\n'), ((3362, 3382), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (3376, 3382), True, 'import numpy as np\n'), ((494, 506), 'numpy.arange', 'np.arange', (['T'], {}), '(T)\n', (503, 506), True, 'import numpy as np\n')] |
import numpy as np
import torch
from collections import namedtuple
from torch.nn.parallel import DistributedDataParallel as DDP
# from torch.nn.parallel import DistributedDataParallelCPU as DDPC # Deprecated
from rlpyt.agents.base import BaseAgent, AgentStep
from rlpyt.models.qpg.mlp import QofMuMlpModel, PiMlpModel
from rlpyt.utils.quick_args import save__init__args
from rlpyt.distributions.gaussian import Gaussian, DistInfoStd
from rlpyt.utils.buffer import buffer_to
from rlpyt.utils.logging import logger
from rlpyt.models.utils import update_state_dict
from rlpyt.utils.collections import namedarraytuple
from rlpyt.utils.buffer import numpify_buffer
# AgentInfo carries the action distribution info returned from `step`;
# Models names pi/q1/q2/v model slots (not referenced in the visible code).
AgentInfo = namedarraytuple("AgentInfo", ["dist_info"])
Models = namedtuple("Models", ["pi", "q1", "q2", "v"])
class PIDAgent(BaseAgent):
    """Agent for expert demonstrations of LunarLanderContinuous based on PID found manually"""

    def __init__(self, *args,**kwargs):
        """Saves input arguments; network defaults stored within."""
        super().__init__(*args,**kwargs)
        # Hand-tuned proportional/derivative gains for the altitude and
        # angle control loops used in `step`.
        self.kp_alt = 27.14893426  # proportional altitude
        self.kd_alt = -17.60568096  # derivative altitude
        self.kp_ang = -40.33336571  # proportional angle
        self.kd_ang = 24.34188735  # derivative angle
        save__init__args(locals())

    def initialize(self, env_spaces, share_memory=False, **kwargs):
        """
        Instantiates the neural net model(s) according to the environment
        interfaces.

        Uses shared memory as needed--e.g. in CpuSampler, workers have a copy
        of the agent for action-selection.  The workers automatically hold
        up-to-date parameters in ``model``, because they exist in shared
        memory, constructed here before worker processes fork. Agents with
        additional model components (beyond ``self.model``) for
        action-selection should extend this method to share those, as well.

        Typically called in the sampler during startup.

        Args:
            env_spaces: passed to ``make_env_to_model_kwargs()``, typically namedtuple of 'observation' and 'action'.
            share_memory (bool): whether to use shared memory for model parameters.

        NOTE(review): model construction is deliberately disabled below --
        this agent is a hand-coded PID controller and builds no network.
        """
        self.env_model_kwargs = self.make_env_to_model_kwargs(env_spaces)
        # self.model = self.ModelCls(**self.env_model_kwargs,
        #     **self.model_kwargs)
        # if share_memory:
        #     self.model.share_memory()
        #     # Store the shared_model (CPU) under a separate name, in case the
        #     # model gets moved to GPU later:
        #     self.shared_model = self.model
        # if self.initial_model_state_dict is not None:
        #     self.model.load_state_dict(self.initial_model_state_dict)
        self.env_spaces = env_spaces
        self.share_memory = share_memory

    def give_min_itr_learn(self, min_itr_learn):
        # Stores the algorithm's minimum-iterations-before-learning value.
        self.min_itr_learn = min_itr_learn  # From algo.

    def make_env_to_model_kwargs(self, env_spaces):
        """Build model kwargs (observation shape, action size) from env spaces."""
        assert len(env_spaces.action.shape) == 1
        return dict(
            observation_shape=env_spaces.observation.shape,
            action_size=env_spaces.action.shape[0],
        )

    @torch.no_grad()
    def step(self, observation, prev_action, prev_reward):
        """Compute the PID control action; no neural network is involved.

        Assumes the LunarLanderContinuous observation layout
        (x, y, vx, vy, angle, angular velocity, leg1 contact, leg2 contact)
        -- TODO(review): confirm against the environment definition.
        """
        # Calculate setpoints (target values)
        observation = numpify_buffer(observation)
        # Accept a single unbatched observation by temporarily adding a
        # leading batch dimension (removed again before returning).
        reshape = False
        if len(observation.shape)==1:
            reshape = True
            observation=np.expand_dims(observation,0)
        alt_tgt = np.abs(observation[:,0])
        ang_tgt = (.25 * np.pi) * (observation[:,0] + observation[:,2])
        # Calculate error values
        alt_error = (alt_tgt - observation[:,1])
        ang_error = (ang_tgt - observation[:,4])
        # Use PID to get adjustments
        alt_adj = self.kp_alt * alt_error + self.kd_alt * observation[:,3]
        ang_adj = self.kp_ang * ang_error + self.kd_ang * observation[:,5]
        # Gym wants them as np array (-1,1)
        a = np.array([alt_adj, ang_adj])
        a = np.clip(a, -1, +1)
        # If the legs are on the ground we made it, kill engines
        a = np.logical_not(observation[:,6:8])*a.T
        # model_inputs = buffer_to((observation, prev_action, prev_reward),
        #     device=self.device)
        if reshape:
            a = np.squeeze(a)
        a = torch.tensor(a)
        # Degenerate "distribution": mean is the action, log_std is zero.
        dist_info = DistInfoStd(mean=a, log_std=a*0)
        return AgentStep(action=a, agent_info=AgentInfo(dist_info = dist_info))

    def sample_mode(self, itr):
        """Go into sampling mode."""
        self._mode = "sample"

    def eval_mode(self, itr):
        """Go into evaluation mode. Example use could be to adjust epsilon-greedy."""
        self._mode = "eval"
"numpy.abs",
"numpy.logical_not",
"rlpyt.utils.buffer.numpify_buffer",
"numpy.expand_dims",
"numpy.clip",
"numpy.array",
"collections.namedtuple",
"rlpyt.distributions.gaussian.DistInfoStd",
"numpy.squeeze",
"torch.tensor",
"torch.no_grad",
"rlpyt.utils.collections.namedarraytuple"
] | [((676, 719), 'rlpyt.utils.collections.namedarraytuple', 'namedarraytuple', (['"""AgentInfo"""', "['dist_info']"], {}), "('AgentInfo', ['dist_info'])\n", (691, 719), False, 'from rlpyt.utils.collections import namedarraytuple\n'), ((729, 774), 'collections.namedtuple', 'namedtuple', (['"""Models"""', "['pi', 'q1', 'q2', 'v']"], {}), "('Models', ['pi', 'q1', 'q2', 'v'])\n", (739, 774), False, 'from collections import namedtuple\n'), ((3190, 3205), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3203, 3205), False, 'import torch\n'), ((3333, 3360), 'rlpyt.utils.buffer.numpify_buffer', 'numpify_buffer', (['observation'], {}), '(observation)\n', (3347, 3360), False, 'from rlpyt.utils.buffer import numpify_buffer\n'), ((3522, 3547), 'numpy.abs', 'np.abs', (['observation[:, 0]'], {}), '(observation[:, 0])\n', (3528, 3547), True, 'import numpy as np\n'), ((3996, 4024), 'numpy.array', 'np.array', (['[alt_adj, ang_adj]'], {}), '([alt_adj, ang_adj])\n', (4004, 4024), True, 'import numpy as np\n'), ((4037, 4055), 'numpy.clip', 'np.clip', (['a', '(-1)', '(+1)'], {}), '(a, -1, +1)\n', (4044, 4055), True, 'import numpy as np\n'), ((4367, 4382), 'torch.tensor', 'torch.tensor', (['a'], {}), '(a)\n', (4379, 4382), False, 'import torch\n'), ((4403, 4437), 'rlpyt.distributions.gaussian.DistInfoStd', 'DistInfoStd', ([], {'mean': 'a', 'log_std': '(a * 0)'}), '(mean=a, log_std=a * 0)\n', (4414, 4437), False, 'from rlpyt.distributions.gaussian import Gaussian, DistInfoStd\n'), ((3474, 3504), 'numpy.expand_dims', 'np.expand_dims', (['observation', '(0)'], {}), '(observation, 0)\n', (3488, 3504), True, 'import numpy as np\n'), ((4134, 4169), 'numpy.logical_not', 'np.logical_not', (['observation[:, 6:8]'], {}), '(observation[:, 6:8])\n', (4148, 4169), True, 'import numpy as np\n'), ((4341, 4354), 'numpy.squeeze', 'np.squeeze', (['a'], {}), '(a)\n', (4351, 4354), True, 'import numpy as np\n')] |
import hashlib
import os
import warnings
from multiprocessing import cpu_count, get_context
from typing import (
Iterator, Iterable, List, Mapping, Optional, Tuple, Union)
import lmdb
import numpy as np
from .backends import BACKEND_ACCESSOR_MAP
from .backends import backend_decoder, backend_from_heuristics
from .context import TxnRegister
from .records import parsing
from .records.queries import RecordQuery
from .utils import cm_weakref_obj_proxy, is_suitable_user_key
class ArraysetDataReader(object):
"""Class implementing get access to data in a arrayset.
The methods implemented here are common to the :class:`ArraysetDataWriter`
accessor class as well as to this ``"read-only"`` method. Though minimal,
the behavior of read and write checkouts is slightly unique, with the main
difference being that ``"read-only"`` checkouts implement both thread and
process safe access methods. This is not possible for ``"write-enabled"``
checkouts, and attempts at multiprocess/threaded writes will generally
fail with cryptic error messages.
"""
def __init__(self,
repo_pth: os.PathLike,
aset_name: str,
default_schema_hash: str,
samplesAreNamed: bool,
isVar: bool,
varMaxShape: list,
varDtypeNum: int,
dataenv: lmdb.Environment,
hashenv: lmdb.Environment,
mode: str,
*args, **kwargs):
"""Developer documentation for init method
The location of the data references can be transparently specified by
feeding in a different dataenv argument. For staged reads -> ``dataenv =
lmdb.Environment(STAGING_DB)``. For commit read -> ``dataenv =
lmdb.Environment(COMMIT_DB)``.
Parameters
----------
repo_pth : os.PathLike
path to the repository on disk.
aset_name : str
name of the arrayset
schema_hashes : list of str
list of all schemas referenced in the arrayset
samplesAreNamed : bool
do samples have names or not.
isVar : bool
is the arrayset schema variable shape or not
varMaxShape : list or tuple of int
schema size (max) of the arrayset data
varDtypeNum : int
datatype numeric code of the arrayset data
dataenv : lmdb.Environment
environment of the arrayset references to read
hashenv : lmdb.Environment
environment of the repository hash records
mode : str, optional
mode to open the file handles in. 'r' for read only, 'a' for read/write, defaults
to 'r'
"""
self._mode = mode
self._path = repo_pth
self._asetn = aset_name
self._schema_variable = isVar
self._schema_dtype_num = varDtypeNum
self._samples_are_named = samplesAreNamed
self._schema_max_shape = tuple(varMaxShape)
self._default_schema_hash = default_schema_hash
self._is_conman: bool = False
self._index_expr_factory = np.s_
self._index_expr_factory.maketuple = False
self._contains_partial_remote_data: bool = False
# ------------------------ backend setup ------------------------------
self._fs = {}
for backend, accessor in BACKEND_ACCESSOR_MAP.items():
if accessor is not None:
self._fs[backend] = accessor(
repo_path=self._path,
schema_shape=self._schema_max_shape,
schema_dtype=np.typeDict[self._schema_dtype_num])
self._fs[backend].open(self._mode)
# -------------- Sample backend specification parsing -----------------
self._sspecs = {}
_TxnRegister = TxnRegister()
hashTxn = _TxnRegister.begin_reader_txn(hashenv)
try:
asetNamesSpec = RecordQuery(dataenv).arrayset_data_records(self._asetn)
for asetNames, dataSpec in asetNamesSpec:
hashKey = parsing.hash_data_db_key_from_raw_key(dataSpec.data_hash)
hash_ref = hashTxn.get(hashKey)
be_loc = backend_decoder(hash_ref)
self._sspecs[asetNames.data_name] = be_loc
if (be_loc.backend == '50') and (not self._contains_partial_remote_data):
warnings.warn(
f'Arrayset: {self._asetn} contains `reference-only` samples, with '
f'actual data residing on a remote server. A `fetch-data` '
f'operation is required to access these samples.', UserWarning)
self._contains_partial_remote_data = True
finally:
_TxnRegister.abort_reader_txn(hashenv)
def __enter__(self):
self._is_conman = True
return self
def __exit__(self, *exc):
self._is_conman = False
return
def __getitem__(self, key: Union[str, int]) -> np.ndarray:
"""Retrieve a sample with a given key. Convenience method for dict style access.
.. seealso:: :meth:`get`
Parameters
----------
key : Union[str, int]
sample key to retrieve from the arrayset
Returns
-------
np.ndarray
sample array data corresponding to the provided key
"""
return self.get(key)
def __iter__(self) -> Iterator[Union[str, int]]:
return self.keys()
def __len__(self) -> int:
"""Check how many samples are present in a given arrayset
Returns
-------
int
number of samples the arrayset contains
"""
return len(self._sspecs)
def __contains__(self, key: Union[str, int]) -> bool:
"""Determine if a key is a valid sample name in the arrayset
Parameters
----------
key : Union[str, int]
name to check if it is a sample in the arrayset
Returns
-------
bool
True if key exists, else False
"""
exists = key in self._sspecs
return exists
def _repr_pretty_(self, p, cycle):
res = f'Hangar {self.__class__.__name__} \
\n Arrayset Name : {self._asetn}\
\n Schema Hash : {self._default_schema_hash}\
\n Variable Shape : {bool(int(self._schema_variable))}\
\n (max) Shape : {self._schema_max_shape}\
\n Datatype : {np.typeDict[self._schema_dtype_num]}\
\n Named Samples : {bool(self._samples_are_named)}\
\n Access Mode : {self._mode}\
\n Number of Samples : {self.__len__()}\
\n Partial Remote Data Refs : {bool(self._contains_partial_remote_data)}\n'
p.text(res)
def __repr__(self):
res = f'{self.__class__}('\
f'repo_pth={self._path}, '\
f'aset_name={self._asetn}, '\
f'default_schema_hash={self._default_schema_hash}, '\
f'isVar={self._schema_variable}, '\
f'varMaxShape={self._schema_max_shape}, '\
f'varDtypeNum={self._schema_dtype_num}, '\
f'mode={self._mode})'
return res
def _open(self):
for val in self._fs.values():
val.open(mode=self._mode)
def _close(self):
for val in self._fs.values():
val.close()
@property
def name(self) -> str:
"""Name of the arrayset. Read-Only attribute.
"""
return self._asetn
@property
def dtype(self) -> np.dtype:
"""Datatype of the arrayset schema. Read-only attribute.
"""
return np.typeDict[self._schema_dtype_num]
@property
def shape(self) -> Tuple[int]:
"""Shape (or `max_shape`) of the arrayset sample tensors. Read-only attribute.
"""
return self._schema_max_shape
@property
def variable_shape(self) -> bool:
"""Bool indicating if arrayset schema is variable sized. Read-only attribute.
"""
return self._schema_variable
@property
def named_samples(self) -> bool:
"""Bool indicating if samples are named. Read-only attribute.
"""
return self._samples_are_named
@property
def iswriteable(self) -> bool:
"""Bool indicating if this arrayset object is write-enabled. Read-only attribute.
"""
return False if self._mode == 'r' else True
@property
def contains_remote_references(self) -> bool:
"""Bool indicating if all samples exist locally or if some reference remote sources.
"""
return bool(self._contains_partial_remote_data)
@property
def remote_reference_sample_keys(self) -> List[str]:
"""Returns sample names whose data is stored in a remote server reference.
Returns
-------
List[str]
list of sample keys in the arrayset.
"""
remote_keys = []
if self.contains_remote_references is True:
for sampleName, beLoc in self._sspecs.items():
if beLoc.backend == '50':
remote_keys.append(sampleName)
return remote_keys
def keys(self) -> Iterator[Union[str, int]]:
"""generator which yields the names of every sample in the arrayset
For write enabled checkouts, is technically possible to iterate over the
arrayset object while adding/deleting data, in order to avoid internal
python runtime errors (``dictionary changed size during iteration`` we
have to make a copy of they key list before beginning the loop.) While
not necessary for read checkouts, we perform the same operation for both
read and write checkouts in order in order to avoid differences.
Yields
------
Iterator[Union[str, int]]
keys of one sample at a time inside the arrayset
"""
for name in tuple(self._sspecs.keys()):
yield name
def values(self) -> Iterator[np.ndarray]:
"""generator which yields the tensor data for every sample in the arrayset
For write enabled checkouts, is technically possible to iterate over the
arrayset object while adding/deleting data, in order to avoid internal
python runtime errors (``dictionary changed size during iteration`` we
have to make a copy of they key list before beginning the loop.) While
not necessary for read checkouts, we perform the same operation for both
read and write checkouts in order in order to avoid differences.
Yields
------
Iterator[np.ndarray]
values of one sample at a time inside the arrayset
"""
for name in tuple(self._sspecs.keys()):
yield self.get(name)
def items(self) -> Iterator[Tuple[Union[str, int], np.ndarray]]:
"""generator yielding two-tuple of (name, tensor), for every sample in the arrayset.
For write enabled checkouts, is technically possible to iterate over the
arrayset object while adding/deleting data, in order to avoid internal
python runtime errors (``dictionary changed size during iteration`` we
have to make a copy of they key list before beginning the loop.) While
not necessary for read checkouts, we perform the same operation for both
read and write checkouts in order in order to avoid differences.
Yields
------
Iterator[Tuple[Union[str, int], np.ndarray]]
sample name and stored value for every sample inside the arrayset
"""
for name in tuple(self._sspecs.keys()):
yield (name, self.get(name))
def get(self, name: Union[str, int]) -> np.ndarray:
"""Retrieve a sample in the arrayset with a specific name.
The method is thread/process safe IF used in a read only checkout. Use
this if the calling application wants to manually manage multiprocess
logic for data retrieval. Otherwise, see the :py:meth:`get_batch` method
to retrieve multiple data samples simultaneously. This method uses
multiprocess pool of workers (managed by hangar) to drastically increase
access speed and simplify application developer workflows.
.. note::
in most situations, we have observed little to no performance
improvements when using multithreading. However, access time can be
nearly linearly decreased with the number of CPU cores / workers if
multiprocessing is used.
Parameters
----------
name : Union[str, int]
Name of the sample to retrieve data for.
Returns
-------
np.ndarray
Tensor data stored in the arrayset archived with provided name(s).
Raises
------
KeyError
if the arrayset does not contain data with the provided name
"""
try:
spec = self._sspecs[name]
data = self._fs[spec.backend].read_data(spec)
return data
except KeyError:
raise KeyError(f'HANGAR KEY ERROR:: data: {name} not in aset: {self._asetn}')
def get_batch(self,
names: Iterable[Union[str, int]],
*,
n_cpus: int = None,
start_method: str = 'spawn') -> List[np.ndarray]:
"""Retrieve a batch of sample data with the provided names.
This method is (technically) thread & process safe, though it should not
be called in parallel via multithread/process application code; This
method has been seen to drastically decrease retrieval time of sample
batches (as compared to looping over single sample names sequentially).
Internally it implements a multiprocess pool of workers (managed by
hangar) to simplify application developer workflows.
Parameters
----------
name : Iterable[Union[str, int]]
list/tuple of sample names to retrieve data for.
n_cpus : int, kwarg-only
if not None, uses num_cpus / 2 of the system for retrieval. Setting
this value to ``1`` will not use a multiprocess pool to perform the
work. Default is None
start_method : str, kwarg-only
One of 'spawn', 'fork', 'forkserver' specifying the process pool
start method. Not all options are available on all platforms. see
python multiprocess docs for details. Default is 'spawn'.
Returns
-------
List[np.ndarray]
Tensor data stored in the arrayset archived with provided name(s).
If a single sample name is passed in as the, the corresponding
np.array data will be returned.
If a list/tuple of sample names are pass in the ``names`` argument,
a tuple of size ``len(names)`` will be returned where each element
is an np.array containing data at the position it's name listed in
the ``names`` parameter.
Raises
------
KeyError
if the arrayset does not contain data with the provided name
"""
n_jobs = n_cpus if isinstance(n_cpus, int) else int(cpu_count() / 2)
with get_context(start_method).Pool(n_jobs) as p:
data = p.map(self.get, names)
return data
class ArraysetDataWriter(ArraysetDataReader):
"""Class implementing methods to write data to a arrayset.
Writer specific methods are contained here, and while read functionality is
shared with the methods common to :class:`ArraysetDataReader`. Write-enabled
checkouts are not thread/process safe for either ``writes`` OR ``reads``,
a restriction we impose for ``write-enabled`` checkouts in order to ensure
data integrity above all else.
.. seealso:: :class:`ArraysetDataReader`
"""
def __init__(self,
stagehashenv: lmdb.Environment,
default_schema_backend: str,
*args, **kwargs):
"""Developer documentation for init method.
Extends the functionality of the ArraysetDataReader class. The __init__
method requires quite a number of ``**kwargs`` to be passed along to the
:class:`ArraysetDataReader` class.
Parameters
----------
stagehashenv : lmdb.Environment
db where the newly added staged hash data records are stored
default_schema_backend : str
backend code to act as default where new data samples are added.
**kwargs:
See args of :class:`ArraysetDataReader`
"""
super().__init__(*args, **kwargs)
self._stagehashenv = stagehashenv
self._dflt_backend: str = default_schema_backend
self._dataenv: lmdb.Environment = kwargs['dataenv']
self._hashenv: lmdb.Environment = kwargs['hashenv']
self._TxnRegister = TxnRegister()
self._hashTxn: Optional[lmdb.Transaction] = None
self._dataTxn: Optional[lmdb.Transaction] = None
def __enter__(self):
self._is_conman = True
self._hashTxn = self._TxnRegister.begin_writer_txn(self._hashenv)
self._dataTxn = self._TxnRegister.begin_writer_txn(self._dataenv)
self._stageHashTxn = self._TxnRegister.begin_writer_txn(self._stagehashenv)
for k in self._fs.keys():
self._fs[k].__enter__()
return self
def __exit__(self, *exc):
self._is_conman = False
self._hashTxn = self._TxnRegister.commit_writer_txn(self._hashenv)
self._dataTxn = self._TxnRegister.commit_writer_txn(self._dataenv)
self._stageHashTxn = self._TxnRegister.commit_writer_txn(self._stagehashenv)
for k in self._fs.keys():
self._fs[k].__exit__(*exc)
def __setitem__(self, key: Union[str, int], value: np.ndarray) -> Union[str, int]:
"""Store a piece of data in a arrayset. Convenience method to :meth:`add`.
.. seealso:: :meth:`add`
Parameters
----------
key : Union[str, int]
name of the sample to add to the arrayset
value : np.array
tensor data to add as the sample
Returns
-------
Union[str, int]
sample name of the stored data (assuming operation was successful)
"""
self.add(value, key)
return key
def __delitem__(self, key: Union[str, int]) -> Union[str, int]:
"""Remove a sample from the arrayset. Convenience method to :meth:`remove`.
.. seealso:: :meth:`remove`
Parameters
----------
key : Union[str, int]
Name of the sample to remove from the arrayset
Returns
-------
Union[str, int]
Name of the sample removed from the arrayset (assuming operation successful)
"""
return self.remove(key)
@property
def _backend(self) -> str:
"""The default backend for the arrayset which can be written to
Returns
-------
str
numeric format code of the default backend.
"""
return self._dflt_backend
def add(self, data: np.ndarray, name: Union[str, int] = None,
**kwargs) -> Union[str, int]:
"""Store a piece of data in a arrayset
Parameters
----------
data : np.ndarray
data to store as a sample in the arrayset.
name : Union[str, int], optional
name to assign to the same (assuming the arrayset accepts named
samples), If str, can only contain alpha-numeric ascii characters
(in addition to '-', '.', '_'). Integer key must be >= 0. by default
None
Returns
-------
Union[str, int]
sample name of the stored data (assuming the operation was successful)
Raises
------
ValueError
If no `name` arg was provided for arrayset requiring named samples.
ValueError
If input data tensor rank exceeds specified rank of arrayset samples.
ValueError
For variable shape arraysets, if a dimension size of the input data
tensor exceeds specified max dimension size of the arrayset samples.
ValueError
For fixed shape arraysets, if input data dimensions do not exactly match
specified arrayset dimensions.
ValueError
If type of `data` argument is not an instance of np.ndarray.
ValueError
If `data` is not "C" contiguous array layout.
ValueError
If the datatype of the input data does not match the specified data type of
the arrayset
"""
# ------------------------ argument type checking ---------------------
try:
if self._samples_are_named and not is_suitable_user_key(name):
raise ValueError(
f'Name provided: `{name}` type: {type(name)} is invalid. Can only contain '
f'alpha-numeric or "." "_" "-" ascii characters (no whitespace) or int >= 0')
elif not self._samples_are_named:
name = kwargs['bulkn'] if 'bulkn' in kwargs else parsing.generate_sample_name()
if not isinstance(data, np.ndarray):
raise ValueError(f'`data` argument type: {type(data)} != `np.ndarray`')
elif data.dtype.num != self._schema_dtype_num:
raise ValueError(
f'dtype: {data.dtype} != aset: {np.typeDict[self._schema_dtype_num]}.')
elif not data.flags.c_contiguous:
raise ValueError(f'`data` must be "C" contiguous array.')
if self._schema_variable is True:
if data.ndim != len(self._schema_max_shape):
raise ValueError(
f'`data` rank: {data.ndim} != aset rank: {len(self._schema_max_shape)}')
for dDimSize, schDimSize in zip(data.shape, self._schema_max_shape):
if dDimSize > schDimSize:
raise ValueError(
f'dimensions of `data`: {data.shape} exceed variable max '
f'dims of aset: {self._asetn} specified max dimensions: '
f'{self._schema_max_shape}: SIZE: {dDimSize} > {schDimSize}')
elif data.shape != self._schema_max_shape:
raise ValueError(
f'`data` shape: {data.shape} != fixed aset shape: {self._schema_max_shape}')
except ValueError as e:
raise e from None
# --------------------- add data to storage backend -------------------
try:
tmpconman = not self._is_conman
if tmpconman:
self.__enter__()
full_hash = hashlib.blake2b(data.tobytes(), digest_size=20).hexdigest()
hashKey = parsing.hash_data_db_key_from_raw_key(full_hash)
# check if data record already exists with given key
dataRecKey = parsing.data_record_db_key_from_raw_key(self._asetn, name)
existingDataRecVal = self._dataTxn.get(dataRecKey, default=False)
if existingDataRecVal:
# check if data record already with same key & hash value
existingDataRec = parsing.data_record_raw_val_from_db_val(existingDataRecVal)
if full_hash == existingDataRec.data_hash:
return name
# write new data if data hash does not exist
existingHashVal = self._hashTxn.get(hashKey, default=False)
if existingHashVal is False:
hashVal = self._fs[self._dflt_backend].write_data(data)
self._hashTxn.put(hashKey, hashVal)
self._stageHashTxn.put(hashKey, hashVal)
self._sspecs[name] = backend_decoder(hashVal)
else:
self._sspecs[name] = backend_decoder(existingHashVal)
# add the record to the db
dataRecVal = parsing.data_record_db_val_from_raw_val(full_hash)
self._dataTxn.put(dataRecKey, dataRecVal)
if existingDataRecVal is False:
asetCountKey = parsing.arrayset_record_count_db_key_from_raw_key(self._asetn)
asetCountVal = self._dataTxn.get(asetCountKey, default='0'.encode())
newAsetCount = parsing.arrayset_record_count_raw_val_from_db_val(asetCountVal) + 1
newAsetCountVal = parsing.arrayset_record_count_db_val_from_raw_val(newAsetCount)
self._dataTxn.put(asetCountKey, newAsetCountVal)
finally:
if tmpconman:
self.__exit__()
return name
def remove(self, name: Union[str, int]) -> Union[str, int]:
    """Remove a sample with the provided name from the arrayset.

    .. Note::

        This operation will NEVER actually remove any data from disk. If
        you commit a tensor at any point in time, **it will always remain
        accessible by checking out a previous commit** when the tensor was
        present. This is just a way to tell Hangar that you don't want some
        piece of data to clutter up the current version of the repository.

    .. Warning::

        Though this may change in a future release, in the current version of
        Hangar, we cannot recover references to data which was added to the
        staging area, written to disk, but then removed **before** a commit
        operation was run. This would be a similar sequence of events as:
        checking out a `git` branch, changing a bunch of text in the file, and
        immediately performing a hard reset. If it was never committed, git
        doesn't know about it, and (at the moment) neither does Hangar.

    Parameters
    ----------
    name : Union[str, int]
        name of the sample to remove.

    Returns
    -------
    Union[str, int]
        If the operation was successful, name of the data sample deleted.

    Raises
    ------
    KeyError
        If a sample with the provided name does not exist in the arrayset.
    """
    # Open a writer transaction ourselves only when no surrounding
    # context-manager scope is already holding one open.
    if not self._is_conman:
        self._dataTxn = self._TxnRegister.begin_writer_txn(self._dataenv)

    dataKey = parsing.data_record_db_key_from_raw_key(self._asetn, name)
    try:
        isRecordDeleted = self._dataTxn.delete(dataKey)
        if isRecordDeleted is False:
            raise KeyError(f'No sample: {name} type: {type(name)} exists in: {self._asetn}')
        del self._sspecs[name]

        asetDataCountKey = parsing.arrayset_record_count_db_key_from_raw_key(self._asetn)
        asetDataCountVal = self._dataTxn.get(asetDataCountKey)
        newAsetDataCount = parsing.arrayset_record_count_raw_val_from_db_val(asetDataCountVal) - 1

        # if this is the last data piece existing in a arrayset, remove the arrayset
        if newAsetDataCount == 0:
            asetSchemaKey = parsing.arrayset_record_schema_db_key_from_raw_key(self._asetn)
            self._dataTxn.delete(asetDataCountKey)
            self._dataTxn.delete(asetSchemaKey)
            totalNumAsetsKey = parsing.arrayset_total_count_db_key()
            totalNumAsetsVal = self._dataTxn.get(totalNumAsetsKey)
            newTotalNumAsets = parsing.arrayset_total_count_raw_val_from_db_val(totalNumAsetsVal) - 1
            # if no more arraysets exist, delete the indexing key
            if newTotalNumAsets == 0:
                self._dataTxn.delete(totalNumAsetsKey)
            # otherwise just decrement the count of asets
            else:
                newTotalNumAsetsVal = parsing.arrayset_total_count_db_val_from_raw_val(newTotalNumAsets)
                # BUG FIX: lmdb's ``Transaction.put`` requires both a key and
                # a value. The original call passed only the value, which
                # raised ``TypeError`` whenever the last sample of an
                # arrayset was removed while other arraysets still existed.
                self._dataTxn.put(totalNumAsetsKey, newTotalNumAsetsVal)
        # otherwise just decrement the arrayset record count
        else:
            newAsetDataCountVal = parsing.arrayset_record_count_db_val_from_raw_val(newAsetDataCount)
            self._dataTxn.put(asetDataCountKey, newAsetDataCountVal)
    except KeyError as e:
        raise e from None
    finally:
        # Commit only if we opened the transaction ourselves above.
        if not self._is_conman:
            self._TxnRegister.commit_writer_txn(self._dataenv)
    return name
"""
Constructor and Interaction Class for Arraysets
--------------------------------------------------
"""
class Arraysets(object):
    """Common access patterns and initialization/removal of arraysets in a checkout.

    This object is the entry point to all tensor data stored in their individual
    arraysets. Each arrayset contains a common schema which dictates the general
    shape, dtype, and access patterns which the backends optimize access for. The
    methods contained within allow us to create, remove, query, and access these
    collections of common tensors.
    """

    def __init__(self,
                 mode: str,
                 repo_pth: os.PathLike,
                 arraysets: Mapping[str, Union[ArraysetDataReader, ArraysetDataWriter]],
                 hashenv: Optional[lmdb.Environment] = None,
                 dataenv: Optional[lmdb.Environment] = None,
                 stagehashenv: Optional[lmdb.Environment] = None):
        """Developer documentation for init method.

        .. warning::

            This class should not be instantiated directly. Instead use the factory
            functions :py:meth:`_from_commit` or :py:meth:`_from_staging` to return
            a pre-initialized class instance appropriately constructed for either a
            read-only or write-enabled checkout.

        Parameters
        ----------
        mode : str
            one of 'r' or 'a' to indicate read or write mode
        repo_pth : os.PathLike
            path to the repository on disk
        arraysets : Mapping[str, Union[ArraysetDataReader, ArraysetDataWriter]]
            dictionary of ArraysetData objects
        hashenv : Optional[lmdb.Environment]
            environment handle for hash records
        dataenv : Optional[lmdb.Environment]
            environment handle for the unpacked records. `data` is meant to refer
            to the fact that the stageenv is passed in for write-enabled, and a
            cmtrefenv for read-only checkouts.
        stagehashenv : Optional[lmdb.Environment]
            environment handle for newly added staged data hash records.
        """
        self._mode = mode
        self._repo_pth = repo_pth
        self._arraysets = arraysets
        # Tracks whether a context-manager scope (``with asets:``) is active.
        self._is_conman = False
        self._contains_partial_remote_data: bool = False
        # Only write-enabled checkouts keep handles to the lmdb environments
        # required to mutate records; read-only checkouts never touch them.
        if (mode == 'a'):
            self._hashenv = hashenv
            self._dataenv = dataenv
            self._stagehashenv = stagehashenv
        self.__setup()

    def __setup(self):
        """Do not allow users to use internal functions
        """
        self._from_commit = None  # should never be able to access
        self._from_staging_area = None  # should never be able to access
        if self._mode == 'r':
            # Read-only checkouts: disable every mutating entry point by
            # shadowing the bound methods with ``None`` on the instance.
            self.init_arrayset = None
            self.remove_aset = None
            self.multi_add = None
            self.__delitem__ = None
            self.__setitem__ = None

    def _open(self):
        # Open backend file handles on every contained arrayset accessor.
        for v in self._arraysets.values():
            v._open()

    def _close(self):
        # Release backend file handles on every contained arrayset accessor.
        for v in self._arraysets.values():
            v._close()
    # ------------- Methods Available To Both Read & Write Checkouts ------------------

    def _repr_pretty_(self, p, cycle):
        # IPython pretty-printer hook: shows writeability plus one line per
        # arrayset with its name and whether it holds partial remote
        # (reference-only) data.
        # NOTE(review): the backslash-newlines below occur *inside* the
        # f-string literal, so any leading whitespace on the continued lines
        # becomes part of the rendered text. The original alignment was lost
        # in this copy and could not be recovered -- confirm rendered output.
        res = f'Hangar {self.__class__.__name__}\
\n Writeable: {bool(0 if self._mode == "r" else 1)}\
\n Arrayset Names / Partial Remote References:\
\n - ' + '\n - '.join(
            f'{asetn} / {aset.contains_remote_references}'
            for asetn, aset in self._arraysets.items())
        p.text(res)

    def __repr__(self):
        # Unambiguous developer representation: repo path, arraysets, mode.
        res = f'{self.__class__}('\
              f'repo_pth={self._repo_pth}, '\
              f'arraysets={self._arraysets}, '\
              f'mode={self._mode})'
        return res
def _ipython_key_completions_(self):
"""Let ipython know that any key based access can use the arrayset keys
Since we don't want to inherit from dict, nor mess with `__dir__` for the
sanity of developers, this is the best way to ensure users can autocomplete
keys.
Returns
-------
list
list of strings, each being one of the arrayset keys for access.
"""
return self.keys()
def __getitem__(self, key: str) -> Union[ArraysetDataReader, ArraysetDataWriter]:
"""Dict style access to return the arrayset object with specified key/name.
Parameters
----------
key : string
name of the arrayset object to get.
Returns
-------
:class:`ArraysetDataReader` or :class:`ArraysetDataWriter`
The object which is returned depends on the mode of checkout specified.
If the arrayset was checked out with write-enabled, return writer object,
otherwise return read only object.
"""
return self.get(key)
def __setitem__(self, key, value):
"""Specifically prevent use dict style setting for arrayset objects.
Arraysets must be created using the factory function :py:meth:`init_arrayset`.
Raises
------
PermissionError
This operation is not allowed under any circumstance
"""
msg = f'HANGAR NOT ALLOWED:: To add a arrayset use `init_arrayset` method.'
raise PermissionError(msg)
def __contains__(self, key: str) -> bool:
"""Determine if a arrayset with a particular name is stored in the checkout
Parameters
----------
key : str
name of the arrayset to check for
Returns
-------
bool
True if a arrayset with the provided name exists in the checkout,
otherwise False.
"""
return True if key in self._arraysets else False
def __len__(self) -> int:
return len(self._arraysets)
def __iter__(self) -> Iterable[str]:
return iter(self._arraysets)
@property
def iswriteable(self) -> bool:
"""Bool indicating if this arrayset object is write-enabled. Read-only attribute.
"""
return False if self._mode == 'r' else True
@property
def contains_remote_references(self) -> Mapping[str, bool]:
"""Dict of bool indicating data reference locality in each arrayset.
Returns
-------
Mapping[str, bool]
For each arrayset name key, boolean value where False indicates all
samples in arrayset exist locally, True if some reference remote
sources.
"""
res: Mapping[str, bool] = {}
for asetn, aset in self._arraysets.items():
res[asetn] = aset.contains_remote_references
return res
@property
def remote_sample_keys(self) -> Mapping[str, Iterable[Union[int, str]]]:
"""Determine arraysets samples names which reference remote sources.
Returns
-------
Mapping[str, Iterable[Union[int, str]]]
dict where keys are arrayset names and values are iterables of
samples in the arrayset containing remote references
"""
res: Mapping[str, Iterable[Union[int, str]]] = {}
for asetn, aset in self._arraysets.items():
res[asetn] = aset.remote_reference_sample_keys
return res
def keys(self) -> List[str]:
"""list all arrayset keys (names) in the checkout
Returns
-------
List[str]
list of arrayset names
"""
return list(self._arraysets.keys())
def values(self) -> Iterable[Union[ArraysetDataReader, ArraysetDataWriter]]:
"""yield all arrayset object instances in the checkout.
Yields
-------
Iterable[Union[ArraysetDataReader, ArraysetDataWriter]]
Generator of ArraysetData accessor objects (set to read or write mode
as appropriate)
"""
for asetObj in self._arraysets.values():
wr = cm_weakref_obj_proxy(asetObj)
yield wr
def items(self) -> Iterable[Tuple[str, Union[ArraysetDataReader, ArraysetDataWriter]]]:
"""generator providing access to arrayset_name, :class:`Arraysets`
Yields
------
Iterable[Tuple[str, Union[ArraysetDataReader, ArraysetDataWriter]]]
returns two tuple of all all arrayset names/object pairs in the checkout.
"""
for asetN, asetObj in self._arraysets.items():
wr = cm_weakref_obj_proxy(asetObj)
yield (asetN, wr)
def get(self, name: str) -> Union[ArraysetDataReader, ArraysetDataWriter]:
"""Returns a arrayset access object.
This can be used in lieu of the dictionary style access.
Parameters
----------
name : str
name of the arrayset to return
Returns
-------
Union[ArraysetDataReader, ArraysetDataWriter]
ArraysetData accessor (set to read or write mode as appropriate) which
governs interaction with the data
Raises
------
KeyError
If no arrayset with the given name exists in the checkout
"""
try:
wr = cm_weakref_obj_proxy(self._arraysets[name])
return wr
except KeyError:
e = KeyError(f'No arrayset exists with name: {name}')
raise e from None
# ------------------------ Writer-Enabled Methods Only ------------------------------
def __delitem__(self, key: str) -> str:
"""remove a arrayset and all data records if write-enabled process.
Parameters
----------
key : str
Name of the arrayset to remove from the repository. This will remove
all records from the staging area (though the actual data and all
records are still accessible) if they were previously committed
Returns
-------
str
If successful, the name of the removed arrayset.
Raises
------
PermissionError
If this is a read-only checkout, no operation is permitted.
"""
return self.remove_aset(key)
def __enter__(self):
self._is_conman = True
for dskey in list(self._arraysets):
self._arraysets[dskey].__enter__()
return self
def __exit__(self, *exc):
self._is_conman = False
for dskey in list(self._arraysets):
self._arraysets[dskey].__exit__(*exc)
    def multi_add(self, mapping: Mapping[str, np.ndarray]) -> str:
        """Add related samples to un-named arraysets with the same generated key.

        If you have multiple arraysets in a checkout whose samples are related to
        each other in some manner, there are two ways of associating samples
        together:

        1) using named arraysets and setting each tensor in each arrayset to the
           same sample "name" using un-named arraysets.
        2) using this "add" method. which accepts a dictionary of "arrayset
           names" as keys, and "tensors" (ie. individual samples) as values.

        When method (2) - this method - is used, the internally generated sample
        ids will be set to the same value for the samples in each arrayset. That
        way a user can iterate over the arrayset key's in one sample, and use
        those same keys to get the other related tensor samples in another
        arrayset.

        Parameters
        ----------
        mapping: Mapping[str, np.ndarray]
            Dict mapping (any number of) arrayset names to tensor data (samples)
            which to add. The arraysets must exist, and must be set to accept
            samples which are not named by the user

        Returns
        -------
        str
            generated id (key) which each sample is stored under in their
            corresponding arrayset. This is the same for all samples specified in
            the input dictionary.

        Raises
        ------
        KeyError
            If no arrayset with the given name exists in the checkout
        """
        try:
            # Enter the context-manager scope ourselves only if the caller has
            # not already done so; remember the fact so we exit symmetrically.
            tmpconman = not self._is_conman
            if tmpconman:
                self.__enter__()
            # Validate that every target arrayset exists *before* writing
            # anything, so a bad key cannot leave a partially applied add.
            if not all([k in self._arraysets for k in mapping.keys()]):
                raise KeyError(
                    f'some key(s): {mapping.keys()} not in aset(s): {self._arraysets.keys()}')
            # One shared generated name ties the related samples together.
            data_name = parsing.generate_sample_name()
            for k, v in mapping.items():
                self._arraysets[k].add(v, bulkn=data_name)
        except KeyError as e:
            raise e from None
        finally:
            if tmpconman:
                self.__exit__()
        return data_name
    def init_arrayset(self,
                      name: str,
                      shape: Union[int, Tuple[int]] = None,
                      dtype: np.dtype = None,
                      prototype: np.ndarray = None,
                      named_samples: bool = True,
                      variable_shape: bool = False,
                      *,
                      backend: str = None) -> ArraysetDataWriter:
        """Initializes a arrayset in the repository.

        Arraysets are groups of related data pieces (samples). All samples within
        a arrayset have the same data type, and number of dimensions. The size of
        each dimension can be either fixed (the default behavior) or variable
        per sample.

        For fixed dimension sizes, all samples written to the arrayset must have
        the same size that was initially specified upon arrayset initialization.
        Variable size arraysets on the other hand, can write samples with
        dimensions of any size less than a maximum which is required to be set
        upon arrayset creation.

        Parameters
        ----------
        name : str
            The name assigned to this arrayset.
        shape : Union[int, Tuple[int]]
            The shape of the data samples which will be written in this arrayset.
            This argument and the `dtype` argument are required if a `prototype`
            is not provided, defaults to None.
        dtype : np.dtype
            The datatype of this arrayset. This argument and the `shape` argument
            are required if a `prototype` is not provided., defaults to None.
        prototype : np.ndarray
            A sample array of correct datatype and shape which will be used to
            initialize the arrayset storage mechanisms. If this is provided, the
            `shape` and `dtype` arguments must not be set, defaults to None.
        named_samples : bool, optional
            If the samples in the arrayset have names associated with them. If set,
            all samples must be provided names, if not, no name will be assigned.
            defaults to True, which means all samples should have names.
        variable_shape : bool, optional
            If this is a variable sized arrayset. If true, a the maximum shape is
            set from the provided `shape` or `prototype` argument. Any sample
            added to the arrayset can then have dimension sizes <= to this
            initial specification (so long as they have the same rank as what
            was specified) defaults to False.
        backend : DEVELOPER USE ONLY. str, optional, kwarg only
            Backend which should be used to write the arrayset files on disk.

        Returns
        -------
        :class:`ArraysetDataWriter`
            instance object of the initialized arrayset.

        Raises
        ------
        ValueError
            If provided name contains any non ascii, non alpha-numeric characters.
        ValueError
            If required `shape` and `dtype` arguments are not provided in absence of
            `prototype` argument.
        ValueError
            If `prototype` argument is not a C contiguous ndarray.
        LookupError
            If a arrayset already exists with the provided name.
        ValueError
            If rank of maximum tensor shape > 31.
        ValueError
            If zero sized dimension in `shape` argument
        ValueError
            If the specified backend is not valid.
        """
        # ------------- Checks for argument validity --------------------------
        try:
            if not is_suitable_user_key(name):
                raise ValueError(
                    f'Arrayset name provided: `{name}` is invalid. Can only contain '
                    f'alpha-numeric or "." "_" "-" ascii characters (no whitespace).')
            if name in self._arraysets:
                raise LookupError(f'KEY EXISTS: arrayset already exists with name: {name}.')

            if prototype is not None:
                # Explicit prototype: must be a C-contiguous ndarray so its
                # shape/dtype can safely seed the storage backend.
                if not isinstance(prototype, np.ndarray):
                    raise ValueError(
                        f'If specified (not None) `prototype` argument be `np.ndarray`-like.'
                        f'Invalid value: {prototype} of type: {type(prototype)}')
                elif not prototype.flags.c_contiguous:
                    raise ValueError(f'`prototype` must be "C" contiguous array.')
            elif isinstance(shape, (tuple, list, int)) and (dtype is not None):
                # No prototype given -> synthesize one from `shape` / `dtype`.
                prototype = np.zeros(shape, dtype=dtype)
            else:
                raise ValueError(f'`shape` & `dtype` args required if no `prototype` set.')

            if (0 in prototype.shape) or (prototype.ndim > 31):
                raise ValueError(
                    f'Invalid shape specification with ndim: {prototype.ndim} and shape: '
                    f'{prototype.shape}. Array rank > 31 dimensions not allowed AND '
                    'all dimension sizes must be > 0.')

            if backend is not None:
                if backend not in BACKEND_ACCESSOR_MAP:
                    raise ValueError(f'Backend specifier: {backend} not known')
            else:
                # Pick a storage backend appropriate for the array properties.
                backend = backend_from_heuristics(prototype)
        except (ValueError, LookupError) as e:
            raise e from None

        # ----------- Determine schema format details -------------------------
        # Schema identity = hash over (shape, size, dtype-number) so identical
        # specifications across arraysets share one schema record.
        schema_format = np.array(
            (*prototype.shape, prototype.size, prototype.dtype.num), dtype=np.uint64)
        schema_hash = hashlib.blake2b(schema_format.tobytes(), digest_size=6).hexdigest()
        asetCountKey = parsing.arrayset_record_count_db_key_from_raw_key(name)
        asetCountVal = parsing.arrayset_record_count_db_val_from_raw_val(0)
        asetSchemaKey = parsing.arrayset_record_schema_db_key_from_raw_key(name)
        asetSchemaVal = parsing.arrayset_record_schema_db_val_from_raw_val(
            schema_hash=schema_hash,
            schema_is_var=variable_shape,
            schema_max_shape=prototype.shape,
            schema_dtype=prototype.dtype.num,
            schema_is_named=named_samples,
            schema_default_backend=backend)

        # -------- set vals in lmdb only after schema is sure to exist --------
        dataTxn = TxnRegister().begin_writer_txn(self._dataenv)
        hashTxn = TxnRegister().begin_writer_txn(self._hashenv)
        numAsetsCountKey = parsing.arrayset_total_count_db_key()
        numAsetsCountVal = dataTxn.get(numAsetsCountKey, default=('0'.encode()))
        numAsets_count = parsing.arrayset_total_count_raw_val_from_db_val(numAsetsCountVal)
        numAsetsCountVal = parsing.arrayset_record_count_db_val_from_raw_val(numAsets_count + 1)
        hashSchemaKey = parsing.hash_schema_db_key_from_raw_key(schema_hash)
        hashSchemaVal = asetSchemaVal
        dataTxn.put(asetCountKey, asetCountVal)
        dataTxn.put(numAsetsCountKey, numAsetsCountVal)
        dataTxn.put(asetSchemaKey, asetSchemaVal)
        # overwrite=False: the same schema hash may already be recorded if
        # another arrayset used an identical specification.
        hashTxn.put(hashSchemaKey, hashSchemaVal, overwrite=False)
        TxnRegister().commit_writer_txn(self._dataenv)
        TxnRegister().commit_writer_txn(self._hashenv)

        self._arraysets[name] = ArraysetDataWriter(
            stagehashenv=self._stagehashenv,
            repo_pth=self._repo_pth,
            aset_name=name,
            default_schema_hash=schema_hash,
            samplesAreNamed=named_samples,
            isVar=variable_shape,
            varMaxShape=prototype.shape,
            varDtypeNum=prototype.dtype.num,
            hashenv=self._hashenv,
            dataenv=self._dataenv,
            mode='a',
            default_schema_backend=backend)
        return self.get(name)
    def remove_aset(self, aset_name: str) -> str:
        """remove the arrayset and all data contained within it from the repository.

        Parameters
        ----------
        aset_name : str
            name of the arrayset to remove

        Returns
        -------
        str
            name of the removed arrayset

        Raises
        ------
        KeyError
            If a arrayset does not exist with the provided name
        """
        datatxn = TxnRegister().begin_writer_txn(self._dataenv)
        try:
            if aset_name not in self._arraysets:
                e = KeyError(f'Cannot remove: {aset_name}. Key does not exist.')
                raise e from None
            # Close the accessor's backend handles before dropping records.
            self._arraysets[aset_name]._close()
            self._arraysets.__delitem__(aset_name)
            asetCountKey = parsing.arrayset_record_count_db_key_from_raw_key(aset_name)
            numAsetsKey = parsing.arrayset_total_count_db_key()
            arraysInAset = datatxn.get(asetCountKey)
            # NOTE(review): the *total-count* parser is applied to a
            # *record-count* value here; the two db-value formats appear
            # compatible, but confirm against the `parsing` module.
            recordsToDelete = parsing.arrayset_total_count_raw_val_from_db_val(arraysInAset)
            recordsToDelete = recordsToDelete + 1  # depends on num subkeys per array record
            # Delete the count key plus every sample record, which sort
            # contiguously after it in the lmdb keyspace.
            with datatxn.cursor() as cursor:
                cursor.set_key(asetCountKey)
                for i in range(recordsToDelete):
                    cursor.delete()
                # NOTE(review): this explicit close looks redundant -- the
                # `with` block also closes the cursor on exit.
                cursor.close()
            asetSchemaKey = parsing.arrayset_record_schema_db_key_from_raw_key(aset_name)
            datatxn.delete(asetSchemaKey)
            numAsetsVal = datatxn.get(numAsetsKey)
            numAsets = parsing.arrayset_total_count_raw_val_from_db_val(numAsetsVal) - 1
            if numAsets == 0:
                # Last arrayset removed -> drop the global indexing key.
                datatxn.delete(numAsetsKey)
            else:
                numAsetsVal = parsing.arrayset_total_count_db_val_from_raw_val(numAsets)
                datatxn.put(numAsetsKey, numAsetsVal)
        finally:
            TxnRegister().commit_writer_txn(self._dataenv)
        return aset_name
# ------------------------ Class Factory Functions ------------------------------
@classmethod
def _from_staging_area(cls, repo_pth, hashenv, stageenv, stagehashenv):
"""Class method factory to checkout :class:`Arraysets` in write-enabled mode
This is not a user facing operation, and should never be manually called
in normal operation. Once you get here, we currently assume that
verification of the write lock has passed, and that write operations are
safe.
Parameters
----------
repo_pth : string
directory path to the hangar repository on disk
hashenv : lmdb.Environment
environment where tensor data hash records are open in write mode.
stageenv : lmdb.Environment
environment where staging records (dataenv) are opened in write mode.
stagehashenv: lmdb.Environment
environment where the staged hash records are stored in write mode
Returns
-------
:class:`Arraysets`
Interface class with write-enabled attributes activated and any
arraysets existing initialized in write mode via
:class:`.arrayset.ArraysetDataWriter`.
"""
arraysets = {}
query = RecordQuery(stageenv)
stagedSchemaSpecs = query.schema_specs()
for asetName, schemaSpec in stagedSchemaSpecs.items():
arraysets[asetName] = ArraysetDataWriter(
stagehashenv=stagehashenv,
repo_pth=repo_pth,
aset_name=asetName,
default_schema_hash=schemaSpec.schema_hash,
samplesAreNamed=schemaSpec.schema_is_named,
isVar=schemaSpec.schema_is_var,
varMaxShape=schemaSpec.schema_max_shape,
varDtypeNum=schemaSpec.schema_dtype,
hashenv=hashenv,
dataenv=stageenv,
mode='a',
default_schema_backend=schemaSpec.schema_default_backend)
return cls('a', repo_pth, arraysets, hashenv, stageenv, stagehashenv)
@classmethod
def _from_commit(cls, repo_pth, hashenv, cmtrefenv):
"""Class method factory to checkout :class:`.arrayset.Arraysets` in read-only mode
This is not a user facing operation, and should never be manually called
in normal operation. For read mode, no locks need to be verified, but
construction should occur through the interface to the
:class:`Arraysets` class.
Parameters
----------
repo_pth : string
directory path to the hangar repository on disk
hashenv : lmdb.Environment
environment where tensor data hash records are open in read-only mode.
cmtrefenv : lmdb.Environment
environment where staging checkout records are opened in read-only mode.
Returns
-------
:class:`Arraysets`
Interface class with all write-enabled attributes deactivated
arraysets initialized in read mode via :class:`.arrayset.ArraysetDataReader`.
"""
arraysets = {}
query = RecordQuery(cmtrefenv)
cmtSchemaSpecs = query.schema_specs()
for asetName, schemaSpec in cmtSchemaSpecs.items():
arraysets[asetName] = ArraysetDataReader(
repo_pth=repo_pth,
aset_name=asetName,
default_schema_hash=schemaSpec.schema_hash,
samplesAreNamed=schemaSpec.schema_is_named,
isVar=schemaSpec.schema_is_var,
varMaxShape=schemaSpec.schema_max_shape,
varDtypeNum=schemaSpec.schema_dtype,
dataenv=cmtrefenv,
hashenv=hashenv,
mode='r')
return cls('r', repo_pth, arraysets, None, None, None)
| [
"numpy.zeros",
"multiprocessing.get_context",
"numpy.array",
"warnings.warn",
"multiprocessing.cpu_count"
] | [((47092, 47179), 'numpy.array', 'np.array', (['(*prototype.shape, prototype.size, prototype.dtype.num)'], {'dtype': 'np.uint64'}), '((*prototype.shape, prototype.size, prototype.dtype.num), dtype=np.\n uint64)\n', (47100, 47179), True, 'import numpy as np\n'), ((4465, 4672), 'warnings.warn', 'warnings.warn', (['f"""Arrayset: {self._asetn} contains `reference-only` samples, with actual data residing on a remote server. A `fetch-data` operation is required to access these samples."""', 'UserWarning'], {}), "(\n f'Arrayset: {self._asetn} contains `reference-only` samples, with actual data residing on a remote server. A `fetch-data` operation is required to access these samples.'\n , UserWarning)\n", (4478, 4672), False, 'import warnings\n'), ((15592, 15603), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (15601, 15603), False, 'from multiprocessing import cpu_count, get_context\n'), ((15622, 15647), 'multiprocessing.get_context', 'get_context', (['start_method'], {}), '(start_method)\n', (15633, 15647), False, 'from multiprocessing import cpu_count, get_context\n'), ((46185, 46213), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (46193, 46213), True, 'import numpy as np\n')] |
#
# imgAnalyserTest.py
#
# MIT License - CCD_CAPTURE
#
# Copyright (c) 2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
''' Unit Tests for the imgAnalyser module '''
import unittest
import numpy as np
import cv2
import matplotlib.pyplot as plt
import imgAnalyser
import sys
class TestImgAnalyser(unittest.TestCase):
    """Unit tests for :class:`imgAnalyser.ImgAnalyser`."""

    def setUp(self):
        # Fresh analyser plus a small 4-row x 3-col ramp image (values 0..11)
        # shared by the tests below.
        self.ia = imgAnalyser.ImgAnalyser()
        self.testImg = np.array([[0,1,2],[3,4,5],[6,7,8],[9,10,11]])

    def test_setImg(self):
        # Image is 3 wide (x) by 4 tall (y).
        self.ia.setImg(self.testImg)
        self.assertEqual(self.ia.imgSizeX,3,'incorrect imgSizeX')
        self.assertEqual(self.ia.imgSizeY,4,'incorrect imgSizeY')

    def test_setRoi(self):
        self.ia.setImg(self.testImg)
        # Test basic ROI setting
        self.ia.setRoi((1,1,2,2))
        self.assertEqual(self.ia.roi[0],1,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],1,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],2,'roi Ysize incorrect')
        # Test negative Xorigin -- expected to be clamped to 0.
        self.ia.setRoi((-1,1,2,2))
        self.assertEqual(self.ia.roi[0],0,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],1,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],2,'roi Ysize incorrect')
        # Test negative Yorigin -- expected to be clamped to 0.
        self.ia.setRoi((0,-1,2,2))
        self.assertEqual(self.ia.roi[0],0,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],0,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],2,'roi Ysize incorrect')
        # Test XSize overflow -- width clipped to fit the 3-wide image.
        self.ia.setRoi((1,1,4,2))
        self.assertEqual(self.ia.roi[0],1,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],1,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],2,'roi Ysize incorrect')
        # Test YSize overflow -- height clipped to fit the 4-tall image.
        self.ia.setRoi((1,1,4,4))
        self.assertEqual(self.ia.roi[0],1,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],1,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],3,'roi Ysize incorrect')
        # Test Non-integer ROI -- expected to be truncated to int.
        self.ia.setRoi((1.5,1,4,4))
        self.assertEqual(self.ia.roi[0],1,'roi Xorigin incorrect')
        self.assertEqual(self.ia.roi[1],1,'roi Yorigin incorrect')
        self.assertEqual(self.ia.roi[2],2,'roi Xsize incorrect')
        self.assertEqual(self.ia.roi[3],3,'roi Ysize incorrect')

    def test_setSetProfiles(self):
        # Profile widths default to 1 and are otherwise derived from the
        # requested values (halved/rounded by the implementation).
        self.ia.setImg(self.testImg)
        self.ia.setRoi((0,0,2,2))
        self.ia.setProfiles()
        self.assertEqual(self.ia.xProfileWidth,1,'xProfileWidth incorrect')
        self.assertEqual(self.ia.yProfileWidth,1,'yProfileWidth incorrect')
        self.ia.setProfiles(3,1)
        self.assertEqual(self.ia.xProfileWidth,2,'xProfileWidth incorrect')
        self.assertEqual(self.ia.yProfileWidth,1,'yProfileWidth incorrect')
        self.ia.setProfiles(1,3)
        self.assertEqual(self.ia.xProfileWidth,1,'xProfileWidth incorrect')
        self.assertEqual(self.ia.yProfileWidth,2,'yProfileWidth incorrect')

    def test_getXProfileData(self):
        # X and Y profiles over the ramp image: each value is the mean of the
        # averaged rows/columns inside the ROI.
        self.ia.setImg(self.testImg)
        self.ia.setRoi((0,0,3,3),(2,2))
        xProfile = self.ia.getXProfile()
        #print(xProfile)
        correctxProfile = np.array([[1.5,2.5,3.5]])
        #print(correctxProfile)
        self.ia.setRoi((0,0,3,4),(2,2))
        yProfile = self.ia.getYProfile()
        print("yProfile=",yProfile)
        correctProfile = np.array([0.5,3.5,6.5,9.5])
        print("correctProfile=",correctProfile)
        sys.stdout.flush()
        self.assertEqual(np.allclose(xProfile,correctxProfile),True,"Xprofile wrong")
        self.assertEqual(np.allclose(yProfile,correctProfile),True,"Yprofile wrong")

    def test_getRoi(self):
        # ROI extraction returns the top-left 2x2 corner of the ramp image.
        self.ia.setImg(self.testImg)
        self.ia.setRoi((0,0,2,2))
        roi = self.ia.getRoi()
        #print(roi)
        correctProfile = np.array([[0,1],[3,4]])
        #print(correctProfile)
        self.assertEqual(np.allclose(roi,correctProfile),True,"roi wrong")

    def test_realImage(self):
        # NOTE(review): requires `./test_image.tif` on disk and a working
        # display; cv2.waitKey(0) blocks until a key is pressed, so this test
        # is interactive and unsuitable for unattended CI runs.
        self.ia.setImg("./test_image.tif")
        self.ia.setRoi((380,350,200,1800))
        roiImg = self.ia.resizeImgForWeb(self.ia.getRoiImg())
        cv2.imshow("roiImg",roiImg)
        cv2.waitKey(0)
        roiStats = self.ia.getRoiStats()
        #print(roiStats)
        xStats = self.ia.getXProfileStats()
        #print(xStats)
        yStats = self.ia.getYProfileStats()
        #print(yStats)
        # Standard deviation should be ~16.7% of the mean for this fixture.
        self.assertAlmostEqual(roiStats[3]/roiStats[1],0.167,3,"ROI SD%")
if __name__ == '__main__':
    # Allow the suite to be executed directly: `python imgAnalyserTest.py`.
    unittest.main()
| [
"unittest.main",
"imgAnalyser.ImgAnalyser",
"cv2.waitKey",
"numpy.allclose",
"numpy.array",
"sys.stdout.flush",
"cv2.imshow"
] | [((5941, 5956), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5954, 5956), False, 'import unittest\n'), ((1383, 1408), 'imgAnalyser.ImgAnalyser', 'imgAnalyser.ImgAnalyser', ([], {}), '()\n', (1406, 1408), False, 'import imgAnalyser\n'), ((1432, 1488), 'numpy.array', 'np.array', (['[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]'], {}), '([[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]])\n', (1440, 1488), True, 'import numpy as np\n'), ((4613, 4640), 'numpy.array', 'np.array', (['[[1.5, 2.5, 3.5]]'], {}), '([[1.5, 2.5, 3.5]])\n', (4621, 4640), True, 'import numpy as np\n'), ((4814, 4844), 'numpy.array', 'np.array', (['[0.5, 3.5, 6.5, 9.5]'], {}), '([0.5, 3.5, 6.5, 9.5])\n', (4822, 4844), True, 'import numpy as np\n'), ((4898, 4916), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (4914, 4916), False, 'import sys\n'), ((5263, 5289), 'numpy.array', 'np.array', (['[[0, 1], [3, 4]]'], {}), '([[0, 1], [3, 4]])\n', (5271, 5289), True, 'import numpy as np\n'), ((5582, 5610), 'cv2.imshow', 'cv2.imshow', (['"""roiImg"""', 'roiImg'], {}), "('roiImg', roiImg)\n", (5592, 5610), False, 'import cv2\n'), ((5618, 5632), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (5629, 5632), False, 'import cv2\n'), ((4942, 4980), 'numpy.allclose', 'np.allclose', (['xProfile', 'correctxProfile'], {}), '(xProfile, correctxProfile)\n', (4953, 4980), True, 'import numpy as np\n'), ((5028, 5065), 'numpy.allclose', 'np.allclose', (['yProfile', 'correctProfile'], {}), '(yProfile, correctProfile)\n', (5039, 5065), True, 'import numpy as np\n'), ((5343, 5375), 'numpy.allclose', 'np.allclose', (['roi', 'correctProfile'], {}), '(roi, correctProfile)\n', (5354, 5375), True, 'import numpy as np\n')] |
from numpy import random
from numpy import sqrt
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import load_model
if __name__ == "__main__":
    # Change the path as per your local directory
    df = read_csv('/Volumes/G-DRIVE mobile/Data/FannieMae/2019Q1/Acquisition_2019Q1.txt',
                  delimiter='|', index_col=False,
                  names=['loan_identifier', 'channel', 'seller_name', 'original_interest_rate',
                         'original_upb', 'original_loan_term', 'origination_date', 'first_paymane_date',
                         'ltv', 'cltv', 'number_of_borrowers', 'dti', 'borrower_credit_score',
                         'first_time_home_buyer_indicator', 'loan_purpose', 'property_type', 'number_of_units',
                         'occupancy_status', 'property_state', 'zip_3_digit', 'mortgage_insurance_percentage',
                         'product_type', 'co_borrower_credit_score', 'mortgage_insurance_type',
                         'relocation_mortgage_indicator'])
    # Select features + target for the regression.  .copy() avoids the
    # SettingWithCopyWarning when the categorical columns are overwritten below.
    df_reg = df[['original_upb', 'cltv', 'dti', 'borrower_credit_score',
                 'occupancy_status', 'property_state', 'original_interest_rate']].copy()
    # Encode categorical columns as integers.  The encoders are fit on the
    # full data set so that transform() (and the single-row prediction below)
    # can map any observed category.  The original code called fit_transform
    # and discarded the result; only the fit is needed here.
    le_occupancy_status = LabelEncoder()
    le_property_state = LabelEncoder()
    le_occupancy_status.fit(df['occupancy_status'])
    le_property_state.fit(df['property_state'])
    df_reg['occupancy_status'] = le_occupancy_status.transform(df_reg['occupancy_status'])
    df_reg['property_state'] = le_property_state.transform(df_reg['property_state'])
    # random sampling (with replacement) of 1000 rows to keep training fast
    rnd = random.randint(0, len(df_reg.index), 1000)
    df_reg = df_reg.iloc[rnd, :]
    # split into input and output columns
    X, y = df_reg.values[:, :-1], df_reg.values[:, -1]
    # split into train and test datasets
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
    # determine the number of input features
    n_features = X_train.shape[1]
    # define a small fully-connected regression network
    model = Sequential()
    model.add(Dense(20, activation='relu', kernel_initializer='he_normal', input_shape=(n_features,)))
    model.add(Dense(16, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(1))
    # compile the model
    model.compile(optimizer='adam', loss='mse')
    # fit the model
    history = model.fit(X_train, y_train, epochs=50, batch_size=32, verbose=0)
    # evaluate the model (MSE loss; RMSE derived from it for readability)
    error = model.evaluate(X_test, y_test, verbose=0)
    print('MSE: %.3f, RMSE: %.3f' % (error, sqrt(error)))
    # make a prediction for one hand-built row (categories encoded above)
    row = [100000.0, 85.0, 39.0, 652.0, le_occupancy_status.transform(['I'])[0], le_property_state.transform(['NJ'])[0]]
    yhat = model.predict([row])
    print('Predicted: %.3f' % yhat)
    # save model to file
    model.save('model.h5')
    # load the model from file
    model = load_model('model.h5')
    # make a prediction with the reloaded model
    row = [150000.00, 90.00, 40.0, 720.0, 0, 32]
    yhat = model.predict([row])
    print('Predicted: %.3f' % yhat[0])
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.LabelEncoder",
"tensorflow.keras.Sequential",
"numpy.sqrt"
] | [((392, 1047), 'pandas.read_csv', 'read_csv', (['"""/Volumes/G-DRIVE mobile/Data/FannieMae/2019Q1/Acquisition_2019Q1.txt"""'], {'delimiter': '"""|"""', 'index_col': '(False)', 'names': "['loan_identifier', 'channel', 'seller_name', 'original_interest_rate',\n 'original_upb', 'original_loan_term', 'origination_date',\n 'first_paymane_date', 'ltv', 'cltv', 'number_of_borrowers', 'dti',\n 'borrower_credit_score', 'first_time_home_buyer_indicator',\n 'loan_purpose', 'property_type', 'number_of_units', 'occupancy_status',\n 'property_state', 'zip_3_digit', 'mortgage_insurance_percentage',\n 'product_type', 'co_borrower_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator']"}), "('/Volumes/G-DRIVE mobile/Data/FannieMae/2019Q1/Acquisition_2019Q1.txt'\n , delimiter='|', index_col=False, names=['loan_identifier', 'channel',\n 'seller_name', 'original_interest_rate', 'original_upb',\n 'original_loan_term', 'origination_date', 'first_paymane_date', 'ltv',\n 'cltv', 'number_of_borrowers', 'dti', 'borrower_credit_score',\n 'first_time_home_buyer_indicator', 'loan_purpose', 'property_type',\n 'number_of_units', 'occupancy_status', 'property_state', 'zip_3_digit',\n 'mortgage_insurance_percentage', 'product_type',\n 'co_borrower_credit_score', 'mortgage_insurance_type',\n 'relocation_mortgage_indicator'])\n", (400, 1047), False, 'from pandas import read_csv\n'), ((1470, 1484), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1482, 1484), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((1509, 1523), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (1521, 1523), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((2112, 2150), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.33)'}), '(X, y, test_size=0.33)\n', (2128, 2150), False, 'from sklearn.model_selection import train_test_split\n'), ((2330, 2342), 'tensorflow.keras.Sequential', 
'Sequential', ([], {}), '()\n', (2340, 2342), False, 'from tensorflow.keras import Sequential\n'), ((3168, 3190), 'tensorflow.keras.models.load_model', 'load_model', (['"""model.h5"""'], {}), "('model.h5')\n", (3178, 3190), False, 'from tensorflow.keras.models import load_model\n'), ((2357, 2449), 'tensorflow.keras.layers.Dense', 'Dense', (['(20)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'input_shape': '(n_features,)'}), "(20, activation='relu', kernel_initializer='he_normal', input_shape=(\n n_features,))\n", (2362, 2449), False, 'from tensorflow.keras.layers import Dense\n'), ((2460, 2520), 'tensorflow.keras.layers.Dense', 'Dense', (['(16)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""'}), "(16, activation='relu', kernel_initializer='he_normal')\n", (2465, 2520), False, 'from tensorflow.keras.layers import Dense\n'), ((2536, 2544), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (2541, 2544), False, 'from tensorflow.keras.layers import Dense\n'), ((2842, 2853), 'numpy.sqrt', 'sqrt', (['error'], {}), '(error)\n', (2846, 2853), False, 'from numpy import sqrt\n')] |
import numpy as np
import re
import os
import matplotlib.pyplot as plt
au_to_ev = 27.21139  # Hartree (atomic unit of energy) -> electron-volt conversion factor
def get_high_symm_points(kpt_file):
    """Return [index, label] pairs for the labelled (high-symmetry) k-points.

    High-symmetry points carry a descriptive character in the 5th column of
    the k-point file; 'Gamma' is translated to its TeX form for plotting.
    Indices are 0-based over the data lines (the header is skipped).
    """
    # First pass: extract the (whitespace-stripped) label of every k-point.
    # Columns 1-4 end at character index 40 (hardcoded :ugly:), so the label
    # is whatever sits between index 41 and the trailing newline.
    labels = []
    with open(kpt_file, 'r') as kfile:
        for lineno, line in enumerate(kfile):
            if lineno == 0:
                continue  # header line
            tail = line[41:-1]
            labels.append(tail.replace(" ", "") if len(tail) > 0 else '')
    # Second pass: keep only non-empty labels, mapping 'Gamma' to TeX.
    high_symm_points = []
    for idx, lbl in enumerate(labels):
        if not lbl:
            continue
        high_symm_points.append([idx, r'$\Gamma$' if 'Gamma' in lbl else lbl])
    return high_symm_points
def sort_energies(en_data):
    """Re-shape the flat ``en_data`` records into a nested list.

    Each record is (id, kx, ky, kz, energy).  The result is indexed as
    ``en_plot[kpt_index][band_index]``: a new sub-list is opened whenever the
    k-point changes (by more than 1e-8 in Euclidean norm).
    """
    en_plot = []
    prev_kpt = []
    for row_idx, row in enumerate(en_data):
        rec_id = int(row[0])
        kpt = row[1:4]
        energy = row[4]
        if row_idx == 0:
            # remember the first record and open the first k-point group
            first_id = rec_id
            prev_kpt = kpt
            en_plot.append([])
        if np.linalg.norm(kpt - prev_kpt) > 1e-8:
            # jump to a new k-point: start a fresh group
            en_plot.append([])
            prev_kpt = kpt
            if rec_id == first_id:
                print("[plot_bandstruct/sort_energies]: WARNING unexpected new kpt found")
        # append the energy to the current k-point's band list
        en_plot[-1].append(energy)
    return en_plot
def get_en_plot(en_data):
    """Sort the raw records by k-point and return (n_bands, nested energies).

    ``n_bands`` is taken from the first k-point group of the sorted data.
    """
    en_plot = sort_energies(en_data)
    return len(en_plot[0]), en_plot
def read_data(target_dir):
    """Read the k-point path and band energies for one calculation directory.

    Parameters
    ----------
    target_dir : str
        Directory expected to contain 'kpts' and 'out/eBands.dat'.

    Returns
    -------
    k_plot : ndarray
        Normalized [0, 1] plotting positions, one per k-point.
    k_ticks : list
        k_plot positions of the high-symmetry points.
    k_labels : list
        Labels for those positions.
    en_data : ndarray
        Raw (id, kx, ky, kz, energy) records from eBands.dat.

    Raises
    ------
    FileNotFoundError
        If either input file is missing.  (The original code aborted by
        referencing an undefined name ``stop``, i.e. raising a NameError.)
    """
    k_plot = []
    en_data = []
    k_ticks = []
    k_labels = []
    #
    kpt_file = target_dir + '/kpts'
    en_file = target_dir + '/out/eBands.dat'
    #
    if os.path.isfile(kpt_file):
        kpt_data = np.genfromtxt(kpt_file, skip_header=1, usecols=(0, 1, 2))
        # linspace over [0, 1] gives the x-positions for plotting
        k_plot = np.linspace(0.0, 1.0, len(kpt_data))
        print('[plot_bandstruct/read_data]: found '+str(len(kpt_data))+' kpts')
        high_symm_points = get_high_symm_points(kpt_file)
        print('[plot_bandstruct/read_data]: high symm pts:'+str(high_symm_points))
        # translate each high-symmetry point into a tick position + label
        for symm_point in high_symm_points:
            k_ticks.append(k_plot[symm_point[0]])
            k_labels.append(symm_point[1])
    else:
        print("[plot_bandstruct/read_data]: ERROR did not find kpt_file "+kpt_file)
        raise FileNotFoundError(kpt_file)
    if os.path.isfile(en_file):
        en_data = np.genfromtxt(en_file, skip_header=3, usecols=(0, 1, 2, 3, 4))
    else:
        print("[plot_bandstruct/read_data]: ERROR did not find en_file "+en_file)
        raise FileNotFoundError(en_file)
    return k_plot, k_ticks, k_labels, en_data
def check_length(length, lst, default):
    """Return ``lst`` if it has exactly ``length`` entries.

    Otherwise log the mismatch and return a fresh list of ``length`` copies
    of ``default`` instead.
    """
    orig_length = len(lst)
    if orig_length != length:
        print('[plot_bandstruct/check_length]: lst length was not of size '+str(length)+' (lst has actual size '+str(orig_length)+')')
        print('[plot_bandstruct/check_length]: lst will be overwritten with default value = '+str(default))
        lst = [default for _ in range(length)]
        if len(lst) != orig_length:
            print('[plot_bandstruct/check_length]: new lst size:'+str(len(lst)))
    return lst
def plot_bandstruct(target_dir_lst, id_str,id_formula,line_style, plot_color, pdf_out_file, label_size=14, y_tick_size=12, plot_in_ev=False):
    """Plot the band structures from several calculation directories in one figure.

    Parameters
    ----------
    target_dir_lst : list of str
        Directories to scan; each must contain 'kpts' and 'out/eBands.dat'.
    id_str : str
        Substring used to split each directory name and parse the float that
        identifies the run (e.g. 'delta' for 'delta0.900').
    id_formula : str
        Label prefix used in the legend (typically a TeX string).
    line_style, plot_color : list
        Per-directory matplotlib line styles / colors; replaced with defaults
        by check_length() if their length does not match target_dir_lst.
    pdf_out_file : str
        Output path for the saved figure.
    label_size, y_tick_size : int
        Font sizes for axis labels and y tick labels.
    plot_in_ev : bool
        If True, energies are converted from Hartree to eV (via au_to_ev)
        before plotting.

    NOTE(review): if none of the directories exist, k_plot is never assigned
    and the axis-limit code below raises a NameError — confirm callers always
    pass at least one valid directory.
    """
    #kpt_data = np.genfromtxt(kpt_file,skip_header=1,dtype=(float,float,float,float,str), missing_values='',filling_values='none')
    print("[plot_bandstruct]: hello there! will search for data id: "+id_str)
    #this should be a unique identifier
    id_lst = []
    # make sure the style/color lists match the number of directories
    line_style = check_length(len(target_dir_lst), line_style, "-" )
    plot_color = check_length(len(target_dir_lst), plot_color, "black" )
    #PLOTTING
    fig, ax = plt.subplots(1,1)
    for dir_idx, next_dir in enumerate(target_dir_lst):
        if os.path.isdir(next_dir):
            print("[plot_bandstruct]: NEW FOLDER FOUND ",next_dir," dir idx"+str(dir_idx))
            #
            # parse the identifying float out of the directory name for the legend
            id_label = ''
            try:
                id_lst.append( float(next_dir.split(id_str)[1]) )
                id_label = id_formula+'='+'{:01.2f}'.format(id_lst[-1])
                print("[plot_bandstruct]: intepreted as "+str(id_str)+"="+id_label)
            # NOTE(review): bare except — any parse failure just leaves id_label empty
            except:
                print("[plot_bandstruct]: could not id the folder "+next_dir)
            #
            #
            k_plot, k_ticks, k_labels, en_data = read_data(next_dir)
            #
            #plot_color = 'black'
            #line_style = '-'
            #
            #
            nBands, en_plot = get_en_plot(en_data)
            print('[plot_bandstruct]: detected nBands='+str(nBands))
            # plot one line per band; only the first band gets a legend label
            # so each directory appears once in the legend
            for band in range(nBands):
                en_band = []
                for idx,kpt in enumerate(k_plot):
                    en_band.append( en_plot[idx][band] )
                if plot_in_ev:
                    en_band = np.array(en_band) * au_to_ev
                if band == 0:
                    plt.plot(k_plot, en_band, line_style[dir_idx],color=plot_color[dir_idx], label=id_label)
                else:
                    plt.plot(k_plot, en_band, line_style[dir_idx], color=plot_color[dir_idx])
        else:
            print("[plot_bandstruct]: WARNING expected folder ",next_dir," was not found!")
    #x-axis: ticks at the high-symmetry points of the last directory read
    ax.set_xlim([k_plot[0],k_plot[-1]])
    ax.set_xticks(k_ticks)
    ax.set_xticklabels(k_labels,fontsize=label_size)
    ax.grid(axis='x', alpha=.5, linewidth=.8, color='black')
    #y-axis: fixed window with a minor tick marking E = 0
    ax.set_ylim([-14,6.3])
    ax.set_yticks([0.],minor=True)
    ax.set_yticks([-12,-9,-6,-3,0,3,6],minor=False)
    plt.tick_params(axis='y', which='major',left=True,right=True, direction='in',labelsize=y_tick_size)
    plt.tick_params(axis='y', which='minor',left=True,right=True, direction='in',labelsize=y_tick_size-2)
    ax.grid(which='minor',axis='y')
    if plot_in_ev:
        plt.ylabel(r'$E \,(eV)$',fontsize=label_size)
    else:
        plt.ylabel(r'$E \,(E_h)$',fontsize=label_size)
    plt.legend(loc=(.26,.05),framealpha=1, shadow=False)
    #save file
    plt.tight_layout()
    #try:
    plt.savefig(pdf_out_file,bbox_inches='tight')
    print('saved band_structure: '+pdf_out_file)
    #except:
    #    print('Error while saving the plot, try to show plot now in order to manually save it')
    #    plt.show()
def unit_test():
    """Smoke test: plot the band structures of the five delta-scan folders."""
    # output file
    pdf_target = './bands.pdf'
    # folders to scan and the identifier used to parse them
    target_dir_lst = ['delta0.900',
                      'delta0.950',
                      'delta1.000',
                      'delta1.050',
                      'delta1.100'
                      ]
    id_str = 'delta'
    id_label = r'$\delta$'
    #
    # colors: warm below the reference delta, black at it, cool above
    min_col = 'orangered'
    bas_col = 'black'
    max_col = 'deepskyblue'
    colors = [min_col, min_col, bas_col, max_col, max_col]
    #
    # line styles: solid for the extremes and the reference, dotted in between
    prime = '-'
    opt = ':'
    line_style = [prime, opt, prime, opt, prime]
    #
    plot_bandstruct(target_dir_lst,
                    id_str, id_label,
                    line_style, colors,
                    pdf_target,
                    label_size=14, y_tick_size=12,
                    plot_in_ev=True
                    )
#plot_bandstruct(['./'],'./kpts','./eBands.dat',
#                './bands.pdf',
#                label_size=14,
#                y_tick_size=12,
#                plot_in_ev=False
#                )
if __name__ == '__main__':
    # Guard so importing this module does not trigger plotting side effects.
    unit_test()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| [
"matplotlib.pyplot.plot",
"os.path.isdir",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"matplotlib.pyplot.subplots",
"os.path.isfile",
"numpy.linalg.norm",
"numpy.array",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savef... | [((1970, 1994), 'os.path.isfile', 'os.path.isfile', (['kpt_file'], {}), '(kpt_file)\n', (1984, 1994), False, 'import os\n'), ((2582, 2605), 'os.path.isfile', 'os.path.isfile', (['en_file'], {}), '(en_file)\n', (2596, 2605), False, 'import os\n'), ((3902, 3920), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (3914, 3920), True, 'import matplotlib.pyplot as plt\n'), ((5422, 5529), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""major"""', 'left': '(True)', 'right': '(True)', 'direction': '"""in"""', 'labelsize': 'y_tick_size'}), "(axis='y', which='major', left=True, right=True, direction=\n 'in', labelsize=y_tick_size)\n", (5437, 5529), True, 'import matplotlib.pyplot as plt\n'), ((5523, 5634), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""y"""', 'which': '"""minor"""', 'left': '(True)', 'right': '(True)', 'direction': '"""in"""', 'labelsize': '(y_tick_size - 2)'}), "(axis='y', which='minor', left=True, right=True, direction=\n 'in', labelsize=y_tick_size - 2)\n", (5538, 5634), True, 'import matplotlib.pyplot as plt\n'), ((5781, 5837), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(0.26, 0.05)', 'framealpha': '(1)', 'shadow': '(False)'}), '(loc=(0.26, 0.05), framealpha=1, shadow=False)\n', (5791, 5837), True, 'import matplotlib.pyplot as plt\n'), ((5848, 5866), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5864, 5866), True, 'import matplotlib.pyplot as plt\n'), ((5875, 5921), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pdf_out_file'], {'bbox_inches': '"""tight"""'}), "(pdf_out_file, bbox_inches='tight')\n", (5886, 5921), True, 'import matplotlib.pyplot as plt\n'), ((2011, 2068), 'numpy.genfromtxt', 'np.genfromtxt', (['kpt_file'], {'skip_header': '(1)', 'usecols': '(0, 1, 2)'}), '(kpt_file, skip_header=1, usecols=(0, 1, 2))\n', (2024, 2068), True, 'import numpy as np\n'), ((2621, 2683), 
'numpy.genfromtxt', 'np.genfromtxt', (['en_file'], {'skip_header': '(3)', 'usecols': '(0, 1, 2, 3, 4)'}), '(en_file, skip_header=3, usecols=(0, 1, 2, 3, 4))\n', (2634, 2683), True, 'import numpy as np\n'), ((3980, 4003), 'os.path.isdir', 'os.path.isdir', (['next_dir'], {}), '(next_dir)\n', (3993, 4003), False, 'import os\n'), ((5677, 5723), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E \\\\,(eV)$"""'], {'fontsize': 'label_size'}), "('$E \\\\,(eV)$', fontsize=label_size)\n", (5687, 5723), True, 'import matplotlib.pyplot as plt\n'), ((5732, 5779), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$E \\\\,(E_h)$"""'], {'fontsize': 'label_size'}), "('$E \\\\,(E_h)$', fontsize=label_size)\n", (5742, 5779), True, 'import matplotlib.pyplot as plt\n'), ((1332, 1359), 'numpy.linalg.norm', 'np.linalg.norm', (['(kpt - k_old)'], {}), '(kpt - k_old)\n', (1346, 1359), True, 'import numpy as np\n'), ((4855, 4948), 'matplotlib.pyplot.plot', 'plt.plot', (['k_plot', 'en_band', 'line_style[dir_idx]'], {'color': 'plot_color[dir_idx]', 'label': 'id_label'}), '(k_plot, en_band, line_style[dir_idx], color=plot_color[dir_idx],\n label=id_label)\n', (4863, 4948), True, 'import matplotlib.pyplot as plt\n'), ((4959, 5032), 'matplotlib.pyplot.plot', 'plt.plot', (['k_plot', 'en_band', 'line_style[dir_idx]'], {'color': 'plot_color[dir_idx]'}), '(k_plot, en_band, line_style[dir_idx], color=plot_color[dir_idx])\n', (4967, 5032), True, 'import matplotlib.pyplot as plt\n'), ((4803, 4820), 'numpy.array', 'np.array', (['en_band'], {}), '(en_band)\n', (4811, 4820), True, 'import numpy as np\n')] |
# OK. So we've gathered our sample. We've run the MCMC to measure SLFV.
# We've also done simple prewhitening to give us a prior on frequency.
# Now let's jointly model the SLFV and pulsations with a GP
import numpy as np
import pandas as pd
from TESStools import *
import os
import warnings
from multiprocessing import Pool, cpu_count
from scipy.stats import multivariate_normal
from tqdm.auto import tqdm
import h5py as h5
import pymc3 as pm
import pymc3_ext as pmx
import aesara_theano_fallback.tensor as tt
from celerite2.theano import terms, GaussianProcess
from pymc3_ext.utils import eval_in_model
import arviz as az
import exoplanet
# Module-level inputs produced by earlier pipeline steps: the sample of
# targets, the SLFV emcee posterior summaries, and the per-star
# prewhitening summary table.
cool_sgs = pd.read_csv('sample.csv',index_col=0)
slfv_emcee = pd.read_csv('slfv_params.csv')
prewhitening_summary = pd.read_csv('prewhitening.csv')
# Here's a function that maximizes the likelihood of a GP + arbitrary number of sinusoids
def pm_fit_gp_sin(tic, fs=None, amps=None, phases=None, model=None, return_var=False, thin=50):
    """
    Use PyMC3 to do a maximum likelihood fit for a GP + multiple periodic signals

    Inputs
    ------
    tic : int or str
        TIC identifier; the light curve is fetched via get_lc_from_id and the
        SLFV priors are looked up in the module-level slfv_emcee table
    fs : array-like, elements are PyMC3 distributions
        Array with frequencies to fit, default None (i.e., only the GP is fit)
    amps : array-like, elements are PyMC3 distributions
        Array with amplitudes to fit, default None (i.e., only the GP is fit)
    phases : array-like, elements are PyMC3 distributions
        Array with phases to fit, default None (i.e., only the GP is fit)
    model : `pymc3.model.Model`
        PyMC3 Model object, will fail unless given
    return_var : bool, default False
        If True, also returns the variance of the GP prediction
    thin : integer, default 50
        Calculate the variance of the GP every `thin` points.

    Returns
    -------
    map_soln : dict
        Contains best-fit parameters and the gp predictions
    logp : float
        The log-likelihood of the model
    bic : float
        The Bayesian Information Criterion, -2 ln P + m ln N
    var : array
        If `return_var` is True, the variance of the GP at the thinned times
    """
    assert model is not None, "Must provide a PyMC3 model object"
    #Extract LC
    lc, lc_smooth = lc_extract(get_lc_from_id(tic), smooth='10T')
    time, flux, err = lc['Time'].values, lc['Flux'].values, lc['Err'].values
    #Initial Values for SLFV (from the earlier MCMC run)
    slfv_pars = slfv_emcee[slfv_emcee['tic'] == tic]
    #Mean model
    mean_flux = pm.Normal("mean_flux", mu = 1.0, sigma=np.std(flux))
    if fs is not None:
        #Sum of sinusoids + constant as the mean for celerite
        mean_model = tt.sum([a * tt.sin(2.0*np.pi*f*time + phi) for a,f,phi in zip(amps,fs,phases)],axis=0) + mean_flux
        #And add it to the model
        pm.Deterministic("mean", mean_model)
    else:
        mean_model = mean_flux
        mean = pm.Deterministic("mean", mean_flux)
    # A jitter term describing excess white noise (analogous to C_w)
    log_jitter = pm.Uniform("log_jitter", lower=np.log(aw)-15, upper=np.log(aw)+15, testval=np.log(np.median(np.abs(np.diff(flux)))))
    # A term to describe the SLF variability
    # sigma is the standard deviation of the GP, rho roughly corresponds to the
    #breakoff in the power spectrum. rho and tau are related by a factor of
    #pi/Q (the quality factor)
    #guesses for our parameters, derived from the emcee SLFV posteriors
    a0 = slfv_pars['alpha'].item()
    tau_char = slfv_pars['tau'].item()
    nu_char = 1.0/(2.0*np.pi*tau_char)
    omega_0_guess = 2*np.pi*nu_char
    Q_guess = 1/np.sqrt(2)
    sigma_guess = a0 * np.sqrt(omega_0_guess*Q_guess) * np.power(np.pi/2.0, 0.25)
    #sigma
    logsigma = pm.Uniform("log_sigma", lower=np.log(sigma_guess)-10, upper=np.log(sigma_guess)+10)
    sigma = pm.Deterministic("sigma",tt.exp(logsigma))
    #rho (characteristic timescale)
    logrho = pm.Uniform("log_rho", lower=np.log(0.01/nu_char), upper=np.log(100.0/nu_char))
    rho = pm.Deterministic("rho", tt.exp(logrho))
    nuchar = pm.Deterministic("nu_char", 1.0 / rho)
    #tau (damping timescale)
    logtau = pm.Uniform("log_tau", lower=np.log(0.01*2.0*Q_guess/omega_0_guess),upper=np.log(100.0*2.0*Q_guess/omega_0_guess))
    tau = pm.Deterministic("tau", tt.exp(logtau))
    nudamp = pm.Deterministic("nu_damp", 1.0 / tau)
    #We also want to track Q, as it's a good estimate of how stochastic the
    #process is.
    Q = pm.Deterministic("Q", np.pi*tau/rho)
    kernel = terms.SHOTerm(sigma=sigma, rho=rho, tau=tau)
    gp = GaussianProcess(
        kernel,
        t=time,
        diag=err ** 2.0 + tt.exp(2 * log_jitter),
        quiet=True,
    )
    # Compute the Gaussian Process likelihood and add it into the
    # the PyMC3 model as a "potential"
    gp.marginal("gp", observed=flux-mean_model)
    # Compute the mean model prediction for plotting purposes
    pm.Deterministic("pred", gp.predict(flux-mean_model))
    # Optimize to find the maximum a posteriori parameters
    map_soln = pmx.optimize()
    logp = model.logp(map_soln)
    # parameters are tau, sigma, rho, mean, jitter, plus 3 per frequency
    if fs is not None:
        n_par = 5.0 + (3.0 * len(fs))
    else:
        n_par = 5.0
    bic = -2.0*logp + n_par * np.log(len(time))
    #compute variance of the GP prediction on a thinned grid as well...
    if return_var:
        eval_in_model(gp.compute(time[::thin],yerr=err[::thin]), map_soln)
        mu, var = eval_in_model(gp.predict(flux[::thin], t=time[::thin], return_var=True), map_soln)
        return map_soln, logp, bic, var
    return map_soln, logp, bic
if __name__ == '__main__':
    # For each star: fit a GP-only model, then GP + increasing numbers of
    # sinusoids, and classify the star as a pulsator when any sinusoid model
    # beats the GP-only BIC.  Results are written to Find_FYPS_GP_results.csv.
    thin = 50 #What to thin by if we're computing the variance of the gp
    tics = []
    n_prewhitenings = []
    pulses = []
    initial_peaks = [] #stars where the OG prewhitening peaks were significant
    for tic, row in tqdm(cool_sgs.iterrows(), total=len(cool_sgs)):
        lc, lc_smooth = lc_extract(get_lc_from_id(tic), smooth='10T')
        time, flux, err = lc['Time'].values, lc['Flux'].values, lc['Err'].values
        this_summ = prewhitening_summary[prewhitening_summary['TIC']==tic]
        nf = this_summ['n_peaks'].item()
        n_prewhitenings.append(nf)
        print(tic)
        if nf == 0.0:
            # Case 1: the original prewhitening found no peaks.
            print('No frequencies found by prewhitening, fitting GP...')
            initial_peaks.append(False)
            # Fit just the GP
            with pm.Model() as model_np:
                map_soln, logp, bic_np, var = pm_fit_gp_sin(tic, model=model_np, thin=thin, return_var=True)
            # Subtract the GP prediction,
            model_flux = map_soln['pred'] + map_soln['mean']
            var_interp = np.interp(time, time[::thin], var)
            resid_flux = flux - model_flux
            resid_err = np.sqrt(err**2.0 + var_interp)
            # Prewhiten the residuals
            print('Prewhitening residuals')
            good_fs, good_amps, good_phases, _, _ = prewhiten_harmonic(time, resid_flux, resid_err, red_noise=False)
            #If we don't find any frequencies, this is definitely not a pulsator
            if len(good_fs) == 0 :
                print('No additional frequencies found, this isnt a pulsator!')
                pulse = False
            #If we do, run through all the frequencies and find the minimum BIC
            else:
                print('Found some new frequencies, checking to see if theyre significant')
                pulse = False
                for nf_found in range(good_fs.shape[0]+1):
                    with pm.Model() as model:
                        if nf_found == 0:
                            continue # we already did this case
                        else:
                            # priors centered on the prewhitening values, +/- 3 sigma
                            fs = [pm.Uniform(f"f{i}", lower = good_fs[i, 0] - 3*good_fs[i,1], upper=good_fs[i, 0] + 3*good_fs[i,1]) for i in range(nf_found)]
                            amps = [pm.Uniform(f"a{i}", lower = np.max([good_amps[i, 0] - 3*good_amps[i,1],0.0]), upper=good_amps[i, 0] + 3*good_amps[i,1]) for i in range(nf_found)]
                            phis = [pmx.Angle(f"phi{i}", testval = good_phases[i,0]) for i in range(nf_found)]
                            map_soln, logp, bic = pm_fit_gp_sin(tic, fs=fs, amps=amps, phases=phis, model=model)
                            if bic < bic_np:
                                pulse = True
                if not pulse:
                    print('None were significant, this isnt a pulsator!')
                else:
                    print('Found significant frequencies, this is a pulsator!')
        else: #If we DID find frequencies...
            # Case 2: the original prewhitening found peaks; test whether they
            # survive when the SLFV is modeled with the GP.
            print('Prewhitening frequencies found, lets see if theyre significant...')
            with h5.File('prewhitening.hdf5','r') as pw: #load them in from the HDF5 file
                good_fs = pw[f'{tic}/good_fs'][()]
                good_amps = pw[f'{tic}/good_amps'][()]
                good_phases = pw[f'{tic}/good_phases'][()]
            # Iterate through and fit the GP successively adding on frequencies
            nfs = []
            bics = []
            pulse = False
            for nf_found in range(good_fs.shape[0]+1):
                with pm.Model() as model:
                    if nf_found == 0:
                        # GP-only reference fit; bic_np is the baseline BIC
                        map_soln_np, logp, bic_np, var = pm_fit_gp_sin(tic, model=model, return_var=True, thin=thin)
                        nfs.append(nf)
                        bics.append(bic_np)
                    else:
                        fs = [pm.Uniform(f"f{i}", lower = good_fs[i, 0] - 3*good_fs[i,1], upper=good_fs[i, 0] + 3*good_fs[i,1]) for i in range(nf_found)]
                        amps = [pm.Uniform(f"a{i}", lower = np.max([good_amps[i, 0] - 3*good_amps[i,1],0.0]), upper=good_amps[i, 0] + 3*good_amps[i,1]) for i in range(nf_found)]
                        phis = [pmx.Angle(f"phi{i}", testval = good_phases[i,0]) for i in range(nf_found)]
                        try: #very occasionally something will break
                            map_soln, logp, bic = pm_fit_gp_sin(tic, fs=fs, amps=amps, phases=phis, model=model)
                            if bic < bic_np:
                                pulse = True
                                break
                        # NOTE(review): bare except — swallows any failure mode of the fit
                        except:
                            print(f'Broke on TIC {tic}, N_f={nf_found}')
                            continue
            if pulse: # if the BIC is better with a frequency...
                print('Frequencies were significant, this is a pulsator!')
                initial_peaks.append(True)
            else:
                print('Frequencies were not significant, computing residuals from GP')
                initial_peaks.append(False)
                #Otherwise, we'll need to run the same logic as above...
                # Subtract the GP prediction with no pulsations
                model_flux = map_soln_np['pred'] + map_soln_np['mean']
                var_interp = np.interp(time, time[::thin], var)
                resid_flux = flux - model_flux
                resid_err = np.sqrt(err**2.0 + var_interp)
                # Prewhiten the residuals
                print('Prewhitening residuals')
                resid_fs, resid_amps, resid_phases, _, _ = prewhiten_harmonic(time, resid_flux, resid_err, red_noise=False)
                #If we don't find any frequencies, this is definitely not a pulsator
                # NOTE(review): this checks len(good_fs), not len(resid_fs) — looks
                # like it was meant to test the residual frequencies; confirm.
                if len(good_fs) == 0 :
                    print('No additional frequencies found, this isnt a pulsator!')
                    pulse = False
                #If we do, run through all the frequencies and find the minimum BIC
                else:
                    print('Found some new frequencies, checking to see if theyre significant')
                    for nf_found in range(resid_fs.shape[0]+1):
                        with pm.Model() as model:
                            if nf_found == 0:
                                continue # we already did this case
                            else:
                                fs = [pm.Uniform(f"f{i}", lower = resid_fs[i, 0] - 3*resid_fs[i,1], upper=resid_fs[i, 0] + 3*resid_fs[i,1]) for i in range(nf_found)]
                                amps = [pm.Uniform(f"a{i}", lower = np.max([resid_amps[i, 0] - 3*resid_amps[i,1],0.0]), upper=resid_amps[i, 0] + 3*resid_amps[i,1]) for i in range(nf_found)]
                                phis = [pmx.Angle(f"phi{i}", testval = resid_phases[i,0]) for i in range(nf_found)]
                                map_soln, logp, bic = pm_fit_gp_sin(tic, fs=fs, amps=amps, phases=phis, model=model)
                                if bic < bic_np:
                                    pulse = True
                                    break
                    if not pulse:
                        print('None were significant, this isnt a pulsator!')
                    else:
                        print('Found significant frequencies, this is a pulsator!')
        tics.append(tic)
        pulses.append(pulse)
    out_df = pd.DataFrame({'n_peaks_prewhitening':n_prewhitenings,'initial_peaks_significant':initial_peaks,'pulse_GP':pulses},index=tics)
    out_df.to_csv('Find_FYPS_GP_results.csv')
| [
"pandas.DataFrame",
"h5py.File",
"celerite2.theano.terms.SHOTerm",
"pymc3.Model",
"numpy.log",
"pymc3_ext.optimize",
"pandas.read_csv",
"pymc3.Deterministic",
"numpy.power",
"numpy.std",
"aesara_theano_fallback.tensor.exp",
"numpy.max",
"numpy.diff",
"pymc3.Uniform",
"pymc3_ext.Angle",
... | [((659, 697), 'pandas.read_csv', 'pd.read_csv', (['"""sample.csv"""'], {'index_col': '(0)'}), "('sample.csv', index_col=0)\n", (670, 697), True, 'import pandas as pd\n'), ((711, 741), 'pandas.read_csv', 'pd.read_csv', (['"""slfv_params.csv"""'], {}), "('slfv_params.csv')\n", (722, 741), True, 'import pandas as pd\n'), ((766, 797), 'pandas.read_csv', 'pd.read_csv', (['"""prewhitening.csv"""'], {}), "('prewhitening.csv')\n", (777, 797), True, 'import pandas as pd\n'), ((4178, 4216), 'pymc3.Deterministic', 'pm.Deterministic', (['"""nu_char"""', '(1.0 / rho)'], {}), "('nu_char', 1.0 / rho)\n", (4194, 4216), True, 'import pymc3 as pm\n'), ((4446, 4484), 'pymc3.Deterministic', 'pm.Deterministic', (['"""nu_damp"""', '(1.0 / tau)'], {}), "('nu_damp', 1.0 / tau)\n", (4462, 4484), True, 'import pymc3 as pm\n'), ((4592, 4632), 'pymc3.Deterministic', 'pm.Deterministic', (['"""Q"""', '(np.pi * tau / rho)'], {}), "('Q', np.pi * tau / rho)\n", (4608, 4632), True, 'import pymc3 as pm\n'), ((4643, 4687), 'celerite2.theano.terms.SHOTerm', 'terms.SHOTerm', ([], {'sigma': 'sigma', 'rho': 'rho', 'tau': 'tau'}), '(sigma=sigma, rho=rho, tau=tau)\n', (4656, 4687), False, 'from celerite2.theano import terms, GaussianProcess\n'), ((5174, 5188), 'pymc3_ext.optimize', 'pmx.optimize', ([], {}), '()\n', (5186, 5188), True, 'import pymc3_ext as pmx\n'), ((13163, 13303), 'pandas.DataFrame', 'pd.DataFrame', (["{'n_peaks_prewhitening': n_prewhitenings, 'initial_peaks_significant':\n initial_peaks, 'pulse_GP': pulses}"], {'index': 'tics'}), "({'n_peaks_prewhitening': n_prewhitenings,\n 'initial_peaks_significant': initial_peaks, 'pulse_GP': pulses}, index=tics\n )\n", (13175, 13303), True, 'import pandas as pd\n'), ((2892, 2928), 'pymc3.Deterministic', 'pm.Deterministic', (['"""mean"""', 'mean_model'], {}), "('mean', mean_model)\n", (2908, 2928), True, 'import pymc3 as pm\n'), ((2986, 3021), 'pymc3.Deterministic', 'pm.Deterministic', (['"""mean"""', 'mean_flux'], {}), "('mean', mean_flux)\n", 
(3002, 3021), True, 'import pymc3 as pm\n'), ((3714, 3724), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3721, 3724), True, 'import numpy as np\n'), ((3781, 3808), 'numpy.power', 'np.power', (['(np.pi / 2.0)', '(0.25)'], {}), '(np.pi / 2.0, 0.25)\n', (3789, 3808), True, 'import numpy as np\n'), ((3959, 3975), 'aesara_theano_fallback.tensor.exp', 'tt.exp', (['logsigma'], {}), '(logsigma)\n', (3965, 3975), True, 'import aesara_theano_fallback.tensor as tt\n'), ((4144, 4158), 'aesara_theano_fallback.tensor.exp', 'tt.exp', (['logrho'], {}), '(logrho)\n', (4150, 4158), True, 'import aesara_theano_fallback.tensor as tt\n'), ((4412, 4426), 'aesara_theano_fallback.tensor.exp', 'tt.exp', (['logtau'], {}), '(logtau)\n', (4418, 4426), True, 'import aesara_theano_fallback.tensor as tt\n'), ((2654, 2666), 'numpy.std', 'np.std', (['flux'], {}), '(flux)\n', (2660, 2666), True, 'import numpy as np\n'), ((3748, 3780), 'numpy.sqrt', 'np.sqrt', (['(omega_0_guess * Q_guess)'], {}), '(omega_0_guess * Q_guess)\n', (3755, 3780), True, 'import numpy as np\n'), ((4059, 4081), 'numpy.log', 'np.log', (['(0.01 / nu_char)'], {}), '(0.01 / nu_char)\n', (4065, 4081), True, 'import numpy as np\n'), ((4087, 4110), 'numpy.log', 'np.log', (['(100.0 / nu_char)'], {}), '(100.0 / nu_char)\n', (4093, 4110), True, 'import numpy as np\n'), ((4292, 4336), 'numpy.log', 'np.log', (['(0.01 * 2.0 * Q_guess / omega_0_guess)'], {}), '(0.01 * 2.0 * Q_guess / omega_0_guess)\n', (4298, 4336), True, 'import numpy as np\n'), ((4337, 4382), 'numpy.log', 'np.log', (['(100.0 * 2.0 * Q_guess / omega_0_guess)'], {}), '(100.0 * 2.0 * Q_guess / omega_0_guess)\n', (4343, 4382), True, 'import numpy as np\n'), ((6838, 6872), 'numpy.interp', 'np.interp', (['time', 'time[::thin]', 'var'], {}), '(time, time[::thin], var)\n', (6847, 6872), True, 'import numpy as np\n'), ((6941, 6973), 'numpy.sqrt', 'np.sqrt', (['(err ** 2.0 + var_interp)'], {}), '(err ** 2.0 + var_interp)\n', (6948, 6973), True, 'import numpy as np\n'), 
((3195, 3205), 'numpy.log', 'np.log', (['aw'], {}), '(aw)\n', (3201, 3205), True, 'import numpy as np\n'), ((3216, 3226), 'numpy.log', 'np.log', (['aw'], {}), '(aw)\n', (3222, 3226), True, 'import numpy as np\n'), ((3868, 3887), 'numpy.log', 'np.log', (['sigma_guess'], {}), '(sigma_guess)\n', (3874, 3887), True, 'import numpy as np\n'), ((3898, 3917), 'numpy.log', 'np.log', (['sigma_guess'], {}), '(sigma_guess)\n', (3904, 3917), True, 'import numpy as np\n'), ((4773, 4795), 'aesara_theano_fallback.tensor.exp', 'tt.exp', (['(2 * log_jitter)'], {}), '(2 * log_jitter)\n', (4779, 4795), True, 'import aesara_theano_fallback.tensor as tt\n'), ((6575, 6585), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (6583, 6585), True, 'import pymc3 as pm\n'), ((8870, 8903), 'h5py.File', 'h5.File', (['"""prewhitening.hdf5"""', '"""r"""'], {}), "('prewhitening.hdf5', 'r')\n", (8877, 8903), True, 'import h5py as h5\n'), ((11082, 11116), 'numpy.interp', 'np.interp', (['time', 'time[::thin]', 'var'], {}), '(time, time[::thin], var)\n', (11091, 11116), True, 'import numpy as np\n'), ((11193, 11225), 'numpy.sqrt', 'np.sqrt', (['(err ** 2.0 + var_interp)'], {}), '(err ** 2.0 + var_interp)\n', (11200, 11225), True, 'import numpy as np\n'), ((9334, 9344), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (9342, 9344), True, 'import pymc3 as pm\n'), ((2764, 2800), 'aesara_theano_fallback.tensor.sin', 'tt.sin', (['(2.0 * np.pi * f * time + phi)'], {}), '(2.0 * np.pi * f * time + phi)\n', (2770, 2800), True, 'import aesara_theano_fallback.tensor as tt\n'), ((3263, 3276), 'numpy.diff', 'np.diff', (['flux'], {}), '(flux)\n', (3270, 3276), True, 'import numpy as np\n'), ((7702, 7712), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (7710, 7712), True, 'import pymc3 as pm\n'), ((9649, 9755), 'pymc3.Uniform', 'pm.Uniform', (['f"""f{i}"""'], {'lower': '(good_fs[i, 0] - 3 * good_fs[i, 1])', 'upper': '(good_fs[i, 0] + 3 * good_fs[i, 1])'}), "(f'f{i}', lower=good_fs[i, 0] - 3 * good_fs[i, 1], upper=good_fs[\n i, 
0] + 3 * good_fs[i, 1])\n", (9659, 9755), True, 'import pymc3 as pm\n'), ((9983, 10030), 'pymc3_ext.Angle', 'pmx.Angle', (['f"""phi{i}"""'], {'testval': 'good_phases[i, 0]'}), "(f'phi{i}', testval=good_phases[i, 0])\n", (9992, 10030), True, 'import pymc3_ext as pmx\n'), ((11976, 11986), 'pymc3.Model', 'pm.Model', ([], {}), '()\n', (11984, 11986), True, 'import pymc3 as pm\n'), ((7893, 7999), 'pymc3.Uniform', 'pm.Uniform', (['f"""f{i}"""'], {'lower': '(good_fs[i, 0] - 3 * good_fs[i, 1])', 'upper': '(good_fs[i, 0] + 3 * good_fs[i, 1])'}), "(f'f{i}', lower=good_fs[i, 0] - 3 * good_fs[i, 1], upper=good_fs[\n i, 0] + 3 * good_fs[i, 1])\n", (7903, 7999), True, 'import pymc3 as pm\n'), ((8235, 8282), 'pymc3_ext.Angle', 'pmx.Angle', (['f"""phi{i}"""'], {'testval': 'good_phases[i, 0]'}), "(f'phi{i}', testval=good_phases[i, 0])\n", (8244, 8282), True, 'import pymc3_ext as pmx\n'), ((9833, 9885), 'numpy.max', 'np.max', (['[good_amps[i, 0] - 3 * good_amps[i, 1], 0.0]'], {}), '([good_amps[i, 0] - 3 * good_amps[i, 1], 0.0])\n', (9839, 9885), True, 'import numpy as np\n'), ((12183, 12293), 'pymc3.Uniform', 'pm.Uniform', (['f"""f{i}"""'], {'lower': '(resid_fs[i, 0] - 3 * resid_fs[i, 1])', 'upper': '(resid_fs[i, 0] + 3 * resid_fs[i, 1])'}), "(f'f{i}', lower=resid_fs[i, 0] - 3 * resid_fs[i, 1], upper=\n resid_fs[i, 0] + 3 * resid_fs[i, 1])\n", (12193, 12293), True, 'import pymc3 as pm\n'), ((12541, 12589), 'pymc3_ext.Angle', 'pmx.Angle', (['f"""phi{i}"""'], {'testval': 'resid_phases[i, 0]'}), "(f'phi{i}', testval=resid_phases[i, 0])\n", (12550, 12589), True, 'import pymc3_ext as pmx\n'), ((8081, 8133), 'numpy.max', 'np.max', (['[good_amps[i, 0] - 3 * good_amps[i, 1], 0.0]'], {}), '([good_amps[i, 0] - 3 * good_amps[i, 1], 0.0])\n', (8087, 8133), True, 'import numpy as np\n'), ((12379, 12433), 'numpy.max', 'np.max', (['[resid_amps[i, 0] - 3 * resid_amps[i, 1], 0.0]'], {}), '([resid_amps[i, 0] - 3 * resid_amps[i, 1], 0.0])\n', (12385, 12433), True, 'import numpy as np\n')] |
import numpy as np
from pandas import DataFrame, Index, PeriodIndex, period_range
import pandas._testing as tm
class TestPeriodIndex:
    """Regression tests for DataFrames that carry a PeriodIndex."""

    def test_as_frame_columns(self):
        """Columns backed by a PeriodIndex are selectable by Period and by string."""
        periods = period_range("1/1/2000", periods=5)
        frame = DataFrame(np.random.randn(10, 5), columns=periods)
        first_col = frame[periods[0]]
        tm.assert_series_equal(first_col, frame.iloc[:, 0])
        # GH # 1211
        repr(frame)
        first_col = frame["1/1/2000"]
        tm.assert_series_equal(first_col, frame.iloc[:, 0])

    def test_frame_setitem(self):
        """A PeriodIndex can be stored as a column and round-tripped via set_index."""
        periods = period_range("1/1/2000", periods=5, name="index")
        frame = DataFrame(np.random.randn(5, 3), index=periods)
        frame["Index"] = periods
        result = Index(frame["Index"])
        tm.assert_index_equal(result, periods, check_names=False)
        assert result.name == "Index"
        assert periods.name == "index"
        result = frame.reset_index().set_index("index")
        assert isinstance(result.index, PeriodIndex)
        tm.assert_index_equal(result.index, periods)

    def test_frame_index_to_string(self):
        """to_string() must not raise for a PeriodIndex-indexed frame."""
        monthly = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
        frame = DataFrame(np.random.randn(3, 4), index=monthly)
        # it works!
        frame.to_string()
| [
"pandas.period_range",
"numpy.random.randn",
"pandas.Index",
"pandas._testing.assert_series_equal",
"pandas._testing.assert_index_equal",
"pandas.PeriodIndex"
] | [((188, 223), 'pandas.period_range', 'period_range', (['"""1/1/2000"""'], {'periods': '(5)'}), "('1/1/2000', periods=5)\n", (200, 223), False, 'from pandas import DataFrame, Index, PeriodIndex, period_range\n'), ((317, 358), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['ts', 'df.iloc[:, 0]'], {}), '(ts, df.iloc[:, 0])\n', (339, 358), True, 'import pandas._testing as tm\n'), ((434, 475), 'pandas._testing.assert_series_equal', 'tm.assert_series_equal', (['ts', 'df.iloc[:, 0]'], {}), '(ts, df.iloc[:, 0])\n', (456, 475), True, 'import pandas._testing as tm\n'), ((525, 574), 'pandas.period_range', 'period_range', (['"""1/1/2000"""'], {'periods': '(5)', 'name': '"""index"""'}), "('1/1/2000', periods=5, name='index')\n", (537, 574), False, 'from pandas import DataFrame, Index, PeriodIndex, period_range\n'), ((672, 690), 'pandas.Index', 'Index', (["df['Index']"], {}), "(df['Index'])\n", (677, 690), False, 'from pandas import DataFrame, Index, PeriodIndex, period_range\n'), ((699, 748), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['rs', 'rng'], {'check_names': '(False)'}), '(rs, rng, check_names=False)\n', (720, 748), True, 'import pandas._testing as tm\n'), ((925, 961), 'pandas._testing.assert_index_equal', 'tm.assert_index_equal', (['rs.index', 'rng'], {}), '(rs.index, rng)\n', (946, 961), True, 'import pandas._testing as tm\n'), ((1021, 1074), 'pandas.PeriodIndex', 'PeriodIndex', (["['2011-1', '2011-2', '2011-3']"], {'freq': '"""M"""'}), "(['2011-1', '2011-2', '2011-3'], freq='M')\n", (1032, 1074), False, 'from pandas import DataFrame, Index, PeriodIndex, period_range\n'), ((247, 269), 'numpy.random.randn', 'np.random.randn', (['(10)', '(5)'], {}), '(10, 5)\n', (262, 269), True, 'import numpy as np\n'), ((598, 619), 'numpy.random.randn', 'np.random.randn', (['(5)', '(3)'], {}), '(5, 3)\n', (613, 619), True, 'import numpy as np\n'), ((1101, 1122), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)'], {}), '(3, 4)\n', 
(1116, 1122), True, 'import numpy as np\n')] |
"""
This module contains the :class:`Scene` class which is used to setup a scene for a robot simulation using PyBullet.
"""
import numpy as np
import pybullet as p
from pybullet_utils.bullet_client import BulletClient
from classic_framework import Scene
from classic_framework.pybullet.PyBullet_Camera import InHandCamera, CageCamera
from classic_framework.utils.sim_path import sim_framework_path
class PyBulletScene(Scene):
    """
    This class allows to build a scene for the robot simulation. The standard scene is a model of the Panda robot on a
    table. The returned ids of the assets are saved as an attribute to the object of the Panda_Robot.
    The .urdf files which contain the scene assets (e.g. cubes etc.) are saved in the 'envs' folder of the project.
    """
    def __init__(self, object_list=None, dt=0.001, render=True, realtime=False):
        # NOTE(review): the triple-quoted string below is an ordinary expression statement,
        # not a docstring, because it follows the super().__init__ call. Kept as written.
        super(PyBulletScene, self).__init__(object_list=object_list, dt=dt, render=render)
        """
        Initialization of the physics client and cameras (in-hand and cage cam). Calls :func:`setup_scene`.
        :param realtime: Enable or disable real time simulation (using the real time clock, RTC) in the physics server.
        """
        # Filled in by setup_scene() and load_panda_to_scene() respectively.
        self.physics_client_id = None
        self.ik_client_id = None
        self.robot_physics_client_id = None
        self.robot_ik_client_id = None
        self.setup_scene()
        self.realtime = realtime
        self.inhand_cam = InHandCamera()
        self.cage_cam = CageCamera()
        # Maps '<object_name>_id' -> pybullet body id for objects loaded by name.
        self.obj_name2id = {}
        if self.realtime:
            p.setRealTimeSimulation(1)
    def setup_scene(self):
        """
        This function creates a scene.

        Connects two bullet clients: the main physics client (GUI or DIRECT depending
        on ``self.render``) and a second DIRECT client used only for IK queries, then
        loads the user objects, the ground plane and the table into both.
        :return: no return value
        """
        print("Scene setup")
        # Connect with simulator
        if self.render:
            self.physics_client = BulletClient(p.GUI)  # or p.DIRECT for non-graphical version
        else:
            self.physics_client = BulletClient(p.DIRECT)  # or p.DIRECT for non-graphical version
        self.physics_client_id = self.physics_client._client
        self.ik_client = BulletClient(connection_mode=p.DIRECT)
        self.ik_client_id = self.ik_client._client
        p.setPhysicsEngineParameter(enableFileCaching=0)
        # --------------------------------------------------------------------------------------------------------------
        # # Load scene
        # --------------------------------------------------------------------------------------------------------------
        # module_path = path.dirname(path.abspath(__file__))
        # os.path.abspath(os.curdir)
        # os.chdir("..")
        # module_path = os.path.abspath(os.curdir)
        # os.chdir(os.getcwd() + os.sep + os.pardir)  # moves to parent directory
        # module_path = os.getcwd()
        # User-provided objects are only loaded into the physics client, not the IK client.
        if self.object_list is not None:
            for obj in self.object_list:
                self.load_object_to_scene(path_to_urdf=obj.data_dir + "/" + obj.urdf_name,
                                          orientation=obj.orientation,
                                          position=obj.position,
                                          id_name=obj.object_name)
        self.scene_id = p.loadURDF(sim_framework_path("./envs/plane/plane.urdf"),
                                   physicsClientId=self.physics_client_id)
        self.scene_id_ik = p.loadURDF(sim_framework_path("./envs/plane/plane.urdf"), physicsClientId=self.ik_client_id)
        # load table
        table_urdf = sim_framework_path("./envs/table/table.urdf")
        table_start_position = [0.35, 0.0, 0.0]
        table_start_orientation = [0.0, 0.0, 0.0]
        table_start_orientation_quat = p.getQuaternionFromEuler(table_start_orientation)
        self.table_id = p.loadURDF(table_urdf,
                                   table_start_position,
                                   table_start_orientation_quat,
                                   flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,
                                   physicsClientId=self.physics_client_id)
        self.table_id_ik = p.loadURDF(table_urdf,
                                      table_start_position,
                                      table_start_orientation_quat,
                                      flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,
                                      physicsClientId=self.ik_client_id)
        p.setGravity(0, 0, -9.81)
    def load_panda_to_scene(self, physics_robot=True, orientation=None, position=None, id_name=None, path_to_urdf=None,
                            init_q=None):
        """
        This function loads another panda robot to the simulation environment. If loading the object fails, the program is stopped
        and an appropriate get_error message is returned to user.

        NOTE(review): the ``physics_robot`` parameter is currently unused.
        :param physics_robot: number of active robots in physics client
        :param path_to_urdf: the whole path of the location of the urdf file to be load as string. If none use default
        :param orientation: orientation of the robot. Can be either euler angles, or quaternions. NOTE: quaternions
                            notation: [x, y, z, w] !
        :param position: cartesian world position to place the robot
        :param id_name: string valued name of the robot. This name can then be called as self.'name'_id to get the
                        id number of the object
        :param init_q: initial 7-tuple of joint angles; defaults to a ready pose if None.
        :return: returns the id of the new panda robot
        """
        if position is None:
            position = [0.0, 0.0, 0.88]
        if orientation is None:
            orientation = [0.0, 0.0, 0.0]
        if path_to_urdf is None:
            obj_urdf = sim_framework_path(
                "./envs/frankaemika/robots/panda_arm_hand.urdf")  # panda robot with inhand camera
            # obj_urdf = sim_framework_path("./envs/frankaemika/robots/panda_arm_hand_pybullet.urdf") # panda robot with inhand camera
            # obj_urdf = sim_framework_path("./envs/frankaemika/robots/panda_arm_hand_without_cam.urdf")
            # obj_urdf = sim_framework_path("./envs/frankaemika/robots/panda_arm_hand_without_cam_inertia_from_mujoco.urdf")
        else:
            obj_urdf = path_to_urdf
        orientation = list(orientation)
        # A 3-element orientation is interpreted as euler angles and converted to a quaternion.
        if len(orientation) == 3:
            orientation = p.getQuaternionFromEuler(orientation)
        position = list(position)
        try:
            # The robot is loaded twice: once into the physics client and once into the IK client.
            id = p.loadURDF(obj_urdf,
                            position,
                            orientation,
                            useFixedBase=1,
                            # | p.URDF_USE_SELF_COLLISION_INCLUDE_PARENT
                            flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,
                            # flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_SELF_COLLISION_INCLUDE_PARENT,
                            physicsClientId=self.physics_client_id)
            ik_id = p.loadURDF(obj_urdf,
                               position,
                               orientation,
                               useFixedBase=1,
                               flags=p.URDF_USE_SELF_COLLISION,  # | p.URDF_USE_SELF_COLLISION_INCLUDE_PARENT
                               physicsClientId=self.ik_client_id)
            if id_name is not None:
                setattr(self, id_name + '_id', id)
                self.obj_name2id[id_name + '_id'] = id
            else:
                # No name given: treat this robot as THE scene robot.
                self.robot_physics_client_id = id
                self.robot_ik_client_id = ik_id
        except Exception:
            print()
            print('Stopping the program')
            raise ValueError('Could not load URDF-file: Check the path to file. Stopping the program. Your path:',
                             obj_urdf)
        if init_q is None:
            # Default "ready" joint configuration.
            init_q = (3.57795216e-09,
                      1.74532920e-01,
                      3.30500960e-08,
                      -8.72664630e-01,
                      -1.14096181e-07,
                      1.22173047e+00,
                      7.85398126e-01)
        self.set_q(init_q, id)
        # robotEndEffectorIndex = 8
        # robotEndEffectorIndex = 9
        # presumably link 12 is the grasp-target link of this hand URDF — TODO confirm against the URDF.
        robotEndEffectorIndex = 12
        return id, ik_id, robotEndEffectorIndex
    def load_object_to_scene(self, path_to_urdf, orientation, position, id_name, fixed=0, inertia_from_file=False):
        """
        This function loads an object to the simulation environment. If loading the object fails, the program is stopped
        and an appropriate get_error message is returned to user.
        :param path_to_urdf: the whole path of the location of the urdf file to be load as string
        :param orientation: orientation of the object. Can be either euler angles, or quaternions. NOTE: quaternions
                            notation: [x, y, z, w] !
        :param position: cartesian world position to place the object
        :param id_name: string valued name of the object. This name can then be called as self.'name'_id to get the
                        id number of the object
        :param fixed: if the object is fixed in the scene
        :param inertia_from_file: if the inertia values from file should be used, or pybullet should calculate its own
                                  inertia values.
        :return: returns the id of the loaded object
        """
        obj_urdf = path_to_urdf
        orientation = list(orientation)
        # Euler angles (length 3) are converted to a quaternion; length 4 is used as-is.
        if len(orientation) == 3:
            orientation = p.getQuaternionFromEuler(orientation)
        position = list(position)
        try:
            if inertia_from_file == True:
                id = p.loadURDF(obj_urdf,
                                position,
                                orientation,
                                fixed,
                                flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,
                                physicsClientId=self.physics_client_id)
            else:
                id = p.loadURDF(obj_urdf,
                                position,
                                orientation,
                                fixed,
                                flags=p.URDF_USE_SELF_COLLISION, physicsClientId=self.physics_client_id)
            setattr(self, id_name + '_id', id)
        except Exception:
            print()
            print('Stopping the program')
            raise ValueError('Could not load URDF-file: Check the path to file. Stopping the program. Your path:',
                             obj_urdf)
        return id
    def get_id_from_name(self, obj_name):
        """
        Returns the object id from the object name specified when creating the object.
        Args:
            obj_name: Name of the object
        Returns:
            Index of the object
        """
        return self.obj_name2id[obj_name + '_id']
    def load_graspa_layout_to_scene(self, path_to_sdf, orientation, position, id_name):
        """
        This function loads a GRASPA layout to the simulation environment. If loading the layout fails, the program is stopped
        and an appropriate get_error message is returned to user.
        :param path_to_sdf: the whole path of the location of the sdf file to be load as string
        :param orientation: orientation of the layout. Can be either euler angles, or quaternions. NOTE: quaternions
                            notation: [x, y, z, w] !
        :param position: cartesian world position of the layout. Ref frame is positioned on the bottom right corner
        :param id_name: string valued name of the layout. This name can then be called as self.'name'_id to get the
                        id number of the layout
        :return: returns the id of the loaded assets
        """
        layout_sdf = path_to_sdf
        try:
            objects_ids = p.loadSDF(layout_sdf)
        except Exception:
            print()
            print('Stopping the program')
            raise ValueError('Could not load SDF-file: Check the path to file. Stopping the program. Your path:',
                             layout_sdf)
        # Expose each layout object as self.<id_name>_<i>_id.
        for i, id in enumerate(objects_ids):
            setattr(self, id_name + '_' + str(i) + '_id', id)
        orientation = list(orientation)
        if len(orientation) == 3:
            orientation = p.getQuaternionFromEuler(orientation)
        position = list(position)
        # Re-anchor every loaded object relative to the requested layout pose.
        for obj in objects_ids:
            pose_obj = p.getBasePositionAndOrientation(obj)
            new_pose_obj = p.multiplyTransforms(position, orientation, pose_obj[0], pose_obj[1])
            p.resetBasePositionAndOrientation(obj, new_pose_obj[0], new_pose_obj[1])
        # Draw RGB axes (x red, y green, z blue) of the layout frame for debugging.
        matrix = p.getMatrixFromQuaternion(orientation)
        dcm = np.array([matrix[0:3], matrix[3:6], matrix[6:9]])
        pax = np.add(position, dcm.dot([0.1, 0, 0]))
        pay = np.add(position, dcm.dot([0, 0.1, 0]))
        paz = np.add(position, dcm.dot([0, 0, 0.1]))
        p.addUserDebugLine(position, pax.tolist(), [1, 0, 0])
        p.addUserDebugLine(position, pay.tolist(), [0, 1, 0])
        p.addUserDebugLine(position, paz.tolist(), [0, 0, 1])
        return objects_ids
    def set_q(self, joints, robot_id=None, physicsClientId=None):
        """
        Sets the value of the robot joints.
        WARNING: This overrides the physics, do not use during simulation!!
        :param joints: tuple of size (7)
        :param robot_id: pybullet body id; defaults to the main scene robot.
        :param physicsClientId: client to apply the reset in; defaults to the physics client.
        :return: no return value
        """
        if physicsClientId is None:
            physicsClientId = self.physics_client_id
        if robot_id is None:
            robot_id = self.robot_physics_client_id
        j1, j2, j3, j4, j5, j6, j7 = joints
        # Target angle per joint name; every joint not listed here falls back to 0.0 below.
        joint_angles = {}
        joint_angles["panda_joint_world"] = 0.0  # No actuation
        joint_angles["panda_joint1"] = j1
        joint_angles["panda_joint2"] = j2
        joint_angles["panda_joint3"] = j3
        joint_angles["panda_joint4"] = j4
        joint_angles["panda_joint5"] = j5
        joint_angles["panda_joint6"] = j6
        joint_angles["panda_joint7"] = j7
        joint_angles["panda_joint8"] = 0.0  # No actuation
        joint_angles["panda_hand_joint"] = 0.0  # No actuation
        # 0.05 on both finger joints — presumably the open-gripper position; verify against the URDF limits.
        joint_angles["panda_finger_joint1"] = 0.05
        joint_angles["panda_finger_joint2"] = 0.05
        joint_angles["panda_grasptarget_hand"] = 0.0
        joint_angles["camera_joint"] = 0.0  # No actuation
        joint_angles["camera_depth_joint"] = 0.0  # No actuation
        joint_angles["camera_depth_optical_joint"] = 0.0  # No actuation
        joint_angles["camera_left_ir_joint"] = 0.0  # No actuation
        joint_angles["camera_left_ir_optical_joint"] = 0.0  # No actuation
        joint_angles["camera_right_ir_joint"] = 0.0  # No actuation
        joint_angles["camera_right_ir_optical_joint"] = 0.0  # No actuation
        joint_angles["camera_color_joint"] = 0.0  # No actuation
        joint_angles["camera_color_optical_joint"] = 0.0  # No actuation
        for joint_index in range(p.getNumJoints(robot_id, physicsClientId=physicsClientId)):
            # getJointInfo()[1] is the joint name as bytes.
            joint_name = p.getJointInfo(robot_id, joint_index, physicsClientId=physicsClientId)[1].decode('ascii')
            joint_angle = joint_angles.get(joint_name, 0.0)
            # self.physics_client.changeDynamics(robot_id, joint_index, linearDamping=0, angularDamping=0)
            p.resetJointState(bodyUniqueId=robot_id,
                              jointIndex=joint_index,
                              targetValue=joint_angle,
                              physicsClientId=physicsClientId)
    def get_point_cloud_inHandCam(self, robot_id=None):
        """
        Calculates the 3d world coordinates and the corresponding rgb values of inHandCamera.
        :param plot_points: whether the points shall be plot in matplotlib (very slow)
        :param robot_id: if not set, it will be set to the robot id of the robot within the physics client
        :return: points: numpy array (nb_points x 3)
                 colors: numpy array (nb_points x 4)
        """
        if robot_id is None:
            robot_id = self.robot_physics_client_id
        client_id = self.physics_client_id
        points, colors = self.inhand_cam.calc_point_cloud(id=robot_id, client_id=client_id)
        return points, colors
    def get_point_cloud_CageCam(self, cam_id):
        """
        Calculates the 3d world coordinates and the corresponding rgb values.
        :param cam_id: the id of the cage camera.
        :param plot_points:whether the points shall be plot in matplotlib (very slow)
        :return: 3d world coordinates and the corresponding rgb values
        """
        client_id = self.physics_client_id
        points, colors = self.cage_cam.calc_point_cloud(id=cam_id, client_id=client_id)
        return points, colors
    def get_segmentation_from_cam(self, cam_id, client_id=None, with_noise=False, shadow=True):
        """Return the segmentation image from the cage camera, or None (with a message) on ValueError."""
        if client_id is None:
            client_id = self.physics_client_id
        try:
            _, _, seg_img = self.cage_cam.get_image(cam_id=cam_id,
                                                    client_id=client_id,
                                                    with_noise=with_noise,
                                                    shadow=shadow)
            return seg_img
        except ValueError:
            print("Error, no camera with id " + str(cam_id))
    def get_depth_image_from_cam(self, cam_id, client_id=None, with_noise=False, shadow=True):
        """Return the depth image from the cage camera, or None (with a message) on ValueError."""
        if client_id is None:
            client_id = self.physics_client_id
        try:
            _, depth_img, _ = self.cage_cam.get_image(cam_id=cam_id,
                                                      client_id=client_id,
                                                      with_noise=with_noise,
                                                      shadow=shadow)
            return depth_img
        except ValueError:
            print("Error, no camera with id " + str(cam_id))
    def get_rgb_image_from_cam(self, cam_id, client_id=None, with_noise=False, shadow=True):
        """Return the RGB image from the cage camera, or None (with a message) on ValueError."""
        if client_id is None:
            client_id = self.physics_client_id
        try:
            rgb_img, _, _ = self.cage_cam.get_image(cam_id=cam_id,
                                                    client_id=client_id,
                                                    with_noise=with_noise,
                                                    shadow=shadow)
            return rgb_img
        except ValueError:
            print("Error, no camera with id " + str(cam_id))
    def get_point_cloud_from_cam(self, cam_id, client_id=None, with_noise=False):
        """Return (points, colors) from the cage camera, or None (with a message) on ValueError."""
        if client_id is None:
            client_id = self.physics_client_id
        try:
            points, colors = self.cage_cam.calc_point_cloud(id=cam_id,
                                                            client_id=client_id,
                                                            with_noise=with_noise)
            return points, colors
        except ValueError:
            print("Error, no camera with id " + str(cam_id))
    def disable_robot_vel_ctrl(self, robot_id):
        """Disable velocity control for the 7 arm joints (indices 1..7) by zeroing the motor forces."""
        p.setJointMotorControlArray(robot_id,
                                    list(np.arange(1, 8)),
                                    p.VELOCITY_CONTROL,
                                    forces=list(np.zeros(7)),
                                    physicsClientId=self.physics_client_id)
    def enable_robot_vel_ctrl(self, robot_id, max_forces):
        """Enable velocity control for the 7 arm joints (indices 1..7) with the given maximum motor forces."""
        p.setJointMotorControlArray(robot_id,
                                    list(np.arange(1, 8)),
                                    p.VELOCITY_CONTROL,
                                    forces=list(max_forces),
                                    physicsClientId=self.physics_client_id)
| [
"numpy.arange",
"pybullet.getQuaternionFromEuler",
"pybullet.setRealTimeSimulation",
"pybullet.setGravity",
"pybullet.resetBasePositionAndOrientation",
"classic_framework.utils.sim_path.sim_framework_path",
"pybullet.multiplyTransforms",
"pybullet.getJointInfo",
"pybullet.loadSDF",
"pybullet.reset... | [((1449, 1463), 'classic_framework.pybullet.PyBullet_Camera.InHandCamera', 'InHandCamera', ([], {}), '()\n', (1461, 1463), False, 'from classic_framework.pybullet.PyBullet_Camera import InHandCamera, CageCamera\n'), ((1488, 1500), 'classic_framework.pybullet.PyBullet_Camera.CageCamera', 'CageCamera', ([], {}), '()\n', (1498, 1500), False, 'from classic_framework.pybullet.PyBullet_Camera import InHandCamera, CageCamera\n'), ((2105, 2143), 'pybullet_utils.bullet_client.BulletClient', 'BulletClient', ([], {'connection_mode': 'p.DIRECT'}), '(connection_mode=p.DIRECT)\n', (2117, 2143), False, 'from pybullet_utils.bullet_client import BulletClient\n'), ((2203, 2251), 'pybullet.setPhysicsEngineParameter', 'p.setPhysicsEngineParameter', ([], {'enableFileCaching': '(0)'}), '(enableFileCaching=0)\n', (2230, 2251), True, 'import pybullet as p\n'), ((3506, 3551), 'classic_framework.utils.sim_path.sim_framework_path', 'sim_framework_path', (['"""./envs/table/table.urdf"""'], {}), "('./envs/table/table.urdf')\n", (3524, 3551), False, 'from classic_framework.utils.sim_path import sim_framework_path\n'), ((3689, 3738), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['table_start_orientation'], {}), '(table_start_orientation)\n', (3713, 3738), True, 'import pybullet as p\n'), ((3763, 3949), 'pybullet.loadURDF', 'p.loadURDF', (['table_urdf', 'table_start_position', 'table_start_orientation_quat'], {'flags': '(p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE)', 'physicsClientId': 'self.physics_client_id'}), '(table_urdf, table_start_position, table_start_orientation_quat,\n flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,\n physicsClientId=self.physics_client_id)\n', (3773, 3949), True, 'import pybullet as p\n'), ((4110, 4291), 'pybullet.loadURDF', 'p.loadURDF', (['table_urdf', 'table_start_position', 'table_start_orientation_quat'], {'flags': '(p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE)', 'physicsClientId': 
'self.ik_client_id'}), '(table_urdf, table_start_position, table_start_orientation_quat,\n flags=p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE,\n physicsClientId=self.ik_client_id)\n', (4120, 4291), True, 'import pybullet as p\n'), ((4445, 4470), 'pybullet.setGravity', 'p.setGravity', (['(0)', '(0)', '(-9.81)'], {}), '(0, 0, -9.81)\n', (4457, 4470), True, 'import pybullet as p\n'), ((12693, 12731), 'pybullet.getMatrixFromQuaternion', 'p.getMatrixFromQuaternion', (['orientation'], {}), '(orientation)\n', (12718, 12731), True, 'import pybullet as p\n'), ((12746, 12795), 'numpy.array', 'np.array', (['[matrix[0:3], matrix[3:6], matrix[6:9]]'], {}), '([matrix[0:3], matrix[3:6], matrix[6:9]])\n', (12754, 12795), True, 'import numpy as np\n'), ((1569, 1595), 'pybullet.setRealTimeSimulation', 'p.setRealTimeSimulation', (['(1)'], {}), '(1)\n', (1592, 1595), True, 'import pybullet as p\n'), ((1843, 1862), 'pybullet_utils.bullet_client.BulletClient', 'BulletClient', (['p.GUI'], {}), '(p.GUI)\n', (1855, 1862), False, 'from pybullet_utils.bullet_client import BulletClient\n'), ((1953, 1975), 'pybullet_utils.bullet_client.BulletClient', 'BulletClient', (['p.DIRECT'], {}), '(p.DIRECT)\n', (1965, 1975), False, 'from pybullet_utils.bullet_client import BulletClient\n'), ((3221, 3266), 'classic_framework.utils.sim_path.sim_framework_path', 'sim_framework_path', (['"""./envs/plane/plane.urdf"""'], {}), "('./envs/plane/plane.urdf')\n", (3239, 3266), False, 'from classic_framework.utils.sim_path import sim_framework_path\n'), ((3381, 3426), 'classic_framework.utils.sim_path.sim_framework_path', 'sim_framework_path', (['"""./envs/plane/plane.urdf"""'], {}), "('./envs/plane/plane.urdf')\n", (3399, 3426), False, 'from classic_framework.utils.sim_path import sim_framework_path\n'), ((5701, 5768), 'classic_framework.utils.sim_path.sim_framework_path', 'sim_framework_path', (['"""./envs/frankaemika/robots/panda_arm_hand.urdf"""'], {}), 
"('./envs/frankaemika/robots/panda_arm_hand.urdf')\n", (5719, 5768), False, 'from classic_framework.utils.sim_path import sim_framework_path\n'), ((6338, 6375), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['orientation'], {}), '(orientation)\n', (6362, 6375), True, 'import pybullet as p\n'), ((6441, 6614), 'pybullet.loadURDF', 'p.loadURDF', (['obj_urdf', 'position', 'orientation'], {'useFixedBase': '(1)', 'flags': '(p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE)', 'physicsClientId': 'self.physics_client_id'}), '(obj_urdf, position, orientation, useFixedBase=1, flags=p.\n URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE, physicsClientId\n =self.physics_client_id)\n', (6451, 6614), True, 'import pybullet as p\n'), ((6945, 7077), 'pybullet.loadURDF', 'p.loadURDF', (['obj_urdf', 'position', 'orientation'], {'useFixedBase': '(1)', 'flags': 'p.URDF_USE_SELF_COLLISION', 'physicsClientId': 'self.ik_client_id'}), '(obj_urdf, position, orientation, useFixedBase=1, flags=p.\n URDF_USE_SELF_COLLISION, physicsClientId=self.ik_client_id)\n', (6955, 7077), True, 'import pybullet as p\n'), ((9502, 9539), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['orientation'], {}), '(orientation)\n', (9526, 9539), True, 'import pybullet as p\n'), ((11853, 11874), 'pybullet.loadSDF', 'p.loadSDF', (['layout_sdf'], {}), '(layout_sdf)\n', (11862, 11874), True, 'import pybullet as p\n'), ((12328, 12365), 'pybullet.getQuaternionFromEuler', 'p.getQuaternionFromEuler', (['orientation'], {}), '(orientation)\n', (12352, 12365), True, 'import pybullet as p\n'), ((12456, 12492), 'pybullet.getBasePositionAndOrientation', 'p.getBasePositionAndOrientation', (['obj'], {}), '(obj)\n', (12487, 12492), True, 'import pybullet as p\n'), ((12520, 12589), 'pybullet.multiplyTransforms', 'p.multiplyTransforms', (['position', 'orientation', 'pose_obj[0]', 'pose_obj[1]'], {}), '(position, orientation, pose_obj[0], pose_obj[1])\n', (12540, 12589), True, 'import 
pybullet as p\n'), ((12602, 12674), 'pybullet.resetBasePositionAndOrientation', 'p.resetBasePositionAndOrientation', (['obj', 'new_pose_obj[0]', 'new_pose_obj[1]'], {}), '(obj, new_pose_obj[0], new_pose_obj[1])\n', (12635, 12674), True, 'import pybullet as p\n'), ((14987, 15044), 'pybullet.getNumJoints', 'p.getNumJoints', (['robot_id'], {'physicsClientId': 'physicsClientId'}), '(robot_id, physicsClientId=physicsClientId)\n', (15001, 15044), True, 'import pybullet as p\n'), ((15341, 15467), 'pybullet.resetJointState', 'p.resetJointState', ([], {'bodyUniqueId': 'robot_id', 'jointIndex': 'joint_index', 'targetValue': 'joint_angle', 'physicsClientId': 'physicsClientId'}), '(bodyUniqueId=robot_id, jointIndex=joint_index,\n targetValue=joint_angle, physicsClientId=physicsClientId)\n', (15358, 15467), True, 'import pybullet as p\n'), ((9651, 9815), 'pybullet.loadURDF', 'p.loadURDF', (['obj_urdf', 'position', 'orientation', 'fixed'], {'flags': '(p.URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE)', 'physicsClientId': 'self.physics_client_id'}), '(obj_urdf, position, orientation, fixed, flags=p.\n URDF_USE_SELF_COLLISION | p.URDF_USE_INERTIA_FROM_FILE, physicsClientId\n =self.physics_client_id)\n', (9661, 9815), True, 'import pybullet as p\n'), ((10005, 10133), 'pybullet.loadURDF', 'p.loadURDF', (['obj_urdf', 'position', 'orientation', 'fixed'], {'flags': 'p.URDF_USE_SELF_COLLISION', 'physicsClientId': 'self.physics_client_id'}), '(obj_urdf, position, orientation, fixed, flags=p.\n URDF_USE_SELF_COLLISION, physicsClientId=self.physics_client_id)\n', (10015, 10133), True, 'import pybullet as p\n'), ((19219, 19234), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (19228, 19234), True, 'import numpy as np\n'), ((19578, 19593), 'numpy.arange', 'np.arange', (['(1)', '(8)'], {}), '(1, 8)\n', (19587, 19593), True, 'import numpy as np\n'), ((19341, 19352), 'numpy.zeros', 'np.zeros', (['(7)'], {}), '(7)\n', (19349, 19352), True, 'import numpy as np\n'), ((15072, 
15142), 'pybullet.getJointInfo', 'p.getJointInfo', (['robot_id', 'joint_index'], {'physicsClientId': 'physicsClientId'}), '(robot_id, joint_index, physicsClientId=physicsClientId)\n', (15086, 15142), True, 'import pybullet as p\n')] |
from time import time
from modules.logging import logger
import matplotlib.pyplot as plt
import numpy as np
import h5py
import shutil
import os
import collections
def show_slices(pixels, name, nr_slices=12, cols=4, output_dir=None, size=7):
    """
    Plot `nr_slices` equally spaced 2D slices of a 3D (or 4D, channel-last) volume
    in a grid of `cols` columns.

    :param pixels: volume indexed as pixels[slice, ...]; 4D input shows channel 0 only
    :param name: title used for console echo and the output file name
    :param nr_slices: number of slices to display
    :param cols: number of subplot columns
    :param output_dir: if given, save '<name>-slices.jpg' there instead of showing
    :param size: size (inches) of each subplot cell
    """
    print(name)
    fig = plt.figure()
    depth = np.shape(pixels)[0]
    slice_depth = round(depth / nr_slices)
    rows = round(nr_slices / cols) + 1
    fig.set_size_inches(cols * size, rows * size)
    for i in range(nr_slices):
        # Clamp the index: round() can round up, making slice_depth*i exceed depth-1
        # for volumes with fewer slices than nr_slices (original code raised IndexError).
        slice_pos = min(int(slice_depth * i), depth - 1)
        ax = fig.add_subplot(rows, cols, i + 1)
        im = pixels[slice_pos]
        if len(np.shape(im)) > 2:
            im = im[:, :, 0]  # show the first channel only
        ax.imshow(im, cmap='gray')
    if output_dir is not None:
        f = output_dir + name + '-' + 'slices.jpg'
        plt.savefig(f)
        plt.close(fig)
    else:
        plt.show()
def show_image(pixels, slice_pos, name, output_dir=None, size=4):
    """
    Display (or save) one slice of a 3D/4D volume.

    :param pixels: volume indexed as pixels[slice, ...]; 4D input shows channel 0 only
    :param slice_pos: relative slice position — presumably a fraction in (0, 1];
                      the index depth*(slice_pos-1) is negative and relies on
                      Python's negative indexing (TODO confirm with callers)
    :param name: title used for console echo and the output file name
    :param output_dir: if given, save '<name>-slice-<slice_pos>.jpg' there instead of showing
    :param size: figure size in inches (square)
    """
    print(name)
    figure, axis = plt.subplots(1)
    figure.set_size_inches(size, size)
    depth = np.shape(pixels)[0]
    img = pixels[round(depth * (slice_pos - 1))]
    if len(np.shape(img)) > 2:
        img = img[:, :, 0]
    axis.imshow(img, cmap=plt.cm.gray)
    if output_dir is None:
        plt.show()
    else:
        out_file = output_dir + name + '-' + 'slice-' + str(slice_pos) + '.jpg'
        plt.savefig(out_file)
        plt.close(figure)
def validate_dataset(dataset_dir, name, image_dims, save_dir=None):
    """
    Validate an X/Y HDF5 dataset produced by create_xy_datasets.

    Checks that X and Y have the same length, that no X image is all zeros,
    and that every Y row looks like a valid one-hot 2-class label (not all
    zero, not all set, and the two entries differ). Logs a summary with the
    label distribution and optionally saves a few sample slice grids.

    :param dataset_dir: directory containing the dataset file
    :param name: dataset base name (see dataset_path)
    :param image_dims: (depth, height, width) used to resolve the file name
    :param save_dir: if given, sample slice images are written there
    :return: True when all checks passed, False otherwise
    """
    dataset_file = dataset_path(dataset_dir, name, image_dims)
    ok = True
    logger.info('VALIDATING DATASET ' + dataset_file)
    with h5py.File(dataset_file, 'r') as h5f:
        x_ds = h5f['X']
        y_ds = h5f['Y']
        if(len(x_ds) != len(y_ds)):
            logger.warning('VALIDATION ERROR: x and y datasets with different lengths')
            ok = False
        # An all-zero image is treated as "missing".
        for px in range(len(x_ds)):
            arr = np.array(x_ds[px])
            if(not np.any(arr)):
                logger.warning('VALIDATION ERROR: Image not found at index=' + str(px))
                ok = False
        # Accumulate the per-class label counts while checking each label row.
        label_total = np.array([[0,0]])
        for py in range(len(y_ds)):
            arr = np.array(y_ds[py])
            label_total = arr + label_total
            # Invalid when: all zeros, all ones, or both entries equal (not one-hot).
            if(not np.any(arr) or np.all(arr) or arr[0]==arr[1]):
                logger.warning('VALIDATION ERROR: Invalid label found at index=' + str(py) + ' label=' + str(arr))
                ok = False
        label0_ratio = label_total[0][0]/len(y_ds)
        label1_ratio = label_total[0][1]/len(y_ds)
        logger.info('Summary')
        logger.info('X shape=' + str(x_ds.shape))
        logger.info('Y shape=' + str(y_ds.shape))
        logger.info('Y: total: ' + str(len(y_ds)))
        logger.info('Y: label 0: ' + str(label_total[0][0]) + ' ' + str(100*label0_ratio) + '%')
        logger.info('Y: label 1: ' + str(label_total[0][1]) + ' ' + str(100*label1_ratio) + '%')
        logger.info('Recording sample data')
        # Sample up to 3 patients evenly spread over the dataset.
        size = len(x_ds)
        qtty = min(3, size)
        f = size/qtty
        for i in range(qtty):
            pi = round(i*f)
            logger.info('patient_index ' + str(pi))
            logger.info('x=')
            # Slice images are only rendered when a save directory was provided.
            if(save_dir!=None):
                mkdirs(save_dir)
                show_slices(x_ds[pi], name + str(y_ds[pi]), output_dir=save_dir)
            logger.info('y=' + str(y_ds[pi]))
    return ok
def dataset_path(dataset_dir, name, image_dims):
    """Return the HDF5 file path '<dataset_dir><name>-<d0>-<d1>-<d2>.h5' for the given image dims."""
    d0, d1, d2 = image_dims[0], image_dims[1], image_dims[2]
    return f"{dataset_dir}{name}-{d0}-{d1}-{d2}.h5"
def create_xy_datasets(output_dir, name, image_dims, size):
    """
    Create a writable HDF5 file with an 'X' image dataset and a 'Y' label dataset.

    'X' has shape (size, d0, d1, d2, 1) chunked one sample at a time; 'Y' has
    shape (size, 2). The caller is responsible for closing the returned file.

    :return: (h5_file, x_dataset, y_dataset)
    """
    h5_file = h5py.File(dataset_path(output_dir, name, image_dims), 'w')
    sample_shape = (image_dims[0], image_dims[1], image_dims[2], 1)
    h5_file.create_dataset('X', (size,) + sample_shape, chunks=(1,) + sample_shape, dtype='f')
    h5_file.create_dataset('Y', (size, 2), dtype='f')
    logger.debug('input x shape={}'.format(h5_file['X'].shape))
    return h5_file, h5_file['X'], h5_file['Y']
def normalize_pixels(image_pixels, min_bound, max_bound, pixels_mean):
    """
    Min-max normalize pixels to [0, 1], clip out-of-range values, then zero-center.

    :param image_pixels: numpy array of raw pixel values
    :param min_bound: value mapped to 0
    :param max_bound: value mapped to 1
    :param pixels_mean: mean (in normalized units) subtracted to zero-center
    :return: normalized, clipped and zero-centered array
    """
    image_pixels = (image_pixels - min_bound) / (max_bound - min_bound)
    image_pixels[image_pixels > 1] = 1.
    image_pixels[image_pixels < 0] = 0.
    # 0-center pixels
    logger.debug('mean pixels=' + str(np.mean(image_pixels)))
    # BUG FIX: the original referenced the undefined name 'pixel_mean'
    # (NameError at runtime); the parameter is called 'pixels_mean'.
    image_pixels = image_pixels - pixels_mean
    return image_pixels
def mkdirs(base_dir, dirs=(), recreate=False):
    """
    Ensure base_dir and any listed subdirectories exist.

    :param base_dir: base directory path; subdirectory names are appended by plain
                     string concatenation, so base_dir should end with a separator
    :param dirs: iterable of subdirectory names to create under base_dir
                 (default changed from a mutable [] to an immutable tuple)
    :param recreate: when True, remove base_dir first (errors ignored) so the
                     tree starts out empty
    """
    if recreate:
        shutil.rmtree(base_dir, True)
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists() + os.makedirs() pattern.
    os.makedirs(base_dir, exist_ok=True)
    for d in dirs:
        os.makedirs(base_dir + d, exist_ok=True)
class Timer:
    """Simple wall-clock timer that optionally logs start/stop messages."""

    def __init__(self, name, debug=True):
        """
        :param name: label used in the log messages
        :param debug: when True, log on start() and stop()
        """
        self._name = name
        self._debug = debug
        self.start()

    def start(self):
        """(Re)start the timer."""
        self._start = time()
        # BUG FIX: initialize the stored duration so elapsed() can be called
        # before stop() (the original raised AttributeError in that case).
        self._lastElapsed = None
        if self._debug:
            logger.info('> [started] ' + self._name + '...')

    def stop(self):
        """Stop the timer and record the elapsed duration in seconds."""
        self._lastElapsed = time() - self._start
        if self._debug:
            logger.info('> [done] {} ({:.3f} ms)'.format(self._name, self._lastElapsed * 1000))

    def elapsed(self):
        """Return the frozen duration after stop(), or the running time since start()."""
        if self._lastElapsed is not None:
            return self._lastElapsed
        return time() - self._start
| [
"h5py.File",
"matplotlib.pyplot.show",
"os.makedirs",
"matplotlib.pyplot.close",
"os.path.exists",
"numpy.all",
"time.time",
"numpy.shape",
"numpy.any",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.array",
"modules.logging.logger.info",
"modules.logging.logger.warning",
"shutil.rmtre... | [((268, 280), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (278, 280), True, 'import matplotlib.pyplot as plt\n'), ((907, 922), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (919, 922), True, 'import matplotlib.pyplot as plt\n'), ((1448, 1497), 'modules.logging.logger.info', 'logger.info', (["('VALIDATING DATASET ' + dataset_file)"], {}), "('VALIDATING DATASET ' + dataset_file)\n", (1459, 1497), False, 'from modules.logging import logger\n'), ((3596, 3624), 'h5py.File', 'h5py.File', (['dataset_file', '"""w"""'], {}), "(dataset_file, 'w')\n", (3605, 3624), False, 'import h5py\n'), ((741, 755), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f'], {}), '(f)\n', (752, 755), True, 'import matplotlib.pyplot as plt\n'), ((764, 778), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (773, 778), True, 'import matplotlib.pyplot as plt\n'), ((797, 807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (805, 807), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1238), 'matplotlib.pyplot.savefig', 'plt.savefig', (['file'], {}), '(file)\n', (1232, 1238), True, 'import matplotlib.pyplot as plt\n'), ((1247, 1262), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (1256, 1262), True, 'import matplotlib.pyplot as plt\n'), ((1281, 1291), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1289, 1291), True, 'import matplotlib.pyplot as plt\n'), ((1508, 1536), 'h5py.File', 'h5py.File', (['dataset_file', '"""r"""'], {}), "(dataset_file, 'r')\n", (1517, 1536), False, 'import h5py\n'), ((1986, 2004), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (1994, 2004), True, 'import numpy as np\n'), ((2449, 2471), 'modules.logging.logger.info', 'logger.info', (['"""Summary"""'], {}), "('Summary')\n", (2460, 2471), False, 'from modules.logging import logger\n'), ((2834, 2870), 'modules.logging.logger.info', 'logger.info', (['"""Recording sample data"""'], {}), "('Recording 
sample data')\n", (2845, 2870), False, 'from modules.logging import logger\n'), ((4424, 4453), 'shutil.rmtree', 'shutil.rmtree', (['base_dir', '(True)'], {}), '(base_dir, True)\n', (4437, 4453), False, 'import shutil\n'), ((4466, 4490), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (4480, 4490), False, 'import os\n'), ((4500, 4521), 'os.makedirs', 'os.makedirs', (['base_dir'], {}), '(base_dir)\n', (4511, 4521), False, 'import os\n'), ((4808, 4814), 'time.time', 'time', ([], {}), '()\n', (4812, 4814), False, 'from time import time\n'), ((1028, 1040), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (1036, 1040), True, 'import numpy as np\n'), ((1642, 1717), 'modules.logging.logger.warning', 'logger.warning', (['"""VALIDATION ERROR: x and y datasets with different lengths"""'], {}), "('VALIDATION ERROR: x and y datasets with different lengths')\n", (1656, 1717), False, 'from modules.logging import logger\n'), ((1796, 1814), 'numpy.array', 'np.array', (['x_ds[px]'], {}), '(x_ds[px])\n', (1804, 1814), True, 'import numpy as np\n'), ((2058, 2076), 'numpy.array', 'np.array', (['y_ds[py]'], {}), '(y_ds[py])\n', (2066, 2076), True, 'import numpy as np\n'), ((3068, 3085), 'modules.logging.logger.info', 'logger.info', (['"""x="""'], {}), "('x=')\n", (3079, 3085), False, 'from modules.logging import logger\n'), ((4561, 4589), 'os.path.exists', 'os.path.exists', (['(base_dir + d)'], {}), '(base_dir + d)\n', (4575, 4589), False, 'import os\n'), ((4603, 4628), 'os.makedirs', 'os.makedirs', (['(base_dir + d)'], {}), '(base_dir + d)\n', (4614, 4628), False, 'import os\n'), ((4852, 4900), 'modules.logging.logger.info', 'logger.info', (["('> [started] ' + self._name + '...')"], {}), "('> [started] ' + self._name + '...')\n", (4863, 4900), False, 'from modules.logging import logger\n'), ((4951, 4957), 'time.time', 'time', ([], {}), '()\n', (4955, 4957), False, 'from time import time\n'), ((305, 321), 'numpy.shape', 'np.shape', (['pixels'], {}), 
'(pixels)\n', (313, 321), True, 'import numpy as np\n'), ((576, 588), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (584, 588), True, 'import numpy as np\n'), ((1834, 1845), 'numpy.any', 'np.any', (['arr'], {}), '(arr)\n', (1840, 1845), True, 'import numpy as np\n'), ((2155, 2166), 'numpy.all', 'np.all', (['arr'], {}), '(arr)\n', (2161, 2166), True, 'import numpy as np\n'), ((4257, 4278), 'numpy.mean', 'np.mean', (['image_pixels'], {}), '(image_pixels)\n', (4264, 4278), True, 'import numpy as np\n'), ((5241, 5247), 'time.time', 'time', ([], {}), '()\n', (5245, 5247), False, 'from time import time\n'), ((981, 997), 'numpy.shape', 'np.shape', (['pixels'], {}), '(pixels)\n', (989, 997), True, 'import numpy as np\n'), ((2140, 2151), 'numpy.any', 'np.any', (['arr'], {}), '(arr)\n', (2146, 2151), True, 'import numpy as np\n')] |
import numpy as np
import torch
import random
import logging
import time
import pickle
import os
from retro_star.common import args, prepare_starting_molecules, prepare_mlp, \
prepare_molstar_planner, smiles_to_fp
from retro_star.model import ValueMLP
from retro_star.utils import setup_logger
def retro_plan():
    """Run the Retro* planner over every test route and dump the results.

    Reads all configuration from the global ``args`` (retro_star.common):
    loads the starting molecules and test routes, builds the one-step
    expansion model and (optionally) the learned value network, plans a
    route for every target molecule, and pickles the aggregate ``result``
    dict to ``<args.result_folder>/plan.pkl``.
    """
    device = torch.device('cuda' if args.gpu >= 0 else 'cpu')
    starting_mols = prepare_starting_molecules(args.starting_molecules)

    # Context manager closes the route file promptly; the previous
    # pickle.load(open(...)) leaked the file handle.
    with open(args.test_routes, 'rb') as route_file:
        routes = pickle.load(route_file)
    logging.info('%d routes extracted from %s loaded' % (len(routes),
                                                          args.test_routes))

    one_step = prepare_mlp(args.mlp_templates, args.mlp_model_dump)

    # create result folder
    if not os.path.exists(args.result_folder):
        os.mkdir(args.result_folder)

    if args.use_value_fn:
        model = ValueMLP(
            n_layers=args.n_layers,
            fp_dim=args.fp_dim,
            latent_dim=args.latent_dim,
            dropout_rate=0.1,
            device=device
        ).to(device)
        model_f = '%s/%s' % (args.save_folder, args.value_model)
        logging.info('Loading value nn from %s' % model_f)
        model.load_state_dict(torch.load(model_f, map_location=device))
        model.eval()

        def value_fn(mol):
            # Fingerprint the molecule and score it with the value network.
            fp = smiles_to_fp(mol, fp_dim=args.fp_dim).reshape(1, -1)
            fp = torch.FloatTensor(fp).to(device)
            v = model(fp).item()
            return v
    else:
        # Without a value network every node gets a zero heuristic cost
        # (def instead of an assigned lambda, per PEP 8 E731).
        def value_fn(mol):
            return 0.

    plan_handle = prepare_molstar_planner(
        one_step=one_step,
        value_fn=value_fn,
        starting_mols=starting_mols,
        expansion_topk=args.expansion_topk,
        iterations=args.iterations,
        viz=args.viz,
        viz_dir=args.viz_dir
    )

    result = {
        'succ': [],
        'cumulated_time': [],
        'iter': [],
        'routes': [],
        'route_costs': [],
        'route_lens': []
    }
    num_targets = len(routes)
    t0 = time.time()
    for (i, route) in enumerate(routes):
        # Each route string encodes "target>..."; the target is the prefix.
        target_mol = route[0].split('>')[0]
        succ, msg = plan_handle(target_mol, i)

        result['succ'].append(succ)
        result['cumulated_time'].append(time.time() - t0)
        result['iter'].append(msg[1])
        result['routes'].append(msg[0])
        if succ:
            result['route_costs'].append(msg[0].total_cost)
            result['route_lens'].append(msg[0].length)
        else:
            result['route_costs'].append(None)
            result['route_lens'].append(None)

        # Running summary after every target.
        tot_num = i + 1
        tot_succ = np.array(result['succ']).sum()
        avg_time = (time.time() - t0) * 1.0 / tot_num
        avg_iter = np.array(result['iter'], dtype=float).mean()
        logging.info('Succ: %d/%d/%d | avg time: %.2f s | avg iter: %.2f' %
                     (tot_succ, tot_num, num_targets, avg_time, avg_iter))

    # Persist results; the context manager replaces the manual open/close pair.
    with open(args.result_folder + '/plan.pkl', 'wb') as f:
        pickle.dump(result, f)
if __name__ == '__main__':
    # Seed every RNG source (NumPy, PyTorch, stdlib random) so planning
    # runs are reproducible, then run the planner.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    setup_logger('plan.log')
    retro_plan()
| [
"os.mkdir",
"pickle.dump",
"retro_star.common.smiles_to_fp",
"numpy.random.seed",
"retro_star.model.ValueMLP",
"torch.manual_seed",
"torch.load",
"os.path.exists",
"torch.FloatTensor",
"time.time",
"retro_star.common.prepare_mlp",
"logging.info",
"random.seed",
"numpy.array",
"torch.devi... | [((331, 379), 'torch.device', 'torch.device', (["('cuda' if args.gpu >= 0 else 'cpu')"], {}), "('cuda' if args.gpu >= 0 else 'cpu')\n", (343, 379), False, 'import torch\n'), ((401, 452), 'retro_star.common.prepare_starting_molecules', 'prepare_starting_molecules', (['args.starting_molecules'], {}), '(args.starting_molecules)\n', (427, 452), False, 'from retro_star.common import args, prepare_starting_molecules, prepare_mlp, prepare_molstar_planner, smiles_to_fp\n'), ((671, 723), 'retro_star.common.prepare_mlp', 'prepare_mlp', (['args.mlp_templates', 'args.mlp_model_dump'], {}), '(args.mlp_templates, args.mlp_model_dump)\n', (682, 723), False, 'from retro_star.common import args, prepare_starting_molecules, prepare_mlp, prepare_molstar_planner, smiles_to_fp\n'), ((1554, 1754), 'retro_star.common.prepare_molstar_planner', 'prepare_molstar_planner', ([], {'one_step': 'one_step', 'value_fn': 'value_fn', 'starting_mols': 'starting_mols', 'expansion_topk': 'args.expansion_topk', 'iterations': 'args.iterations', 'viz': 'args.viz', 'viz_dir': 'args.viz_dir'}), '(one_step=one_step, value_fn=value_fn, starting_mols\n =starting_mols, expansion_topk=args.expansion_topk, iterations=args.\n iterations, viz=args.viz, viz_dir=args.viz_dir)\n', (1577, 1754), False, 'from retro_star.common import args, prepare_starting_molecules, prepare_mlp, prepare_molstar_planner, smiles_to_fp\n'), ((2012, 2023), 'time.time', 'time.time', ([], {}), '()\n', (2021, 2023), False, 'import time\n'), ((2971, 2993), 'pickle.dump', 'pickle.dump', (['result', 'f'], {}), '(result, f)\n', (2982, 2993), False, 'import pickle\n'), ((3040, 3065), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3054, 3065), True, 'import numpy as np\n'), ((3070, 3098), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (3087, 3098), False, 'import torch\n'), ((3103, 3125), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3114, 3125), 
False, 'import random\n'), ((3130, 3154), 'retro_star.utils.setup_logger', 'setup_logger', (['"""plan.log"""'], {}), "('plan.log')\n", (3142, 3154), False, 'from retro_star.utils import setup_logger\n'), ((763, 797), 'os.path.exists', 'os.path.exists', (['args.result_folder'], {}), '(args.result_folder)\n', (777, 797), False, 'import os\n'), ((807, 835), 'os.mkdir', 'os.mkdir', (['args.result_folder'], {}), '(args.result_folder)\n', (815, 835), False, 'import os\n'), ((1147, 1197), 'logging.info', 'logging.info', (["('Loading value nn from %s' % model_f)"], {}), "('Loading value nn from %s' % model_f)\n", (1159, 1197), False, 'import logging\n'), ((2770, 2896), 'logging.info', 'logging.info', (["('Succ: %d/%d/%d | avg time: %.2f s | avg iter: %.2f' % (tot_succ, tot_num,\n num_targets, avg_time, avg_iter))"], {}), "('Succ: %d/%d/%d | avg time: %.2f s | avg iter: %.2f' % (\n tot_succ, tot_num, num_targets, avg_time, avg_iter))\n", (2782, 2896), False, 'import logging\n'), ((1228, 1268), 'torch.load', 'torch.load', (['model_f'], {'map_location': 'device'}), '(model_f, map_location=device)\n', (1238, 1268), False, 'import torch\n'), ((879, 997), 'retro_star.model.ValueMLP', 'ValueMLP', ([], {'n_layers': 'args.n_layers', 'fp_dim': 'args.fp_dim', 'latent_dim': 'args.latent_dim', 'dropout_rate': '(0.1)', 'device': 'device'}), '(n_layers=args.n_layers, fp_dim=args.fp_dim, latent_dim=args.\n latent_dim, dropout_rate=0.1, device=device)\n', (887, 997), False, 'from retro_star.model import ValueMLP\n'), ((2234, 2245), 'time.time', 'time.time', ([], {}), '()\n', (2243, 2245), False, 'import time\n'), ((2613, 2637), 'numpy.array', 'np.array', (["result['succ']"], {}), "(result['succ'])\n", (2621, 2637), True, 'import numpy as np\n'), ((2717, 2754), 'numpy.array', 'np.array', (["result['iter']"], {'dtype': 'float'}), "(result['iter'], dtype=float)\n", (2725, 2754), True, 'import numpy as np\n'), ((1337, 1374), 'retro_star.common.smiles_to_fp', 'smiles_to_fp', (['mol'], 
{'fp_dim': 'args.fp_dim'}), '(mol, fp_dim=args.fp_dim)\n', (1349, 1374), False, 'from retro_star.common import args, prepare_starting_molecules, prepare_mlp, prepare_molstar_planner, smiles_to_fp\n'), ((1406, 1427), 'torch.FloatTensor', 'torch.FloatTensor', (['fp'], {}), '(fp)\n', (1423, 1427), False, 'import torch\n'), ((2664, 2675), 'time.time', 'time.time', ([], {}), '()\n', (2673, 2675), False, 'import time\n')] |
import os
import sys
import json
import numpy as np
from collections import deque, namedtuple
from argparse import ArgumentParser
os.environ['TF_KERAS'] = '1'
from tensorflow import keras
import bert_tokenization as tokenization
from keras_bert import load_trained_model_from_checkpoint, AdamWarmup
from keras_bert import calc_train_steps, get_custom_objects
from config import DEFAULT_SEQ_LEN, DEFAULT_BATCH_SIZE, DEFAULT_PREDICT_START
# Bundle of every artefact the preprocessing helpers produce for a data
# split: the raw words, their word-piece tokens/labels, per-word piece
# counts, the context-packed ("combined") tokens/labels, and for each
# combined sample which sentences it contains and where they start.
Sentences = namedtuple('Sentences', [
    'words', 'tokens', 'labels', 'lengths',
    'combined_tokens', 'combined_labels','sentence_numbers', 'sentence_starts'
])
def argument_parser(mode='train'):
    """Build the command-line parser shared by training and prediction.

    The ``mode`` argument is accepted for API compatibility but does not
    currently alter the set of options.
    """
    parser = ArgumentParser()
    parser.add_argument('--input_data', required=True,
                        help='Training data')
    parser.add_argument('--batch_size', type=int, default=DEFAULT_BATCH_SIZE,
                        help='Batch size for training')
    parser.add_argument('--output_spans', default="pubmed-output/output.spans",
                        help='File to write predicted spans to')
    parser.add_argument('--output_tsv', default="pubmed-output/output.tsv",
                        help='File to write predicted tsv to')
    parser.add_argument('--ner_model_dir',
                        help='Trained NER model directory')
    parser.add_argument('--sentences_on_batch', type=int, default=2000,
                        help='Write tagger output after this number of sentences')
    return parser
def read_multi_labels(path):
    """Read groups of labels separated by blank lines from *path*.

    Returns a list of label lists, one per blank-line-delimited group.
    Raises ValueError if a label repeats within a group.

    Fix: the final group is now kept even when the file does not end
    with a trailing blank line (it was silently dropped before).
    """
    labels_list = []
    with open(path) as f:
        labels = []
        for line in f:
            line = line.strip()
            if line:
                if line in labels:
                    raise ValueError('duplicate value {} in {}'.format(line, path))
                labels.append(line)
            else:
                labels_list.append(labels)
                labels = []
    # Flush the last group when no trailing blank line terminated it.
    if labels:
        labels_list.append(labels)
    return labels_list
def load_pretrained(options):
    """Load the pretrained BERT checkpoint and its matching tokenizer.

    Returns (model, tokenizer); the model is loaded trainable for
    fine-tuning with the sequence length taken from *options*.
    """
    checkpoint_model = load_trained_model_from_checkpoint(
        options.bert_config_file,
        options.init_checkpoint,
        training=False,
        trainable=True,
        seq_len=options.max_seq_length,
    )
    wordpiece_tokenizer = tokenization.FullTokenizer(
        vocab_file=options.vocab_file,
        do_lower_case=options.do_lower_case,
    )
    return checkpoint_model, wordpiece_tokenizer
def _ner_model_path(ner_model_dir):
return os.path.join(ner_model_dir, 'model.hdf5')
def _ner_vocab_path(ner_model_dir):
return os.path.join(ner_model_dir, 'vocab.txt')
def _ner_labels_path(ner_model_dir):
return os.path.join(ner_model_dir, 'labels.txt')
def _ner_config_path(ner_model_dir):
return os.path.join(ner_model_dir, 'config.json')
def load_ner_model(ner_model_dir):
    """Load a trained NER model plus its tokenizer, labels and config.

    Returns (model, tokenizer, labels, config) read from the standard
    file layout inside *ner_model_dir*.
    """
    with open(_ner_config_path(ner_model_dir)) as f:
        config = json.load(f)
    custom = get_custom_objects()
    model = keras.models.load_model(_ner_model_path(ner_model_dir),
                                    custom_objects=custom)
    tokenizer = tokenization.FullTokenizer(
        vocab_file=_ner_vocab_path(ner_model_dir),
        do_lower_case=config['do_lower_case'],
    )
    labels = read_labels(_ner_labels_path(ner_model_dir))
    return model, tokenizer, labels, config
def read_labels(path):
    """Read one label per line from *path*, rejecting duplicates.

    Every line (stripped) is kept, in file order; a repeated value
    raises ValueError.
    """
    seen = []
    with open(path) as f:
        for raw in f:
            label = raw.strip()
            if label in seen:
                raise ValueError('duplicate value {} in {}'.format(label, path))
            seen.append(label)
    return seen
def encode(lines, tokenizer, max_len):
    """Convert token sequences into padded token-id / segment-id matrices.

    Each line is prefixed with "[CLS]" and right-padded with "[PAD]" ids
    up to *max_len*; every segment id is 0 (single-segment input).
    Lines already at or beyond *max_len* are left unpadded.
    """
    token_matrix = []
    segment_matrix = []
    for line in lines:
        token_ids = tokenizer.convert_tokens_to_ids(["[CLS]"] + line)
        pad_len = max_len - len(token_ids)
        if pad_len > 0:
            token_ids = token_ids + tokenizer.convert_tokens_to_ids(["[PAD]"]) * pad_len
        token_matrix.append(token_ids)
        # Segment ids mirror the (possibly padded) token length.
        segment_matrix.append([0] * len(token_ids))
    return np.array(token_matrix), np.array(segment_matrix)
def tokenize_and_split(words, word_labels, tokenizer, max_length):
    """Tokenize one sentence into word pieces and split it into chunks.

    Parameters
    ----------
    words : list of str
        The sentence's words.
    word_labels : list of str
        One BIO-style label per word; continuation pieces of a B-labelled
        word receive the matching I- label.
    tokenizer : object
        WordPiece tokenizer exposing .tokenize() and
        .wordpiece_tokenizer.unk_token.
    max_length : int
        Maximum number of word pieces per chunk.

    Returns
    -------
    (split_tokens, split_labels, lengths) : the token chunks, their
    aligned label chunks, and the number of word pieces produced for
    each original word (used later to re-align predictions to words).
    """
    unk_token = tokenizer.wordpiece_tokenizer.unk_token
    # Tokenize each word in sentence, propagate labels
    tokens, labels, lengths = [], [], []
    for word, label in zip(words, word_labels):
        tokenized = tokenizer.tokenize(word)
        if len(tokenized) == 0:
            # Some inputs tokenize to nothing; substitute [UNK] so the
            # token and label streams stay the same length.
            print('word "{}" tokenized to {}, replacing with {}'.format(
                word, tokenized, unk_token), file=sys.stderr)
            tokenized = [unk_token] # to avoid desync
        tokens.extend(tokenized)
        lengths.append(len(tokenized))
        for i, token in enumerate(tokenized):
            if i == 0:
                labels.append(label)
            else:
                # Continuation pieces: B-xxx becomes I-xxx, others repeat.
                if label.startswith('B'):
                    labels.append('I'+label[1:])
                else:
                    labels.append(label)
    # Split into multiple sentences if too long
    split_tokens, split_labels = [], []
    start, end = 0, max_length
    while end < len(tokens):
        # Avoid splitting inside tokenized word
        while end > start and tokens[end].startswith('##'):
            end -= 1
        if end == start:
            # Entire window is '##' continuations; split mid-word anyway.
            end = start + max_length # only continuations
        split_tokens.append(tokens[start:end])
        split_labels.append(labels[start:end])
        start = end
        end += max_length
    split_tokens.append(tokens[start:])
    split_labels.append(labels[start:])
    return split_tokens, split_labels, lengths
def tokenize_and_split_sentences(orig_words, orig_labels, tokenizer, max_length):
    """Tokenize every sentence and split over-long ones into chunks.

    Accumulates the token chunks, label chunks and per-word piece counts
    across all sentences.  Two slots are reserved for special tokens,
    hence the ``max_length - 2`` budget per chunk.
    """
    all_tokens = []
    all_labels = []
    all_lengths = []
    for sentence_words, sentence_labels in zip(orig_words, orig_labels):
        chunks, chunk_labels, word_lengths = tokenize_and_split(
            sentence_words, sentence_labels, tokenizer, max_length - 2)
        all_tokens += chunks
        all_labels += chunk_labels
        all_lengths += word_lengths
    return all_tokens, all_labels, all_lengths
def process_sentences(words, orig_labels, tokenizer, max_seq_len, seq_start=0):
    """Tokenize sentences and pack each with surrounding context.

    Tokenizes/splits to ``max_seq_len``, records per-word piece counts,
    then packs each sentence together with its neighbours via
    combine_sentences (budget ``max_seq_len - 1``, offset ``seq_start``).
    Returns a Sentences record with all artefacts.
    """
    tokens, labels, lengths = tokenize_and_split_sentences(
        words, orig_labels, tokenizer, max_seq_len)
    packed = combine_sentences(tokens, labels, max_seq_len - 1, seq_start)
    combined_tokens, combined_labels, sentence_numbers, sentence_starts = packed
    return Sentences(words, tokens, labels, lengths,
                     combined_tokens, combined_labels,
                     sentence_numbers, sentence_starts)
def read_data(input_file, tokenizer, max_seq_length):
    """Read a CoNLL-style TSV file into tokenized lines, tags and lengths.

    Sentences are separated by blank lines; each data line is
    "word<TAB>label".  Lines without a tab are reported to stderr and
    skipped.  One slot is reserved for the [CLS] token, hence
    ``max_seq_length - 1``.
    """
    lines, tags, lengths = [], [], []

    def flush(words, labels):
        # Tokenize one finished sentence and accumulate its chunks.
        chunk_tokens, chunk_labels, word_lens = tokenize_and_split(
            words, labels, tokenizer, max_seq_length - 1)
        lines.extend(chunk_tokens)
        tags.extend(chunk_labels)
        lengths.extend(word_lens)

    words, labels = [], []
    with open(input_file) as rf:
        for raw in rf:
            stripped = raw.strip()
            if not stripped:
                # Blank lines terminate a sentence.
                if words:
                    flush(words, labels)
                    words, labels = [], []
                continue
            fields = stripped.split('\t')
            if len(fields) > 1:
                words.append(fields[0])
                labels.append(fields[1])
            else:
                print('ignoring line: {}'.format(stripped), file=sys.stderr)
    # Process last sentence also when there's no empty line after
    if words:
        flush(words, labels)
    return lines, tags, lengths
def write_result(fname, original, token_lengths, tokens, labels, predictions, mode='train'):
    """Write per-word predictions to *fname* and return what was written.

    Walks the original words in lockstep with the flattened word-piece
    token/label/prediction streams, keeping only the first piece of each
    word (token_lengths gives the piece count per word).  In 'train' mode
    each output line is "word<TAB>gold<TAB>predicted"; in 'predict' mode
    the gold column is omitted.  Sentences are separated by blank lines.

    Returns (lines, sentences): the written lines and, per sentence, the
    predicted labels.  Fix: the redundant f.close() inside the ``with``
    block was removed (the context manager already closes the file).
    """
    lines = []
    sentences = []
    with open(fname, 'w+') as f:
        # Flatten the per-chunk streams so we can consume them word by word.
        toks = deque([val for sublist in tokens for val in sublist])
        labs = deque([val for sublist in labels for val in sublist])
        pred = deque([val for sublist in predictions for val in sublist])
        lengths = deque(token_lengths)
        for sentence in original:
            sent = []
            for word in sentence:
                tok = toks.popleft()
                # TODO avoid hardcoded "[UNK]" string
                if not (word.startswith(tok) or tok == '[UNK]'):
                    print('tokenization mismatch: "{}" vs "{}"'.format(
                        word, tok), file=sys.stderr)
                label = labs.popleft()
                predicted = pred.popleft()
                sent.append(predicted)
                # Skip the continuation word-pieces of multi-piece words.
                for i in range(int(lengths.popleft())-1):
                    toks.popleft()
                    labs.popleft()
                    pred.popleft()
                if mode != 'predict':
                    line = "{}\t{}\t{}\n".format(word, label, predicted)
                else:
                    # In predict mode, labels are just placeholder dummies
                    line = "{}\t{}\n".format(word, predicted)
                f.write(line)
                lines.append(line)
            f.write("\n")
            sentences.append(sent)
    return lines, sentences
# Include maximum number of consecutive sentences to each sample
# def combine_sentences(lines, tags, lengths, max_seq):
# lines_in_sample = []
# new_lines = []
# new_tags = []
# for i, line in enumerate(lines):
# line_numbers = [i]
# new_line = []
# new_line.extend(line)
# new_tag = []
# new_tag.extend(tags[i])
# j = 1
# linelen = len(lines[(i+j)%len(lines)])
# while (len(new_line) + linelen) < max_seq-2:
# new_line.append('[SEP]')
# new_tag.append('O')
# new_line.extend(lines[(i+j)%len(lines)])
# new_tag.extend(tags[(i+j)%len(tags)])
# line_numbers.append((i+j)%len(lines))
# j += 1
# linelen = len(lines[(i+j)%len(lines)])
# new_lines.append(new_line)
# new_tags.append(new_tag)
# lines_in_sample.append(line_numbers)
# return new_lines, new_tags, lines_in_sample
def combine_sentences(lines, tags, max_seq, start=0):
    """Pack each sentence with its neighbours into fixed-budget samples.

    For every sentence i a combined sample is built: the sentence is
    placed at offset ``start`` (or shifted left when it would not fit),
    following sentences are appended after '[SEP]' markers until the
    ``max_seq`` budget is reached (the last one truncated to fit), and
    preceding sentences are copied into the slots before the offset
    (padded with '[PAD]' when none fit).  Indices wrap around the corpus
    via modulo / negative indexing.

    Returns
    -------
    (new_lines, new_tags, lines_in_sample, linestarts_in_sample) :
        the combined token and tag samples, plus for each sample the
        sentence indices it contains and the offset each starts at.
        Note: preceding sentences are recorded with index i-j, which may
        be negative (indices from the end of the lines array).
    """
    lines_in_sample = []
    linestarts_in_sample = []
    new_lines = []
    new_tags = []
    position = start
    for i, line in enumerate(lines):
        line_starts = []
        line_numbers = []
        if start + len(line) < max_seq:
            # Sentence fits at the requested offset; 0-prefix up to it.
            new_line = [0]*start
            new_tag = [0]*start
            new_line.extend(line)
            new_tag.extend(tags[i])
            line_starts.append(start)
            line_numbers.append(i)
        else:
            # Too long for the offset: shift left so the sentence fits.
            position = max_seq - len(line) -1
            new_line = [0]*position
            new_tag = [0]*position
            new_line.extend(line)
            new_tag.extend(tags[i])
            line_starts.append(position)
            line_numbers.append(i)
        # Append following sentences (wrapping around) until full.
        j = 1
        next_idx = (i+j)%len(lines)
        ready = False
        while not ready:
            if len(lines[next_idx]) + len(new_line) < max_seq - 1:
                new_line.append('[SEP]')
                new_tag.append('O')
                position = len(new_line)
                new_line.extend(lines[next_idx])
                new_tag.extend(tags[next_idx])
                line_starts.append(position)
                line_numbers.append(next_idx)
                j += 1
                next_idx = (i+j)%len(lines)
            else:
                # Last following sentence: truncate to the remaining budget.
                new_line.append('[SEP]')
                new_tag.append('O')
                position = len(new_line)
                new_line.extend(lines[next_idx][0:(max_seq-position)])
                new_tag.extend(tags[next_idx][0:(max_seq-position)])
                ready = True
        #lines_in_sample.append(line_numbers)
        # Fill the slots before the main sentence with preceding context.
        j=1
        ready = False
        while not ready:
            counter = line_starts[0]
            #print(counter)
            prev_line = lines[i-j][:]
            prev_tags = tags[i-j][:]
            prev_line.append('[SEP]')
            prev_tags.append('O')
            #print(len(prev_line), len(prev_tags))
            if len(prev_line)<= counter:
                # Whole preceding sentence fits before the current offset.
                new_line[(counter-len(prev_line)):counter]=prev_line
                new_tag[(counter-len(prev_line)):counter]=prev_tags
                line_starts.insert(0,counter-len(prev_line))
                line_numbers.insert(0,i-j) #negative numbers are indices to end of lines array
                j+=1
            else:
                if counter > 2:
                    # Partially fits: keep only its tail.
                    new_line[0:counter] = prev_line[-counter:]
                    new_tag[0:counter] = prev_tags[-counter:]
                    ready = True
                else:
                    # Too little room for useful context: pad instead.
                    new_line[0:counter] = ['[PAD]']*counter
                    new_tag[0:counter] = ['O']*counter
                    ready = True
        new_lines.append(new_line)
        new_tags.append(new_tag)
        lines_in_sample.append(line_numbers)
        linestarts_in_sample.append(line_starts)
    return new_lines, new_tags, lines_in_sample, linestarts_in_sample
def get_predictions(predicted, lines, line_numbers):
    """Aggregate per-sample predictions back onto their source lines.

    A line may occur in several combined samples; the final label at each
    token position is the majority vote over those occurrences.  Also
    returns the prediction slice from the first occurrence of each line
    encountered while scanning the samples.
    """
    first_pred = []
    per_line = [[] for _ in range(len(lines))]
    for sample_idx, sample in enumerate(predicted):
        offset = 1  # position 0 is the [CLS] slot
        for occurrence, line_no in enumerate(line_numbers[sample_idx]):
            segment = sample[offset:offset + len(lines[line_no])]
            per_line[line_no].append(segment)
            if occurrence == 0:
                first_pred.append(segment)
            offset += len(lines[line_no]) + 1  # +1 skips the [SEP]
    final_pred = []
    for votes in per_line:
        stacked = np.stack(votes, axis=0)
        majority = []
        for column in stacked.T:
            values, counts = np.unique(column, return_counts=True)
            majority.append(values[np.argmax(counts)])
        final_pred.append(majority)
    return final_pred, first_pred
def get_predictions2(probs, lines, line_numbers):
    """Aggregate class probabilities per line and argmax them.

    Instead of majority-voting hard labels (get_predictions), this sums
    the probability rows every occurrence of a line contributes, then
    takes the argmax per token position.  Also returns the argmax of the
    slice from each line's first occurrence.

    NOTE(review): p_first is indexed positionally by k in the final loop,
    which assumes the first occurrence of line k is appended in line
    order and that every line has one — a sample/line count mismatch
    would raise IndexError or desync the pairs; verify against callers.
    """
    first_pred = []
    final_pred = []
    predictions = []
    p_first = []
    for i, line in enumerate(lines):
        predictions.append(np.zeros((len(line),probs.shape[-1]))) #create empty array for each line
    for i, sample in enumerate(probs):
        # Position 0 is the [CLS] slot; each line is followed by a [SEP],
        # hence the +1 when advancing idx.
        idx = 1
        for j, line_number in enumerate(line_numbers[i]):
            if j == 0:
                p_first.append(sample[idx:idx+len(lines[line_number]),:])
            predictions[line_number] += sample[idx:idx+len(lines[line_number]),:]
            idx+=len(lines[line_number])+1
    for k, line in enumerate(predictions):
        final_pred.append(np.argmax(line, axis=-1))
        first_pred.append(np.argmax(p_first[k],axis=-1))
    return final_pred, first_pred
def process_docs(docs, doc_tags, line_ids, tokenizer, seq_len):
    """Tokenize whole documents and pack their sentences with context.

    For each document: tokenize/split its sentences, pack them with
    combine_sentences, and accumulate everything into one Sentences
    record with sentence numbers offset into a global numbering.

    Fix: the previous code called combine_sentences with the old,
    commented-out 4-argument signature (passing ``lengths`` where
    ``max_seq`` was expected) and unpacked 3 of its 4 return values,
    which raised ValueError at runtime.  It now uses the current
    (lines, tags, max_seq, start=0) signature and keeps the returned
    sentence start offsets (consistent with process_sentences and
    process_no_context) instead of filler zeros.

    ``line_ids`` is accepted for API compatibility but unused.
    """
    f_words = []
    f_tokens = []
    f_labels = []
    f_lengths = []
    f_combined_tokens = []
    f_combined_labels = []
    f_sentence_numbers = []
    f_sentence_starts = []
    start_sentence_number = 0
    for i, doc in enumerate(docs):
        tokens, labels, lengths = tokenize_and_split_sentences(
            doc, doc_tags[i], tokenizer, seq_len)
        combined_tokens, combined_labels, sentence_numbers, sentence_starts = \
            combine_sentences(tokens, labels, seq_len)
        for numbers in sentence_numbers:
            # Offset per-document sentence indices into the global numbering.
            f_sentence_numbers.append([num + start_sentence_number for num in numbers])
        start_sentence_number += len(tokens)
        f_words.extend(doc)
        f_tokens.extend(tokens)
        f_labels.extend(labels)
        f_lengths.extend(lengths)
        f_combined_tokens.extend(combined_tokens)
        f_combined_labels.extend(combined_labels)
        f_sentence_starts.extend(sentence_starts)
    return Sentences(f_words, f_tokens, f_labels, f_lengths, f_combined_tokens,
                     f_combined_labels, f_sentence_numbers, f_sentence_starts)
def split_to_documents(sentences, tags):
    """Split a flat sentence list into documents at -DOCSTART- markers.

    A sentence whose first token starts with "-DOCSTART-" opens a new
    document (the marker sentence is kept inside the document it opens).
    Returns (documents, documents_tags, line_ids) where line_ids maps
    each document to the original sentence indices it contains.

    Fix: the trailing document was only flushed when *documents* was
    already non-empty (``if documents:``), so a corpus containing a
    single document came back empty; the guard now checks the trailing
    *document* buffer itself.
    """
    documents = []
    documents_tags = []
    doc_idx = 0
    document = []
    d_tags = []
    line_ids = [[]]
    for i, sentence in enumerate(sentences):
        if sentence[0].startswith("-DOCSTART-") and i != 0:
            documents.append(document)
            documents_tags.append(d_tags)
            document = []
            d_tags = []
            line_ids.append([])
            doc_idx += 1
        document.append(sentence)
        d_tags.append(tags[i])
        line_ids[doc_idx].append(i)
    # Flush the trailing document (also covers the single-document case).
    if document:
        documents.append(document)
        documents_tags.append(d_tags)
    return documents, documents_tags, line_ids
def process_no_context(train_words, train_tags, tokenizer, seq_len):
    """Build a Sentences record without any cross-sentence context.

    Each sentence stands alone: its "combined" form is itself, its only
    sentence number is its own index, and it always starts at offset 0.
    """
    tokens, labels, lengths = tokenize_and_split_sentences(
        train_words, train_tags, tokenizer, seq_len)
    numbers = [[i] for i in range(len(tokens))]
    starts = [[0] for _ in tokens]
    return Sentences(train_words, tokens, labels, lengths,
                     tokens, labels, numbers, starts)
| [
"numpy.stack",
"json.load",
"argparse.ArgumentParser",
"keras_bert.load_trained_model_from_checkpoint",
"numpy.argmax",
"bert_tokenization.FullTokenizer",
"numpy.unique",
"numpy.array",
"collections.namedtuple",
"keras_bert.get_custom_objects",
"os.path.join",
"collections.deque"
] | [((457, 607), 'collections.namedtuple', 'namedtuple', (['"""Sentences"""', "['words', 'tokens', 'labels', 'lengths', 'combined_tokens',\n 'combined_labels', 'sentence_numbers', 'sentence_starts']"], {}), "('Sentences', ['words', 'tokens', 'labels', 'lengths',\n 'combined_tokens', 'combined_labels', 'sentence_numbers',\n 'sentence_starts'])\n", (467, 607), False, 'from collections import deque, namedtuple\n'), ((663, 679), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (677, 679), False, 'from argparse import ArgumentParser\n'), ((1974, 2133), 'keras_bert.load_trained_model_from_checkpoint', 'load_trained_model_from_checkpoint', (['options.bert_config_file', 'options.init_checkpoint'], {'training': '(False)', 'trainable': '(True)', 'seq_len': 'options.max_seq_length'}), '(options.bert_config_file, options.\n init_checkpoint, training=False, trainable=True, seq_len=options.\n max_seq_length)\n', (2008, 2133), False, 'from keras_bert import load_trained_model_from_checkpoint, AdamWarmup\n'), ((2186, 2285), 'bert_tokenization.FullTokenizer', 'tokenization.FullTokenizer', ([], {'vocab_file': 'options.vocab_file', 'do_lower_case': 'options.do_lower_case'}), '(vocab_file=options.vocab_file, do_lower_case=\n options.do_lower_case)\n', (2212, 2285), True, 'import bert_tokenization as tokenization\n'), ((2380, 2421), 'os.path.join', 'os.path.join', (['ner_model_dir', '"""model.hdf5"""'], {}), "(ner_model_dir, 'model.hdf5')\n", (2392, 2421), False, 'import os\n'), ((2471, 2511), 'os.path.join', 'os.path.join', (['ner_model_dir', '"""vocab.txt"""'], {}), "(ner_model_dir, 'vocab.txt')\n", (2483, 2511), False, 'import os\n'), ((2562, 2603), 'os.path.join', 'os.path.join', (['ner_model_dir', '"""labels.txt"""'], {}), "(ner_model_dir, 'labels.txt')\n", (2574, 2603), False, 'import os\n'), ((2654, 2696), 'os.path.join', 'os.path.join', (['ner_model_dir', '"""config.json"""'], {}), "(ner_model_dir, 'config.json')\n", (2666, 2696), False, 'import os\n'), ((2804, 
2816), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2813, 2816), False, 'import json\n'), ((3980, 3994), 'numpy.array', 'np.array', (['tids'], {}), '(tids)\n', (3988, 3994), True, 'import numpy as np\n'), ((3996, 4010), 'numpy.array', 'np.array', (['sids'], {}), '(sids)\n', (4004, 4010), True, 'import numpy as np\n'), ((7850, 7903), 'collections.deque', 'deque', (['[val for sublist in tokens for val in sublist]'], {}), '([val for sublist in tokens for val in sublist])\n', (7855, 7903), False, 'from collections import deque, namedtuple\n'), ((7919, 7972), 'collections.deque', 'deque', (['[val for sublist in labels for val in sublist]'], {}), '([val for sublist in labels for val in sublist])\n', (7924, 7972), False, 'from collections import deque, namedtuple\n'), ((7988, 8046), 'collections.deque', 'deque', (['[val for sublist in predictions for val in sublist]'], {}), '([val for sublist in predictions for val in sublist])\n', (7993, 8046), False, 'from collections import deque, namedtuple\n'), ((8065, 8085), 'collections.deque', 'deque', (['token_lengths'], {}), '(token_lengths)\n', (8070, 8085), False, 'from collections import deque, namedtuple\n'), ((13767, 13795), 'numpy.stack', 'np.stack', (['prediction'], {'axis': '(0)'}), '(prediction, axis=0)\n', (13775, 13795), True, 'import numpy as np\n'), ((2917, 2937), 'keras_bert.get_custom_objects', 'get_custom_objects', ([], {}), '()\n', (2935, 2937), False, 'from keras_bert import calc_train_steps, get_custom_objects\n'), ((13838, 13870), 'numpy.unique', 'np.unique', (['j'], {'return_counts': '(True)'}), '(j, return_counts=True)\n', (13847, 13870), True, 'import numpy as np\n'), ((14660, 14684), 'numpy.argmax', 'np.argmax', (['line'], {'axis': '(-1)'}), '(line, axis=-1)\n', (14669, 14684), True, 'import numpy as np\n'), ((14712, 14742), 'numpy.argmax', 'np.argmax', (['p_first[k]'], {'axis': '(-1)'}), '(p_first[k], axis=-1)\n', (14721, 14742), True, 'import numpy as np\n'), ((13897, 13909), 'numpy.argmax', 
'np.argmax', (['c'], {}), '(c)\n', (13906, 13909), True, 'import numpy as np\n')] |
import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import h5py
import time
from EdgeConv.DataGeneratorMulti import DataGeneratorMulti
from torch import nn
from torch.utils.data import DataLoader, Dataset
import torch
import torch.optim as optim
import tqdm
import pytorch_lightning as pl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from pytorch_lightning.callbacks import ModelCheckpoint
import torch.nn.functional as F
from pytorch_lightning import loggers as pl_loggers
from gMLPhase.gMLP_torch import gMLPmodel
from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm , conv_block, deconv_block
import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv,SAGEConv
from torch import nn
from torch_geometric.nn import MessagePassing
import logging
# Configure the root logger so records from every module are captured.
root_logger= logging.getLogger()
root_logger.setLevel(logging.DEBUG)  # capture everything down to DEBUG
handler = logging.FileHandler('debug5.log', 'w', 'utf-8')  # truncated on start, UTF-8
handler.setFormatter(logging.Formatter('%(name)s %(message)s'))  # "<logger name> <message>"
root_logger.addHandler(handler)
def cycle(loader):
    """Yield items from *loader* forever, restarting it when exhausted.

    *loader* must be re-iterable (e.g. a DataLoader or a list); a
    one-shot generator would yield nothing after its first pass.
    """
    while True:
        yield from loader
def trainerMulti(input_hdf5=None,
                 input_trainset = None,
                 input_validset = None,
                 output_name = None,
                 input_model = None,
                 hparams_file = None,
                 model_folder = None,
                 input_dimention=(6000, 3),
                 shuffle=True,
                 label_type='triangle',
                 normalization_mode='std',
                 augmentation=True,
                 add_event_r=0.6,
                 shift_event_r=0.99,
                 add_noise_r=0.3,
                 drop_channel_r=0.5,
                 add_gap_r=0.2,
                 coda_ratio=1.4,
                 scale_amplitude_r=None,
                 pre_emphasis=False,
                 batch_size=1,
                 epochs=200,
                 monitor='val_loss',
                 patience=3):

    """
    Build data generators, load a pretrained gMLP phase model and train a
    graph (EdgeConv) model on top of it with PyTorch Lightning.

    Parameters
    ----------
    input_hdf5: str, default=None
        Path to an hdf5 file with NumPy arrays containing 3 component waveforms each 1 min long.

    input_trainset: str, default=None
        Path to a .npy file listing the trace names of the training split.

    input_validset: str, default=None
        Path to a .npy file listing the trace names of the validation split.

    output_name: str, default=None
        Output directory; it is recreated from scratch by _make_dir (any
        previous run with the same name is deleted).

    input_model: str, default=None
        Checkpoint filename of the pretrained gMLP model inside model_folder.

    hparams_file: str, default=None
        Hyper-parameter file of the pretrained model inside model_folder.

    model_folder: str, default=None
        Directory containing input_model and hparams_file.

    input_dimention: tuple, default=(6000, 3)
        Number of samples and number of channels of each input trace.

    shuffle: bool, default=True
        To shuffle the list prior to the training.

    label_type: str, default='triangle'
        Labeling type. 'gaussian', 'triangle', or 'box'.

    normalization_mode: str, default='std'
        Mode of normalization for data preprocessing, 'max': maximum amplitude among three components, 'std', standard deviation.

    augmentation: bool, default=True
        If True, data will be augmented simultaneously during the training.

    add_event_r: float, default=0.6
        Rate of augmentation for adding a secondary event randomly into the empty part of a trace.

    shift_event_r: float, default=0.99
        Rate of augmentation for randomly shifting the event within a trace.

    add_noise_r: float, defaults=0.3
        Rate of augmentation for adding Gaussian noise with different SNR into a trace.

    drop_channel_r: float, defaults=0.5
        Rate of augmentation for randomly dropping one of the channels.

    add_gap_r: float, defaults=0.2
        Add an interval with zeros into the waveform representing filled gaps.

    coda_ratio: float, defaults=1.4
        % of S-P time to extend event/coda envelope past S pick.

    scale_amplitude_r: float, defaults=None
        Rate of augmentation for randomly scaling the trace.

    pre_emphasis: bool, defaults=False
        If True, waveforms will be pre-emphasized. Defaults to False.

    batch_size: int, default=1
        DataLoader batch size (the Trainer additionally accumulates
        gradients over 16 batches).

    epochs: int, default=200
        The number of epochs. NOTE(review): currently unused — pl.Trainer
        is created without max_epochs; confirm whether this is intended.

    monitor: str, default='val_loss'
        The measure used for monitoring (checkpointing and early stopping).

    patience: int, default=3
        The number of epochs without any improvement in the monitoring measure to automatically stop the training.

    Returns
    --------
    None. Side effects: creates output_name/ containing checkpoints/
    (top-3 and last Lightning checkpoints), TensorBoard logs, and a copy
    of this training script.
    """

    # Collect every argument in one dict so the generator/callback
    # parameter blocks below can reference them uniformly.
    args = {
    "input_hdf5": input_hdf5,
    "input_trainset": input_trainset,
    "input_validset": input_validset,
    "output_name": output_name,
    "input_model": input_model,
    "hparams_file": hparams_file,
    "model_folder": model_folder,
    "input_dimention": input_dimention,
    "shuffle": shuffle,
    "label_type": label_type,
    "normalization_mode": normalization_mode,
    "augmentation": augmentation,
    "add_event_r": add_event_r,
    "shift_event_r": shift_event_r,
    "add_noise_r": add_noise_r,
    "add_gap_r": add_gap_r,
    "coda_ratio": coda_ratio,
    "drop_channel_r": drop_channel_r,
    "scale_amplitude_r": scale_amplitude_r,
    "pre_emphasis": pre_emphasis,
    "batch_size": batch_size,
    "epochs": epochs,
    "monitor": monitor,
    "patience": patience
    }

    # Fresh output directory (wipes any previous run with the same name).
    # NOTE(review): _make_dir returns None when output_name is None, which
    # would make this unpacking raise TypeError.
    save_dir, save_models=_make_dir(args['output_name'])
    # Trace-name lists for the two splits.
    training = np.load(input_trainset)
    validation = np.load(input_validset)

    start_training = time.time()

    # Generator settings for the training split (augmentation enabled).
    params_training = {'file_name': str(args['input_hdf5']),
                      'dim': args['input_dimention'][0],
                      'batch_size': 1,
                      'n_channels': args['input_dimention'][-1],
                      'shuffle': args['shuffle'],
                      'norm_mode': args['normalization_mode'],
                      'label_type': args['label_type'],
                      'augmentation': args['augmentation'],
                      'add_event_r': args['add_event_r'],
                      'add_gap_r': args['add_gap_r'],
                      'coda_ratio': args['coda_ratio'],
                      'shift_event_r': args['shift_event_r'],
                      'add_noise_r': args['add_noise_r'],
                      'drop_channel_r': args['drop_channel_r'],
                      'scale_amplitude_r': args['scale_amplitude_r'],
                      'pre_emphasis': args['pre_emphasis']}

    # Validation split: no shuffling, no augmentation.
    params_validation = {'file_name': str(args['input_hdf5']),
                         'dim': args['input_dimention'][0],
                         'batch_size': 1,
                         'n_channels': args['input_dimention'][-1],
                         'shuffle': False,
                         'coda_ratio': args['coda_ratio'],
                         'norm_mode': args['normalization_mode'],
                         'label_type': args['label_type'],
                         'augmentation': False}

    # Restore the pretrained gMLP phase model from its checkpoint.
    model = gMLPmodel.load_from_checkpoint(checkpoint_path=os.path.join(args['model_folder'],args['input_model']),hparams_file=os.path.join(args['model_folder'],args['hparams_file']))

    # change into eval mode (the pretrained backbone is not trained here)
    model.eval()

    # Wrap the frozen backbone in the graph model that will be trained.
    # NOTE(review): Graphmodel is defined elsewhere in this module.
    model_GNN = Graphmodel(pre_model=model)

    training_generator = DataGeneratorMulti(list_IDs=training, **params_training)
    validation_generator = DataGeneratorMulti(list_IDs=validation, **params_validation)

    # for i in [3,5919,5920,9651,9652]:
    #     x=training_generator.__getitem__(i)
    #     print(x[-1])
    #     print(x[0].shape)
    #     print(x[1].shape)
    #     print(x[2].shape)
    #     print(x[0].max())
    #     print(torch.sum(x[0]))
    #     print(torch.sum(x[1]))
    # return

    # Keep the 3 best checkpoints by `monitor`, plus the last epoch.
    checkpoint_callback = ModelCheckpoint(monitor=monitor,dirpath=save_models,save_top_k=3,verbose=True,save_last=True)
    early_stopping = EarlyStopping(monitor=monitor,patience=args['patience']) # patience=3
    tb_logger = pl_loggers.TensorBoardLogger(save_dir)

    # Mixed-precision single-GPU training with gradient accumulation.
    trainer = pl.Trainer(precision=16, gpus=1,gradient_clip_val=0.5, accumulate_grad_batches=16, callbacks=[early_stopping, checkpoint_callback],check_val_every_n_epoch=1,profiler="simple",num_sanity_val_steps=0, logger =tb_logger)

    train_loader = DataLoader(training_generator, batch_size = args['batch_size'], num_workers=8, pin_memory=True, prefetch_factor=5)
    val_loader = DataLoader(validation_generator, batch_size = args['batch_size'], num_workers=8, pin_memory=True, prefetch_factor=5)

    print('Started training in generator mode ...')

    trainer.fit(model_GNN, train_dataloaders = train_loader, val_dataloaders = val_loader)

    end_training = time.time()
    print('Finished Training')
print('Finished Training')
def _make_dir(output_name):
"""
Make the output directories.
Parameters
----------
output_name: str
Name of the output directory.
Returns
-------
save_dir: str
Full path to the output directory.
save_models: str
Full path to the model directory.
"""
if output_name == None:
print('Please specify output_name!')
return
else:
save_dir = os.path.join(os.getcwd(), str(output_name))
save_models = os.path.join(save_dir, 'checkpoints')
if os.path.isdir(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_models)
shutil.copyfile('EdgeConv/trainerMulti.py',os.path.join(save_dir,'trainer.py'))
return save_dir, save_models
def conv_block(n_in, n_out, k, stride, padding, activation, dropout=0):
    """Build a 1-D convolution, optionally followed by activation and dropout.

    Returns a bare ``nn.Conv1d`` when ``activation`` is falsy, otherwise an
    ``nn.Sequential`` of conv -> activation -> dropout.
    """
    conv = nn.Conv1d(n_in, n_out, k, stride=stride, padding=padding)
    if not activation:
        return conv
    return nn.Sequential(conv, activation, nn.Dropout(p=dropout))
def deconv_block(n_in, n_out, k, stride, padding, output_padding, activation, dropout=0):
    """Build a 1-D transposed convolution, optionally with activation + dropout.

    Returns a bare ``nn.ConvTranspose1d`` when ``activation`` is falsy,
    otherwise an ``nn.Sequential`` of deconv -> activation -> dropout.
    """
    deconv = nn.ConvTranspose1d(n_in, n_out, k, stride=stride, padding=padding,
                                output_padding=output_padding)
    if not activation:
        return deconv
    return nn.Sequential(deconv, activation, nn.Dropout(p=dropout))
class EdgeConv(MessagePassing):
    """Message-passing layer that mixes per-node feature traces over edges.

    The message for an edge (j -> i) is the channel-wise concatenation of
    the receiver features ``x_i`` and sender features ``x_j``, refined by a
    small stack of 1-D conv blocks; incoming messages are aggregated with
    ``max``.
    """

    def __init__(self, in_channels):
        # "Max" aggregation. node_dim=-3 because node features are laid out
        # as (nodes, channels, time), not the default flat (nodes, features).
        super().__init__(aggr='max', node_dim=-3)
        activation = nn.GELU()
        # Fix: this local was previously defined but unused — every block
        # hard-coded 0.1. Route it through so the rate has a single knob
        # (same value, identical behaviour).
        dropout = 0.1
        # Conv stack over the concatenated message:
        # 2C -> 2C -> 2C -> C -> C -> C channels.
        self.deconv1 = conv_block(in_channels * 2, in_channels * 2, 3, 1, 1,
                                  activation=activation, dropout=dropout)
        self.deconv2 = conv_block(in_channels * 2, in_channels * 2, 3, 1, 1,
                                  activation=activation, dropout=dropout)
        self.deconv3 = conv_block(in_channels * 2, in_channels, 3, 1, 1,
                                  activation=activation, dropout=dropout)
        self.deconv4 = conv_block(in_channels, in_channels, 3, 1, 1,
                                  activation=activation, dropout=dropout)
        self.deconv5 = conv_block(in_channels, in_channels, 3, 1, 1,
                                  activation=activation, dropout=dropout)

    def forward(self, x, edge_index):
        """Run message passing; x: node features, edge_index: (2, E) edges."""
        return self.propagate(edge_index, x=x)

    def message(self, x_i, x_j):
        """Build the per-edge message from receiver (x_i) / sender (x_j)."""
        # Concatenate along the channel axis, then refine with the conv stack.
        tmp = torch.cat([x_i, x_j], dim=1)
        return self.deconv5(self.deconv4(self.deconv3(self.deconv2(self.deconv1(tmp)))))
def encoder(activation, dropout):
    """Downsampling conv stack: 3 input channels -> 64, 7 stride-2 stages.

    Every stage is a strided conv (downsample by 2), a stride-1 refinement
    conv with dropout, and a batch norm; a stem conv + batch norm precede
    the stages. Layer-for-layer identical to the original literal stack.
    """
    stages = [(8, 8), (8, 16), (16, 16), (16, 32),
              (32, 32), (32, 64), (64, 64)]
    layers = [
        conv_block(3, 8, 3, 1, 1, activation, dropout=dropout),
        nn.BatchNorm1d(8),
    ]
    for ch_in, ch_out in stages:
        layers.append(conv_block(ch_in, ch_out, 3, 2, 1, activation))
        layers.append(conv_block(ch_out, ch_out, 3, 1, 1, activation, dropout=dropout))
        layers.append(nn.BatchNorm1d(ch_out))
    return nn.Sequential(*layers)
def decoder(activation, dropout):
    """Upsampling deconv stack: 64 channels -> one output trace.

    Each stage is a stride-2 transposed conv, batch norm, and a stride-1
    refinement conv with dropout; a final Conv1d projects to one channel.
    Layer-for-layer identical to the original literal stack (note the
    third stage's output_padding of 0).
    """
    stages = [(64, 64, 1), (64, 32, 1), (32, 32, 0), (32, 16, 1),
              (16, 16, 1), (16, 8, 1), (8, 8, 1)]
    layers = []
    for ch_in, ch_out, out_pad in stages:
        layers.append(deconv_block(ch_in, ch_out, 3, 2, padding=1,
                                   output_padding=out_pad, activation=activation))
        layers.append(nn.BatchNorm1d(ch_out))
        layers.append(conv_block(ch_out, ch_out, 3, 1, 1, activation, dropout=dropout))
    # Project down to a single channel for the per-sample probability trace.
    layers.append(nn.Conv1d(8, 1, 3, stride=1, padding=1))
    return nn.Sequential(*layers)
class Graphmodel(pl.LightningModule):
    """Fine-tunes a pretrained gMLP phase picker with a graph exchange layer.

    The pretrained encoder and gMLP blocks are frozen; the EdgeConv layer
    and both decoders remain trainable. The output stacks the P and S
    probability traces along the channel dimension.
    """
    def __init__(
        self,
        pre_model
    ):
        super().__init__()
        # Graph layer that exchanges 64-channel features between nodes.
        self.edgeconv1 = EdgeConv(64)
        # Freeze the pretrained feature extractor; everything else trains.
        for name, p in pre_model.named_parameters():
            if "encoder" in name or "gMLPlayers" in name :
                p.requires_grad = False
                print(name)
            else:
                p.requires_grad = True
        # Reuse the pretrained submodules and loss configuration unchanged.
        self.encoder = pre_model.encoder
        self.gMLPlayers = pre_model.gMLPlayers
        self.decoderP = pre_model.decoderP
        self.decoderS = pre_model.decoderS
        self.criterion = pre_model.criterion
        self.loss_weights= pre_model.loss_weights
        print('loss weight', self.loss_weights)
    def forward(self, data):
        # data[0] = waveforms, data[2] = graph edge index; data[1] (labels)
        # is deliberately unused here.
        x, edge_index = data[0], data[2]
        # Drop the leading singleton dim added by the batch_size=1 loaders.
        x = np.squeeze(x)
        edge_index=np.squeeze(edge_index)
        x = self.encoder(x)
        # gMLP blocks operate on (batch, time, channels); transpose in place.
        x.transpose_(1, 2)
        x = nn.Sequential(*self.gMLPlayers)(x)
        # x = self.edgeconv1(x,edge_index)
        x.transpose_(1, 2)
        # Exchange features across the graph, then decode the two traces.
        x = self.edgeconv1(x,edge_index)
        x_P = self.decoderP(x)
        x_S = self.decoderS(x)
        # Stack P and S picks along the channel axis.
        return torch.cat((x_P,x_S), 1 )
    def training_step(self, batch, batch_idx):
        y = batch[1][0]
        y = np.squeeze(y)
        y_hat = self.forward(batch)
        # Channel 0 = P phase; flatten prediction/target for the criterion.
        y_hatP = y_hat[:,0,:].reshape(-1,1)
        yP = y[:,0,:].reshape(-1,1)
        lossP = self.criterion(y_hatP, yP)* self.loss_weights[0]
        # Channel 1 = S phase.
        y_hatS = y_hat[:,1,:].reshape(-1,1)
        yS = y[:,1,:].reshape(-1,1)
        lossS = self.criterion(y_hatS, yS)* self.loss_weights[1]
        loss = lossP+lossS
        # Dump diagnostics to the debug log when the loss goes NaN, to help
        # identify which batch/values caused it.
        if np.isnan(loss.detach().cpu()):
            logging.debug('This message should go to the log file')
            logging.debug('help')
            logging.debug(batch[-1])
            logging.debug(np.sum(np.array(batch[0].detach().cpu())))
            logging.debug(batch_idx)
            logging.debug(batch)
            logging.debug(np.sum(np.array(y_hatP.detach().cpu())))
            logging.debug(np.array(y_hatP.detach().cpu()))
            logging.debug(np.sum(np.array(yP.detach().cpu())))
            logging.debug(np.sum(np.array(y_hatS.detach().cpu())))
            logging.debug(np.sum(np.array(yS.detach().cpu())))
        self.log("train_loss", loss, on_epoch=True, prog_bar=True)
        self.log("train_lossP", lossP, on_epoch=True, prog_bar=True)
        self.log("train_lossS", lossS, on_epoch=True, prog_bar=True)
        return {'loss': loss}
    def validation_step(self, batch, batch_idx):
        # Same decomposition as training_step, without the NaN diagnostics.
        y = batch[1][0]
        y = np.squeeze(y)
        y_hat = self.forward(batch)
        y_hatP = y_hat[:,0,:].reshape(-1,1)
        yP = y[:,0,:].reshape(-1,1)
        lossP = self.criterion(y_hatP, yP)* self.loss_weights[0]
        y_hatS = y_hat[:,1,:].reshape(-1,1)
        yS = y[:,1,:].reshape(-1,1)
        lossS = self.criterion(y_hatS, yS)* self.loss_weights[1]
        loss = lossP+lossS
        self.log("val_loss", loss, on_epoch=True, prog_bar=True)
        self.log("val_lossP", lossP, on_epoch=True, prog_bar=True)
        self.log("val_lossS", lossS, on_epoch=True, prog_bar=True)
        return {'val_loss': loss}
    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
        # Reduce the learning rate when train_loss plateaus; Lightning
        # checks the monitored metric every 5000 steps.
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                "scheduler": scheduler,
                "monitor": "train_loss",
                "frequency": 5000
            },
        }
# class Graphmodel(pl.LightningModule):
# def __init__(self,
# pre_model
# ):
# super().__init__()
# # activation= nn.GELU()
# # self.save_hyperparameters()
# self.edgeconv1 = EdgeConv(64)
# # for name, p in pre_model.named_parameters():
# # if "deconv6_" in name :
# # print(name)
# # p.requires_grad = True
# # else:
# # p.requires_grad = False
# # repeat pre_model module
# # self.conv0 = pre_model.conv0
# # self.conv1_0 = pre_model.conv1_0
# # self.conv2_0 = pre_model.conv2_0
# # self.conv3_0 = pre_model.conv3_0
# # self.conv4_0 = pre_model.conv4_0
# # self.conv5_0 = pre_model.conv5_0
# # self.conv6_0 = pre_model.conv6_0
# # self.conv7_0 = pre_model.conv7_0
# # self.conv1_1 = pre_model.conv1_1
# # self.conv2_1 = pre_model.conv2_1
# # self.conv3_1 = pre_model.conv3_1
# # self.conv4_1 = pre_model.conv4_1
# # self.conv5_1 = pre_model.conv5_1
# # self.conv6_1 = pre_model.conv6_1
# # self.conv7_1 = pre_model.conv7_1
# # self.deconv0_0 = pre_model.deconv0_0
# # self.deconv1_0 = pre_model.deconv1_0
# # self.deconv2_0 = pre_model.deconv2_0
# # self.deconv3_0 = pre_model.deconv3_0
# # self.deconv4_0 = pre_model.deconv4_0
# # self.deconv5_0 = pre_model.deconv5_0
# # self.deconv6_0 = pre_model.deconv6_0
# # self.deconv0_1 = pre_model.deconv0_1
# # self.deconv1_1 = pre_model.deconv1_1
# # self.deconv2_1 = pre_model.deconv2_1
# # self.deconv3_1 = pre_model.deconv3_1
# # self.deconv4_1 = pre_model.deconv4_1
# # self.deconv4_1 = pre_model.deconv4_1
# # self.deconv5_1 = pre_model.deconv5_1
# # self.deconv6_1 = pre_model.deconv6_1
# # self.deconv6_2 = pre_model.deconv6_2
# # self.batch_norm0 = pre_model.batch_norm0
# # self.batch_norm1 = pre_model.batch_norm1
# # self.batch_norm2 = pre_model.batch_norm2
# # self.batch_norm3 = pre_model.batch_norm3
# # self.batch_norm4 = pre_model.batch_norm4
# # self.batch_norm5 = pre_model.batch_norm5
# # self.batch_norm6 = pre_model.batch_norm6
# # self.batch_norm7 = pre_model.batch_norm7
# # self.batch_norm8 = pre_model.batch_norm8
# # self.batch_norm9 = pre_model.batch_norm9
# # self.batch_norm10 = pre_model.batch_norm10
# # self.batch_norm11 = pre_model.batch_norm11
# # self.batch_norm12 = pre_model.batch_norm12
# # self.batch_norm13 = pre_model.batch_norm13
# # self.batch_norm14 = pre_model.batch_norm14
# # self.gMLPlayers = pre_model.gMLPlayers
# # self.criterion = pre_model.criterion
# # self.loss_weights= pre_model.loss_weights
# def forward(self, data):
# x, edge_index = data[0], data[2]
# x = np.squeeze(x)
# edge_index=np.squeeze(edge_index)
# x0 = self.conv0(x)
# x0 = self.batch_norm0(x0)
# x1 = self.conv1_1(self.batch_norm1(self.conv1_0(x0)))
# x2 = self.conv2_1(self.batch_norm2(self.conv2_0(x1)))
# x3 = self.conv3_1(self.batch_norm3(self.conv3_0(x2)))
# x4 = self.conv4_1(self.batch_norm4(self.conv4_0(x3)))
# x5 = self.conv5_1(self.batch_norm5(self.conv5_0(x4)))
# x6 = self.conv6_1(self.batch_norm6(self.conv6_0(x5)))
# x7 = self.conv7_1(self.batch_norm7(self.conv7_0(x6)))
# x7.transpose_(1, 2)
# # gMLPlayers=self.gMLPlayers if not self.training else dropout_layers(self.gMLPlayers, self.prob_survival)
# x7 = nn.Sequential(*self.gMLPlayers)(x7)
# x_exchange = self.edgeconv1(x7,edge_index)
# x7 = x7+x_exchange
# x7.transpose_(1, 2)
# # exchange info
# x8 = torch.cat((self.batch_norm8(self.deconv0_0(x7)), x6), 1)
# x8 = self.deconv0_1(x8)
# x9 = torch.cat((self.batch_norm9(self.deconv1_0(x8)), x5), 1)
# x9 = self.deconv1_1(x9)
# x10 = torch.cat((self.batch_norm10(self.deconv2_0(x9)), x4), 1)
# x10 = self.deconv2_1(x10)
# x11 = torch.cat((self.batch_norm11(self.deconv3_0(x10)), x3), 1)
# x11 = self.deconv3_1(x11)
# x12 = torch.cat((self.batch_norm12(self.deconv4_0(x11)), x2), 1)
# x12 = self.deconv4_1(x12)
# x13 = torch.cat((self.batch_norm13(self.deconv5_0(x12)), x1), 1)
# x13 = self.deconv5_1(x13)
# # x13.transpose_(1, 2)
# # x13.transpose_(1, 2)
# x14 = torch.cat((self.batch_norm14(self.deconv6_0(x13)), x0), 1)
# x14 = self.deconv6_1(x14)
# x14 = self.deconv6_2(x14)
# return x14
# def training_step(self, batch, batch_idx):
# # training_step defined the train loop.
# # It is independent of forward
# y = batch[1][0]
# y = np.squeeze(y)
# y_hat = self.forward(batch)
# # y_hat2 = y_hat.view(-1,1)
# # y2 = y.view(-1,1)
# # loss = self.criterion(y_hat2, y2)
# y_hatD = y_hat[:,0,:].reshape(-1,1)
# yD = y[:,0,:].reshape(-1,1)
# lossD = self.criterion(y_hatD, yD)* self.loss_weights[0]
# y_hatP = y_hat[:,1,:].reshape(-1,1)
# yP = y[:,1,:].reshape(-1,1)
# lossP = self.criterion(y_hatP, yP)* self.loss_weights[1]
# y_hatS = y_hat[:,2,:].reshape(-1,1)
# yS = y[:,2,:].reshape(-1,1)
# lossS = self.criterion(y_hatS, yS)* self.loss_weights[2]
# loss = lossD+lossP+lossS
# self.log("train_loss", loss, on_epoch=True, prog_bar=True)
# self.log("train_lossD", lossD, on_epoch=True, prog_bar=True)
# self.log("train_lossP", lossP, on_epoch=True, prog_bar=True)
# self.log("train_lossS", lossS, on_epoch=True, prog_bar=True)
# return loss
# def validation_step(self, batch, batch_idx):
# y = batch[1][0]
# y = np.squeeze(y)
# y_hat = self.forward(batch)
# # y_hat2 = y_hat.view(-1,1)
# # y2 = y.view(-1,1)
# # loss = self.criterion(y_hat2, y2)
# y_hatD = y_hat[:,0,:].reshape(-1,1)
# yD = y[:,0,:].reshape(-1,1)
# lossD = self.criterion(y_hatD, yD)* self.loss_weights[0]
# y_hatP = y_hat[:,1,:].reshape(-1,1)
# yP = y[:,1,:].reshape(-1,1)
# lossP = self.criterion(y_hatP, yP)* self.loss_weights[1]
# y_hatS = y_hat[:,2,:].reshape(-1,1)
# yS = y[:,2,:].reshape(-1,1)
# lossS = self.criterion(y_hatS, yS) *self.loss_weights[2]
# loss = lossD+lossP+lossS
# self.log("val_loss", loss, on_epoch=True, prog_bar=True)
# # self.log("val_lossD", lossD, on_epoch=True, prog_bar=True)
# self.log("val_lossP", lossP, on_epoch=True, prog_bar=True)
# self.log("val_lossS", lossS, on_epoch=True, prog_bar=True)
# return {'val_loss': loss}
# def configure_optimizers(self):
# optimizer = torch.optim.Adam(self.parameters(), lr=1e-4)
# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)
# return {
# "optimizer": optimizer,
# "lr_scheduler": {
# "scheduler": scheduler,
# "monitor": "val_loss",
# "frequency": 1
# },
# }
| [
"torch.nn.Dropout",
"numpy.load",
"pytorch_lightning.Trainer",
"gMLPhase.gMLP_torch.conv_block",
"torch.cat",
"logging.Formatter",
"shutil.rmtree",
"os.path.join",
"logging.FileHandler",
"torch.utils.data.DataLoader",
"torch.nn.Conv1d",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"pytorch_... | [((896, 915), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (913, 915), False, 'import logging\n'), ((978, 1025), 'logging.FileHandler', 'logging.FileHandler', (['"""debug5.log"""', '"""w"""', '"""utf-8"""'], {}), "('debug5.log', 'w', 'utf-8')\n", (997, 1025), False, 'import logging\n'), ((1062, 1103), 'logging.Formatter', 'logging.Formatter', (['"""%(name)s %(message)s"""'], {}), "('%(name)s %(message)s')\n", (1079, 1103), False, 'import logging\n'), ((7381, 7404), 'numpy.load', 'np.load', (['input_trainset'], {}), '(input_trainset)\n', (7388, 7404), True, 'import numpy as np\n'), ((7423, 7446), 'numpy.load', 'np.load', (['input_validset'], {}), '(input_validset)\n', (7430, 7446), True, 'import numpy as np\n'), ((7469, 7480), 'time.time', 'time.time', ([], {}), '()\n', (7478, 7480), False, 'import time\n'), ((9275, 9331), 'EdgeConv.DataGeneratorMulti.DataGeneratorMulti', 'DataGeneratorMulti', ([], {'list_IDs': 'training'}), '(list_IDs=training, **params_training)\n', (9293, 9331), False, 'from EdgeConv.DataGeneratorMulti import DataGeneratorMulti\n'), ((9360, 9420), 'EdgeConv.DataGeneratorMulti.DataGeneratorMulti', 'DataGeneratorMulti', ([], {'list_IDs': 'validation'}), '(list_IDs=validation, **params_validation)\n', (9378, 9420), False, 'from EdgeConv.DataGeneratorMulti import DataGeneratorMulti\n'), ((9773, 9875), 'pytorch_lightning.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'monitor': 'monitor', 'dirpath': 'save_models', 'save_top_k': '(3)', 'verbose': '(True)', 'save_last': '(True)'}), '(monitor=monitor, dirpath=save_models, save_top_k=3, verbose\n =True, save_last=True)\n', (9788, 9875), False, 'from pytorch_lightning.callbacks import ModelCheckpoint\n'), ((9889, 9946), 'pytorch_lightning.callbacks.early_stopping.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'monitor', 'patience': "args['patience']"}), "(monitor=monitor, patience=args['patience'])\n", (9902, 9946), False, 'from 
pytorch_lightning.callbacks.early_stopping import EarlyStopping\n'), ((9976, 10014), 'pytorch_lightning.loggers.TensorBoardLogger', 'pl_loggers.TensorBoardLogger', (['save_dir'], {}), '(save_dir)\n', (10004, 10014), True, 'from pytorch_lightning import loggers as pl_loggers\n'), ((10032, 10264), 'pytorch_lightning.Trainer', 'pl.Trainer', ([], {'precision': '(16)', 'gpus': '(1)', 'gradient_clip_val': '(0.5)', 'accumulate_grad_batches': '(16)', 'callbacks': '[early_stopping, checkpoint_callback]', 'check_val_every_n_epoch': '(1)', 'profiler': '"""simple"""', 'num_sanity_val_steps': '(0)', 'logger': 'tb_logger'}), "(precision=16, gpus=1, gradient_clip_val=0.5,\n accumulate_grad_batches=16, callbacks=[early_stopping,\n checkpoint_callback], check_val_every_n_epoch=1, profiler='simple',\n num_sanity_val_steps=0, logger=tb_logger)\n", (10042, 10264), True, 'import pytorch_lightning as pl\n'), ((10273, 10389), 'torch.utils.data.DataLoader', 'DataLoader', (['training_generator'], {'batch_size': "args['batch_size']", 'num_workers': '(8)', 'pin_memory': '(True)', 'prefetch_factor': '(5)'}), "(training_generator, batch_size=args['batch_size'], num_workers=8,\n pin_memory=True, prefetch_factor=5)\n", (10283, 10389), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((10410, 10529), 'torch.utils.data.DataLoader', 'DataLoader', (['validation_generator'], {'batch_size': "args['batch_size']", 'num_workers': '(8)', 'pin_memory': '(True)', 'prefetch_factor': '(5)'}), "(validation_generator, batch_size=args['batch_size'], num_workers\n =8, pin_memory=True, prefetch_factor=5)\n", (10420, 10529), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((10699, 10710), 'time.time', 'time.time', ([], {}), '()\n', (10708, 10710), False, 'import time\n'), ((11356, 11393), 'os.path.join', 'os.path.join', (['save_dir', '"""checkpoints"""'], {}), "(save_dir, 'checkpoints')\n", (11368, 11393), False, 'import os\n'), ((11412, 11435), 'os.path.isdir', 'os.path.isdir', 
(['save_dir'], {}), '(save_dir)\n', (11425, 11435), False, 'import os\n'), ((11485, 11509), 'os.makedirs', 'os.makedirs', (['save_models'], {}), '(save_models)\n', (11496, 11509), False, 'import os\n'), ((11940, 11997), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_in', 'n_out', 'k'], {'stride': 'stride', 'padding': 'padding'}), '(n_in, n_out, k, stride=stride, padding=padding)\n', (11949, 11997), False, 'from torch import nn\n'), ((12370, 12471), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['n_in', 'n_out', 'k'], {'stride': 'stride', 'padding': 'padding', 'output_padding': 'output_padding'}), '(n_in, n_out, k, stride=stride, padding=padding,\n output_padding=output_padding)\n', (12388, 12471), False, 'from torch import nn\n'), ((12644, 12653), 'torch.nn.GELU', 'nn.GELU', ([], {}), '()\n', (12651, 12653), False, 'from torch import nn\n'), ((12704, 12797), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(in_channels * 2)', '(in_channels * 2)', '(3)', '(1)', '(1)'], {'activation': 'activation', 'dropout': '(0.1)'}), '(in_channels * 2, in_channels * 2, 3, 1, 1, activation=activation,\n dropout=0.1)\n', (12714, 12797), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((12814, 12907), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(in_channels * 2)', '(in_channels * 2)', '(3)', '(1)', '(1)'], {'activation': 'activation', 'dropout': '(0.1)'}), '(in_channels * 2, in_channels * 2, 3, 1, 1, activation=activation,\n dropout=0.1)\n', (12824, 12907), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((12923, 13012), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(in_channels * 2)', 'in_channels', '(3)', '(1)', '(1)'], {'activation': 'activation', 'dropout': '(0.1)'}), '(in_channels * 2, in_channels, 3, 1, 1, activation=activation,\n dropout=0.1)\n', (12933, 13012), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), 
((13029, 13114), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['in_channels', 'in_channels', '(3)', '(1)', '(1)'], {'activation': 'activation', 'dropout': '(0.1)'}), '(in_channels, in_channels, 3, 1, 1, activation=activation,\n dropout=0.1)\n', (13039, 13114), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((13135, 13220), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['in_channels', 'in_channels', '(3)', '(1)', '(1)'], {'activation': 'activation', 'dropout': '(0.1)'}), '(in_channels, in_channels, 3, 1, 1, activation=activation,\n dropout=0.1)\n', (13145, 13220), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((13983, 14011), 'torch.cat', 'torch.cat', (['[x_i, x_j]'], {'dim': '(1)'}), '([x_i, x_j], dim=1)\n', (13992, 14011), False, 'import torch\n'), ((14348, 14402), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(3)', '(8)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(3, 8, 3, 1, 1, activation, dropout=dropout)\n', (14358, 14402), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14415, 14432), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (14429, 14432), False, 'from torch import nn\n'), ((14443, 14480), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(8)', '(8)', '(3)', '(2)', '(1)', 'activation'], {}), '(8, 8, 3, 2, 1, activation)\n', (14453, 14480), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14491, 14545), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(8)', '(8)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(8, 8, 3, 1, 1, activation, dropout=dropout)\n', (14501, 14545), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14558, 14575), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (14572, 14575), False, 
'from torch import nn\n'), ((14586, 14624), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(8)', '(16)', '(3)', '(2)', '(1)', 'activation'], {}), '(8, 16, 3, 2, 1, activation)\n', (14596, 14624), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14635, 14691), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(16)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(16, 16, 3, 1, 1, activation, dropout=dropout)\n', (14645, 14691), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14704, 14722), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(16)'], {}), '(16)\n', (14718, 14722), False, 'from torch import nn\n'), ((14733, 14772), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(16)', '(3)', '(2)', '(1)', 'activation'], {}), '(16, 16, 3, 2, 1, activation)\n', (14743, 14772), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14783, 14839), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(16)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(16, 16, 3, 1, 1, activation, dropout=dropout)\n', (14793, 14839), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14852, 14870), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(16)'], {}), '(16)\n', (14866, 14870), False, 'from torch import nn\n'), ((14881, 14920), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(32)', '(3)', '(2)', '(1)', 'activation'], {}), '(16, 32, 3, 2, 1, activation)\n', (14891, 14920), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((14931, 14987), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(32)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(32, 32, 3, 1, 1, activation, dropout=dropout)\n', (14941, 14987), False, 'from gMLPhase.gMLP_torch import 
gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15000, 15018), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (15014, 15018), False, 'from torch import nn\n'), ((15029, 15068), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(32)', '(3)', '(2)', '(1)', 'activation'], {}), '(32, 32, 3, 2, 1, activation)\n', (15039, 15068), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15079, 15135), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(32)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(32, 32, 3, 1, 1, activation, dropout=dropout)\n', (15089, 15135), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15148, 15166), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (15162, 15166), False, 'from torch import nn\n'), ((15181, 15220), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(64)', '(3)', '(2)', '(1)', 'activation'], {}), '(32, 64, 3, 2, 1, activation)\n', (15191, 15220), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15231, 15287), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(64)', '(64)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(64, 64, 3, 1, 1, activation, dropout=dropout)\n', (15241, 15287), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15300, 15318), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (15314, 15318), False, 'from torch import nn\n'), ((15329, 15368), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(64)', '(64)', '(3)', '(2)', '(1)', 'activation'], {}), '(64, 64, 3, 2, 1, activation)\n', (15339, 15368), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15379, 15435), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(64)', '(64)', 
'(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(64, 64, 3, 1, 1, activation, dropout=dropout)\n', (15389, 15435), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15448, 15466), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (15462, 15466), False, 'from torch import nn\n'), ((15563, 15641), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(64)', '(64)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(64, 64, 3, 2, padding=1, output_padding=1, activation=activation)\n', (15575, 15641), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15654, 15672), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(64)'], {}), '(64)\n', (15668, 15672), False, 'from torch import nn\n'), ((15683, 15739), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(64)', '(64)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(64, 64, 3, 1, 1, activation, dropout=dropout)\n', (15693, 15739), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15747, 15825), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(64)', '(32)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(64, 32, 3, 2, padding=1, output_padding=1, activation=activation)\n', (15759, 15825), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15838, 15856), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (15852, 15856), False, 'from torch import nn\n'), ((15867, 15923), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(32)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(32, 32, 3, 1, 1, activation, dropout=dropout)\n', (15877, 15923), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((15931, 
16009), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(32)', '(32)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(0)', 'activation': 'activation'}), '(32, 32, 3, 2, padding=1, output_padding=0, activation=activation)\n', (15943, 16009), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16022, 16040), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(32)'], {}), '(32)\n', (16036, 16040), False, 'from torch import nn\n'), ((16051, 16107), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(32)', '(32)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(32, 32, 3, 1, 1, activation, dropout=dropout)\n', (16061, 16107), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16115, 16193), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(32)', '(16)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(32, 16, 3, 2, padding=1, output_padding=1, activation=activation)\n', (16127, 16193), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16206, 16224), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(16)'], {}), '(16)\n', (16220, 16224), False, 'from torch import nn\n'), ((16235, 16291), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(16)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(16, 16, 3, 1, 1, activation, dropout=dropout)\n', (16245, 16291), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16299, 16377), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(16)', '(16)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(16, 16, 3, 2, padding=1, output_padding=1, activation=activation)\n', (16311, 16377), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16390, 16408), 
'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(16)'], {}), '(16)\n', (16404, 16408), False, 'from torch import nn\n'), ((16419, 16475), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(16)', '(16)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(16, 16, 3, 1, 1, activation, dropout=dropout)\n', (16429, 16475), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16483, 16560), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(16)', '(8)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(16, 8, 3, 2, padding=1, output_padding=1, activation=activation)\n', (16495, 16560), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16573, 16590), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (16587, 16590), False, 'from torch import nn\n'), ((16601, 16655), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(8)', '(8)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(8, 8, 3, 1, 1, activation, dropout=dropout)\n', (16611, 16655), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16663, 16739), 'gMLPhase.gMLP_torch.deconv_block', 'deconv_block', (['(8)', '(8)', '(3)', '(2)'], {'padding': '(1)', 'output_padding': '(1)', 'activation': 'activation'}), '(8, 8, 3, 2, padding=1, output_padding=1, activation=activation)\n', (16675, 16739), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, PreNorm, conv_block, deconv_block\n'), ((16752, 16769), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(8)'], {}), '(8)\n', (16766, 16769), False, 'from torch import nn\n'), ((16780, 16834), 'gMLPhase.gMLP_torch.conv_block', 'conv_block', (['(8)', '(8)', '(3)', '(1)', '(1)', 'activation'], {'dropout': 'dropout'}), '(8, 8, 3, 1, 1, activation, dropout=dropout)\n', (16790, 16834), False, 'from gMLPhase.gMLP_torch import gMLPBlock, Residual, 
PreNorm, conv_block, deconv_block\n'), ((16842, 16881), 'torch.nn.Conv1d', 'nn.Conv1d', (['(8)', '(1)', '(3)'], {'stride': '(1)', 'padding': '(1)'}), '(8, 1, 3, stride=1, padding=1)\n', (16851, 16881), False, 'from torch import nn\n'), ((17793, 17806), 'numpy.squeeze', 'np.squeeze', (['x'], {}), '(x)\n', (17803, 17806), True, 'import numpy as np\n'), ((17827, 17849), 'numpy.squeeze', 'np.squeeze', (['edge_index'], {}), '(edge_index)\n', (17837, 17849), True, 'import numpy as np\n'), ((18175, 18199), 'torch.cat', 'torch.cat', (['(x_P, x_S)', '(1)'], {}), '((x_P, x_S), 1)\n', (18184, 18199), False, 'import torch\n'), ((18310, 18323), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (18320, 18323), True, 'import numpy as np\n'), ((19731, 19744), 'numpy.squeeze', 'np.squeeze', (['y'], {}), '(y)\n', (19741, 19744), True, 'import numpy as np\n'), ((20521, 20574), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {}), '(optimizer)\n', (20563, 20574), False, 'import torch\n'), ((9029, 9084), 'os.path.join', 'os.path.join', (["args['model_folder']", "args['input_model']"], {}), "(args['model_folder'], args['input_model'])\n", (9041, 9084), False, 'import os\n'), ((9097, 9153), 'os.path.join', 'os.path.join', (["args['model_folder']", "args['hparams_file']"], {}), "(args['model_folder'], args['hparams_file'])\n", (9109, 9153), False, 'import os\n'), ((11302, 11313), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11311, 11313), False, 'import os\n'), ((11450, 11473), 'shutil.rmtree', 'shutil.rmtree', (['save_dir'], {}), '(save_dir)\n', (11463, 11473), False, 'import shutil\n'), ((11562, 11598), 'os.path.join', 'os.path.join', (['save_dir', '"""trainer.py"""'], {}), "(save_dir, 'trainer.py')\n", (11574, 11598), False, 'import os\n'), ((11780, 11837), 'torch.nn.Conv1d', 'nn.Conv1d', (['n_in', 'n_out', 'k'], {'stride': 'stride', 'padding': 'padding'}), '(n_in, n_out, k, stride=stride, padding=padding)\n', (11789, 
11837), False, 'from torch import nn\n'), ((11877, 11898), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (11887, 11898), False, 'from torch import nn\n'), ((12154, 12255), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['n_in', 'n_out', 'k'], {'stride': 'stride', 'padding': 'padding', 'output_padding': 'output_padding'}), '(n_in, n_out, k, stride=stride, padding=padding,\n output_padding=output_padding)\n', (12172, 12255), False, 'from torch import nn\n'), ((12291, 12312), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (12301, 12312), False, 'from torch import nn\n'), ((17924, 17955), 'torch.nn.Sequential', 'nn.Sequential', (['*self.gMLPlayers'], {}), '(*self.gMLPlayers)\n', (17937, 17955), False, 'from torch import nn\n'), ((18777, 18832), 'logging.debug', 'logging.debug', (['"""This message should go to the log file"""'], {}), "('This message should go to the log file')\n", (18790, 18832), False, 'import logging\n'), ((18846, 18867), 'logging.debug', 'logging.debug', (['"""help"""'], {}), "('help')\n", (18859, 18867), False, 'import logging\n'), ((18881, 18905), 'logging.debug', 'logging.debug', (['batch[-1]'], {}), '(batch[-1])\n', (18894, 18905), False, 'import logging\n'), ((18989, 19013), 'logging.debug', 'logging.debug', (['batch_idx'], {}), '(batch_idx)\n', (19002, 19013), False, 'import logging\n'), ((19027, 19047), 'logging.debug', 'logging.debug', (['batch'], {}), '(batch)\n', (19040, 19047), False, 'import logging\n')] |
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import torch
import numpy as np
# Select the compute device once at import time: first CUDA GPU if present,
# otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class BirdDataset(Dataset):
    """In-memory dataset of policy-gradient transitions.

    Each item is a dict with keys 'state', 'action', 'logprob' and 'reward',
    taken from four parallel sequences supplied at construction time.
    """

    def __init__(self, data, label, logprob, reward):
        # Parallel sequences; index i of each belongs to the same transition.
        self.data = data
        self.label = label
        self.logprob = logprob
        self.reward = reward

    def __getitem__(self, index):
        # Assemble one transition as a dict of the four aligned entries.
        return {
            'state': self.data[index],
            'action': self.label[index],
            'logprob': self.logprob[index],
            'reward': self.reward[index],
        }

    def __len__(self):
        # All four sequences are assumed equally long; use the states' length.
        return len(self.data)
class BirdDatasetDQN(Dataset):
    """Replay-buffer dataset for DQN training.

    Wraps a replay buffer's parallel arrays (states, actions, next states,
    rewards, terminal flags) and converts image frames to tensors on access.
    """
    def __init__(self,buffer,memory_count,capacity):
        # Views into the replay buffer's parallel arrays.
        self.state = buffer.states
        self.action=buffer.actions
        self.next_state=buffer.next_states
        self.reward=np.array(buffer.rewards)
        # Scale the first `memory_count` rewards by their std-dev (+1e-7 to
        # avoid division by zero).
        # NOTE(review): the mean is NOT subtracted, so this is scaling rather
        # than full z-scoring -- confirm that is intended.
        self.reward=(self.reward[:memory_count]) / (np.std(self.reward[:memory_count]) + 1e-7)
        self.is_terminate=buffer.is_terminate
        # Lazily applied per-frame transform (PIL/ndarray -> float tensor).
        self.img_transform = transforms.Compose([
            #transforms.Resize((112,112)),
            transforms.ToTensor(),
            #transforms.Normalize(*mean_std),
            ])
        # Effective dataset size is capped at the buffer capacity.
        self.memory_count=min(memory_count,capacity)
    def __getitem__(self, index):
        # Convert the raw frames to tensors on the fly.
        state=self.img_transform(self.state[index])
        action=self.action[index]
        next_state=self.img_transform(self.next_state[index])
        reward=self.reward[index]
        is_terminate=self.is_terminate[index]
        sample={'state':state,'action':action,'next_state':next_state,'reward':reward,'is_terminate':is_terminate}
        return sample
    def __len__(self):
        return self.memory_count | [
"numpy.std",
"numpy.array",
"torch.cuda.is_available",
"torch.device",
"torchvision.transforms.ToTensor"
] | [((122, 141), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (134, 141), False, 'import torch\n'), ((145, 170), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (168, 170), False, 'import torch\n'), ((187, 209), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (199, 209), False, 'import torch\n'), ((942, 966), 'numpy.array', 'np.array', (['buffer.rewards'], {}), '(buffer.rewards)\n', (950, 966), True, 'import numpy as np\n'), ((1019, 1053), 'numpy.std', 'np.std', (['self.reward[:memory_count]'], {}), '(self.reward[:memory_count])\n', (1025, 1053), True, 'import numpy as np\n'), ((1213, 1234), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1232, 1234), True, 'import torchvision.transforms as transforms\n')] |
from gs_orth import *
import numpy as np
def qr_decomposition_solver(A,B):
    """Solve the linear system A.x = B via QR decomposition.

    Q is assembled from the orthonormal basis produced by Gram-Schmidt
    orthogonalization, R is upper triangular with R[i][j] = <a_j, q_i>, and
    the reduced triangular system R.x = Q^T.B is solved by back substitution.

    Inputs: Lists A, B (converted to arrays inside the function)
    Outputs: the list x of coefficients (top to bottom)
    """
    coeff_matrix = np.array(A)
    # `columns` holds the columns of A, `basis` the orthonormal basis vectors
    # obtained from Gram-Schmidt orthogonalization.
    columns, basis = gs_orthogonalization(coeff_matrix)
    # Rows of Q^T are exactly the orthonormal basis vectors.
    Q_t = np.array([vec.tolist() for vec in basis])
    n = coeff_matrix.shape[1]
    # Build the upper-triangular R: R[i][j] = columns[j] . basis[i], j >= i.
    R = np.zeros((n, n))
    for row in range(n):
        for col in range(row, n):
            R[row][col] = np.dot(columns[col], basis[row])
    # QR decomposition reduces A.x = B to the triangular system R.x = Q^T.B.
    rhs = Q_t.dot(np.array(B))
    # Back substitution solves from the last unknown upwards, so `solution`
    # accumulates x_{n-1}, x_{n-2}, ... and is reversed at the end.
    solution = [rhs[n - 1] / R[n - 1][n - 1]]
    for row in range(n - 2, -1, -1):
        residual = rhs[row]
        for k in range(0, n - 1 - row):
            # solution[k] corresponds to unknown x_{n-1-k}.
            residual = residual - R[row][n - 1 - k] * solution[k]
        solution.append(residual / R[row][row])
    solution.reverse()
    return solution
| [
"numpy.zeros",
"numpy.dot",
"numpy.array"
] | [((440, 451), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (448, 451), True, 'import numpy as np\n'), ((898, 911), 'numpy.array', 'np.array', (['Q_t'], {}), '(Q_t)\n', (906, 911), True, 'import numpy as np\n'), ((1269, 1295), 'numpy.zeros', 'np.zeros', (['(n_cols, n_cols)'], {}), '((n_cols, n_cols))\n', (1277, 1295), True, 'import numpy as np\n'), ((1587, 1598), 'numpy.array', 'np.array', (['B'], {}), '(B)\n', (1595, 1598), True, 'import numpy as np\n'), ((1410, 1445), 'numpy.dot', 'np.dot', (['columns[j]', 'on_basis_vec[i]'], {}), '(columns[j], on_basis_vec[i])\n', (1416, 1445), True, 'import numpy as np\n')] |
import cv2
import numpy as np
import pyautogui as gui
from time import time
# Timestamp of the previous frame, used below to compute per-frame FPS.
loop_time = time()
while(True):
    # Grab the current screen contents as a PIL image.
    screenshot = gui.screenshot()
    # re-shape into format opencv understands
    screenshot = np.array(screenshot)
    # remember opencv uses BGR so we have to convert
    screenshot = cv2.cvtColor(screenshot, cv2.COLOR_RGB2BGR)
    cv2.imshow("Screenshot", screenshot)
    # FPS = 1 / (time elapsed since the previous iteration).
    print('FPS {}'.format(1 / (time() - loop_time)))
    loop_time = time()
    # press 'q' with the output window focused to exit.
    # waits 1 ms every loop to process key presses
    if cv2.waitKey(1) == ord('q'):
        cv2.destroyAllWindows()
        break | [
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"pyautogui.screenshot",
"time.time",
"numpy.array",
"cv2.imshow"
] | [((89, 95), 'time.time', 'time', ([], {}), '()\n', (93, 95), False, 'from time import time\n'), ((128, 144), 'pyautogui.screenshot', 'gui.screenshot', ([], {}), '()\n', (142, 144), True, 'import pyautogui as gui\n'), ((208, 228), 'numpy.array', 'np.array', (['screenshot'], {}), '(screenshot)\n', (216, 228), True, 'import numpy as np\n'), ((300, 343), 'cv2.cvtColor', 'cv2.cvtColor', (['screenshot', 'cv2.COLOR_RGB2BGR'], {}), '(screenshot, cv2.COLOR_RGB2BGR)\n', (312, 343), False, 'import cv2\n'), ((349, 385), 'cv2.imshow', 'cv2.imshow', (['"""Screenshot"""', 'screenshot'], {}), "('Screenshot', screenshot)\n", (359, 385), False, 'import cv2\n'), ((456, 462), 'time.time', 'time', ([], {}), '()\n', (460, 462), False, 'from time import time\n'), ((578, 592), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (589, 592), False, 'import cv2\n'), ((614, 637), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (635, 637), False, 'import cv2\n'), ((418, 424), 'time.time', 'time', ([], {}), '()\n', (422, 424), False, 'from time import time\n')] |
import unittest
import numpy as np
from ShutTUM import Interpolation
class TestInterpolation(unittest.TestCase):
    """Unit tests for the linear and cubic interpolation helpers."""

    def setUp(self):
        # Shorthand handles to the interpolation functions under test.
        self.linear = Interpolation.linear
        self.slerp = Interpolation.slerp
        self.cubic = Interpolation.cubic
        # Two endpoint vectors plus their element-wise midpoint.
        self.low = np.array((0, 2, 10))
        self.high = np.array((1, 4, 110))
        self.mid = np.array((.5, 3, 60))

    def test_linear_for_lower_bound(self):
        # t == 0 must reproduce the lower endpoint exactly.
        self.assertEqual(self.linear(0, 1, 0), 0)
        self.assertEqual(self.linear(2, 180, 0), 2)
        self.assertListEqual(self.linear(self.low, self.high, 0).tolist(), self.low.tolist())

    def test_linear_for_upper_bound(self):
        # t == 1 must reproduce the upper endpoint exactly.
        self.assertEqual(self.linear(0, 1, 1), 1)
        self.assertEqual(self.linear(2, 180, 1), 180)
        self.assertListEqual(self.linear(self.low, self.high, 1).tolist(), self.high.tolist())

    def test_linear_50_percent(self):
        # t == 0.5 must land on the arithmetic mean of the endpoints.
        self.assertEqual(self.linear(0, 1, 0.5), 0.5)
        self.assertEqual(self.linear(2, 182, 0.5), 92)
        self.assertListEqual(self.linear(self.low, self.high, .5).tolist(), self.mid.tolist())

    def test_cubic_interpolation_with_borders(self):
        # Reference polynomial taken from http://www.paulinternet.nl/?page=bicubic
        a0, a, b, b0 = 2, 4, 2, 3
        def reference(x):
            return 7./2.*x**3 - 11./2.*x**2 + 4
        for t in np.linspace(0, 1, num=10):
            self.assertAlmostEqual(reference(t), self.cubic(a, b, t, a0, b0))

    def test_cubic_interpolation_without_borders(self):
        # Without border points the cubic degenerates to this closed form.
        a, b = 0, 1
        def reference(x):
            return (a-b)*x**3 + 1.5*(b-a)*x**2 + (.5*b+.5*a)*x + a
        for t in np.linspace(0, 1, num=16):
            self.assertAlmostEqual(reference(t), self.cubic(a, b, t))
if __name__ == '__main__':
    # Run the test suite when this module is executed directly.
    unittest.main()
| [
"unittest.main",
"numpy.array",
"numpy.linspace"
] | [((1816, 1831), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1829, 1831), False, 'import unittest\n'), ((286, 306), 'numpy.array', 'np.array', (['(0, 2, 10)'], {}), '((0, 2, 10))\n', (294, 306), True, 'import numpy as np\n'), ((327, 348), 'numpy.array', 'np.array', (['(1, 4, 110)'], {}), '((1, 4, 110))\n', (335, 348), True, 'import numpy as np\n'), ((369, 391), 'numpy.array', 'np.array', (['(0.5, 3, 60)'], {}), '((0.5, 3, 60))\n', (377, 391), True, 'import numpy as np\n'), ((1345, 1370), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(10)'}), '(0, 1, num=10)\n', (1356, 1370), True, 'import numpy as np\n'), ((1653, 1678), 'numpy.linspace', 'np.linspace', (['(0)', '(1)'], {'num': '(16)'}), '(0, 1, num=16)\n', (1664, 1678), True, 'import numpy as np\n')] |
import logging
import numpy as np
import satmeta.s2.meta as s2meta
import satmeta.s2.angles_2d as s2angles
logger = logging.getLogger(__name__)
def toa_reflectance_to_radiance(dndata, mtdFile, mtdFile_tile, band_ids, dst_transform=None):
    """Convert Sentinel-2 L1C TOA reflectance digital numbers to radiance.

    Method taken from the bottom of
    http://s2tbx.telespazio-vega.de/sen2three/html/r2rusage.html

    Parameters
    ----------
    dndata : ndarray shape(nbands, ny, nx)
        input TOA reflectance digital numbers
    mtdFile : str
        path to product metadata file
    mtdFile_tile : str
        path to granule metadata file
    band_ids : sequence of int
        band IDs (0-based index of bands in img)
    dst_transform : optional
        when given, per-pixel sun zenith angles are resampled onto this grid
        (presumably an affine transform -- see satmeta's angle resampling);
        otherwise the granule mean sun zenith is used

    Returns
    -------
    ndarray of float32
        radiance with the same shape as `dndata`

    Note
    ----
    Assumes a L1C product which contains TOA reflectance:
    https://sentinel.esa.int/web/sentinel/user-guides/sentinel-2-msi/product-types
    """
    if mtdFile_tile is None:
        raise ValueError('Tile metadata file required!')
    meta = s2meta.parse_metadata(mtdFile)
    reflectance_conversion = meta['reflectance_conversion']
    quantification = meta['quantification_value']
    solar_irradiance = np.array(meta['irradiance_values'])[band_ids]
    if dst_transform is None:
        # Fall back to the granule-wide mean sun zenith angle.
        granule_meta = s2meta.parse_granule_metadata(mtdFile_tile)
        sun_zenith = granule_meta['sun_zenith']
    else:
        # Interpolate per-pixel sun zenith angles onto the destination grid.
        angles = s2angles.parse_resample_angles(
            mtdFile_tile,
            dst_transform=dst_transform,
            dst_shape=dndata.shape[1:],
            angles=['Sun'],
            angle_dirs=['Zenith'],
            resample_method='rasterio'
        )
        sun_zenith = angles['Sun']['Zenith']
    # Convert to radiance -- same arithmetic sequence as the Sen2Three formula.
    radiance = dndata.astype('f4')
    radiance = radiance / reflectance_conversion
    radiance = radiance * solar_irradiance[:, None, None]
    radiance = radiance * np.cos(np.radians(sun_zenith))
    radiance = radiance / (np.pi * quantification)
    return radiance
| [
"numpy.radians",
"satmeta.s2.meta.parse_metadata",
"logging.getLogger",
"numpy.array",
"satmeta.s2.angles_2d.parse_resample_angles",
"satmeta.s2.meta.parse_granule_metadata"
] | [((119, 146), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (136, 146), False, 'import logging\n'), ((891, 921), 'satmeta.s2.meta.parse_metadata', 's2meta.parse_metadata', (['mtdFile'], {}), '(mtdFile)\n', (912, 921), True, 'import satmeta.s2.meta as s2meta\n'), ((1017, 1052), 'numpy.array', 'np.array', (["meta['irradiance_values']"], {}), "(meta['irradiance_values'])\n", (1025, 1052), True, 'import numpy as np\n'), ((1193, 1362), 'satmeta.s2.angles_2d.parse_resample_angles', 's2angles.parse_resample_angles', (['mtdFile_tile'], {'dst_transform': 'dst_transform', 'dst_shape': 'dst_shape', 'angles': "['Sun']", 'angle_dirs': "['Zenith']", 'resample_method': '"""rasterio"""'}), "(mtdFile_tile, dst_transform=dst_transform,\n dst_shape=dst_shape, angles=['Sun'], angle_dirs=['Zenith'],\n resample_method='rasterio')\n", (1223, 1362), True, 'import satmeta.s2.angles_2d as s2angles\n'), ((1534, 1577), 'satmeta.s2.meta.parse_granule_metadata', 's2meta.parse_granule_metadata', (['mtdFile_tile'], {}), '(mtdFile_tile)\n', (1563, 1577), True, 'import satmeta.s2.meta as s2meta\n'), ((1766, 1788), 'numpy.radians', 'np.radians', (['sun_zenith'], {}), '(sun_zenith)\n', (1776, 1788), True, 'import numpy as np\n')] |
"""
In this implementation, we use the architecture of Reptile as the same as MAML.
"""
import torch
import visdom
import numpy as np
import learn2learn as l2l
import copy
import time
import os
from Models.MAML.maml_model import Net4CNN
from Datasets.cwru_data import MAML_Dataset
from my_utils.train_utils import accuracy
# Visdom server connection used below to plot live loss/accuracy curves.
vis = visdom.Visdom(env='yancy_meta')
# Run on the GPU when available, otherwise on the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Reptile_learner(object):
    """Reptile meta-learner (same network architecture as the MAML variant).

    Each meta-iteration clones the model, adapts the clone on a sampled task
    with Adam, and moves the meta-parameters towards the adapted weights.
    """
    def __init__(self, ways):
        """Build the 4-layer CNN backbone for `ways`-way classification."""
        super().__init__()
        h_size = 64
        layers = 4
        sample_len = 1024
        # Flattened feature size after `layers` halvings of the input length.
        feat_size = (sample_len // 2 ** layers) * h_size
        self.model = Net4CNN(output_size=ways, hidden_size=h_size, layers=layers,
                             channels=1, embedding_size=feat_size).to(device)
        self.ways = ways

    @staticmethod
    def fast_adapt(batch, learner, adapt_opt, loss, adaptation_steps, shots, ways, batch_size=10):
        """Adapt `learner` on one task, then return (eval loss, eval accuracy)."""
        data, labels = batch
        data, labels = data.to(device), labels.to(device)
        # Separate data into adaptation/evaluation sets
        adaptation_indices = np.zeros(data.size(0), dtype=bool)
        adaptation_indices[np.arange(shots * ways) * 2] = True
        evaluation_indices = torch.from_numpy(~adaptation_indices)  # odd positions -> evaluation split
        adaptation_indices = torch.from_numpy(adaptation_indices)  # even positions -> adaptation split
        adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
        evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]
        # Adapt the model
        for step in range(adaptation_steps):
            # Inner-loop step on a mini-batch sampled with replacement.
            idx = torch.randint(adaptation_data.shape[0], size=(batch_size,))
            adapt_x = adaptation_data[idx]
            adapt_y = adaptation_labels[idx]
            adapt_opt.zero_grad()
            error = loss(learner(adapt_x), adapt_y)
            error.backward()
            adapt_opt.step()
        # Evaluate the adapted model
        predictions = learner(evaluation_data)
        valid_error = loss(predictions, evaluation_labels)
        valid_accuracy = accuracy(predictions, evaluation_labels)
        return valid_error, valid_accuracy

    @staticmethod
    def build_tasks(mode='train', ways=10, shots=5, num_tasks=100, filter_labels=None):
        """Build a TaskDataset sampling `ways`-way, 2*`shots`-shot episodes."""
        dataset = l2l.data.MetaDataset(MAML_Dataset(mode=mode, ways=ways))
        new_ways = len(filter_labels) if filter_labels is not None else ways
        # label_shuffle_per_task = False if ways <=30 else True
        assert shots * 2 * new_ways <= dataset.__len__() // ways * new_ways, "Reduce the number of shots!"
        tasks = l2l.data.TaskDataset(dataset, task_transforms=[
            l2l.data.transforms.FusedNWaysKShots(dataset, new_ways, 2 * shots, filter_labels=filter_labels),
            l2l.data.transforms.LoadData(dataset),
            # l2l.data.transforms.RemapLabels(dataset, shuffle=label_shuffle_per_task),
            l2l.data.transforms.RemapLabels(dataset, shuffle=True),
            # do not keep the original labels, use (0 ,..., n-1);
            # if shuffle=True, to shuffle labels at each task.
            l2l.data.transforms.ConsecutiveLabels(dataset),
            # re-order samples and make their original labels as (0 ,..., n-1).
        ], num_tasks=num_tasks)
        return tasks

    def model_save(self, model_path):
        """Save the model weights, appending '(1)' if the path already exists."""
        filename = model_path+'(1)' if os.path.exists(model_path) else model_path
        torch.save(self.model.state_dict(), filename)
        # NOTE(review): the message does not include the actual save path --
        # likely intended to interpolate `filename`.
        print(f'Save model at: (unknown)')

    def train(self, save_path, shots):
        """Run the Reptile outer loop; interactively saves checkpoints."""
        # 5 shot:
        # meta_lr = 1.0 # 1.0
        # fast_lr = 0.001 # 0.001-0.005
        # 1 shot:
        meta_lr = 0.1  # outer (meta) step size; 0.005, <0.01
        fast_lr = 0.001  # inner-loop Adam learning rate; 0.05
        opt = torch.optim.SGD(self.model.parameters(), meta_lr)
        adapt_opt = torch.optim.Adam(self.model.parameters(), lr=fast_lr, betas=(0, 0.999))  # 5 shot
        # adapt_opt_state = adapt_opt.state_dict()
        init_adapt_opt_state = adapt_opt.state_dict()
        adapt_opt_state = None
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        train_ways = valid_ways = self.ways
        print(f"{train_ways}-ways, {shots}-shots for training ...")
        train_tasks = self.build_tasks('train', train_ways, shots, 1000, None)
        valid_tasks = self.build_tasks('validation', valid_ways, shots, 1000, None)
        counter = 0
        Epochs = 10000
        meta_batch_size = 16  # tasks per meta-update
        train_steps = 4  # inner adaptation steps during training; 8
        test_steps = 4
        train_bsz = 10
        test_bsz = 15
        for ep in range(Epochs):
            t0 = time.time()
            if ep == 0:
                adapt_opt_state = init_adapt_opt_state
            opt.zero_grad()
            meta_train_error = 0.0
            meta_train_accuracy = 0.0
            meta_valid_error = 0.0
            meta_valid_accuracy = 0.0
            # anneal meta-lr
            # NOTE(review): this expression simplifies to `meta_lr` for every
            # epoch, so no annealing actually happens -- confirm whether the
            # second term was meant to use a different final learning rate.
            frac_done = float(ep) / 100000
            new_lr = frac_done * meta_lr + (1 - frac_done) * meta_lr
            for pg in opt.param_groups:
                pg['lr'] = new_lr
            # zero-grad the parameters
            for p in self.model.parameters():
                p.grad = torch.zeros_like(p.data)
            for task in range(meta_batch_size):
                # Compute meta-training loss on a fresh clone of the model.
                learner = copy.deepcopy(self.model)
                adapt_opt = torch.optim.Adam(learner.parameters(), lr=fast_lr, betas=(0, 0.999))
                adapt_opt.load_state_dict(adapt_opt_state)
                batch = train_tasks.sample()
                evaluation_error, evaluation_accuracy = self.fast_adapt(batch, learner, adapt_opt, loss,
                                                                        train_steps, shots, train_ways, train_bsz)
                adapt_opt_state = adapt_opt.state_dict()
                # Accumulate -(adapted weights) into the meta-gradient; the
                # +p.data added after the loop turns this into (p - l), the
                # Reptile update direction.
                for p, l in zip(self.model.parameters(), learner.parameters()):
                    p.grad.data.add_(l.data, alpha=-1.0)
                meta_train_error += evaluation_error.item()
                meta_train_accuracy += evaluation_accuracy.item()
                # Compute meta-validation loss (does not contribute gradients).
                learner = copy.deepcopy(self.model)
                adapt_opt = torch.optim.Adam(learner.parameters(), lr=fast_lr, betas=(0, 0.999))
                # adapt_opt.load_state_dict(adapt_opt_state)
                adapt_opt.load_state_dict(init_adapt_opt_state)
                batch = valid_tasks.sample()
                evaluation_error, evaluation_accuracy = self.fast_adapt(batch, learner, adapt_opt, loss,
                                                                        test_steps, shots, train_ways, test_bsz)
                meta_valid_error += evaluation_error.item()
                meta_valid_accuracy += evaluation_accuracy.item()
            # Print some metrics
            t1 = time.time()
            print(f'Time /epoch: {t1-t0:.3f} s')
            print('\n')
            print('Iteration', ep + 1)
            print(f'Meta Train Error: {meta_train_error / meta_batch_size: .4f}')
            print(f'Meta Train Accuracy: {meta_train_accuracy / meta_batch_size: .4f}')
            print(f'Meta Valid Error: {meta_valid_error / meta_batch_size: .4f}')
            print(f'Meta Valid Accuracy: {meta_valid_accuracy / meta_batch_size: .4f}')
            # Take the meta-learning step:
            # Average the accumulated gradients and optimize
            for p in self.model.parameters():
                p.grad.data.mul_(1.0 / meta_batch_size).add_(p.data)
            opt.step()
            # Live curves in visdom (one point per meta-iteration).
            vis.line(Y=[[meta_train_error / meta_batch_size, meta_valid_error / meta_batch_size]], X=[counter],
                     update=None if counter == 0 else 'append', win='Loss_Reptile',
                     opts=dict(legend=['train', 'val'], title='Loss_Reptile'))
            vis.line(Y=[[meta_train_accuracy / meta_batch_size, meta_valid_accuracy / meta_batch_size]], X=[counter],
                     update=None if counter == 0 else 'append', win='Acc_Reptile',
                     opts=dict(legend=['train', 'val'], title='Acc_Reptile'))
            counter += 1
            # After 700 epochs, interactively offer to stop or checkpoint
            # every other epoch.
            if (ep + 1) >= 700 and (ep + 1) % 2 == 0:
                if input('\n== Stop training? == (y/n)\n').lower() == 'y':
                    new_save_path = save_path + rf'_ep{ep + 1}'
                    self.model_save(new_save_path)
                    break
                elif input('\n== Save model? == (y/n)\n').lower() == 'y':
                    new_save_path = save_path + rf'_ep{ep + 1}'
                    self.model_save(new_save_path)

    def test(self, load_path, shots, inner_test_steps=10):
        """Load a checkpoint and report mean error/accuracy over test tasks."""
        self.model.load_state_dict(torch.load(load_path))
        print('Load Model successfully from [%s]' % load_path)
        # meta_lr = 1.0 # 0.005, <0.01
        fast_lr = 0.001  # inner-loop Adam learning rate; 0.05
        # opt = torch.optim.SGD(self.model.parameters(), meta_lr)
        adapt_opt = torch.optim.Adam(self.model.parameters(), lr=fast_lr, betas=(0, 0.999))
        init_adapt_opt_state = adapt_opt.state_dict()
        loss = torch.nn.CrossEntropyLoss(reduction='mean')
        test_ways = self.ways
        print(f"{test_ways}-ways, {shots}-shots for testing ...")
        test_tasks = self.build_tasks('test', test_ways, shots, 1000, None)
        meta_batch_size = 100
        test_steps = inner_test_steps  # 50
        test_bsz = 15
        meta_test_error = 0.0
        meta_test_accuracy = 0.0
        t0 = time.time()
        # zero-grad the parameters
        for p in self.model.parameters():
            p.grad = torch.zeros_like(p.data)
        for task in range(meta_batch_size):
            # Adapt a fresh clone per task and evaluate it.
            learner = copy.deepcopy(self.model)
            adapt_opt = torch.optim.Adam(learner.parameters(), lr=fast_lr, betas=(0, 0.999))
            # adapt_opt.load_state_dict(adapt_opt_state)
            adapt_opt.load_state_dict(init_adapt_opt_state)
            batch = test_tasks.sample()
            evaluation_error, evaluation_accuracy = self.fast_adapt(batch, learner, adapt_opt, loss,
                                                                    test_steps, shots, test_ways, test_bsz)
            meta_test_error += evaluation_error.item()
            meta_test_accuracy += evaluation_accuracy.item()
        t1 = time.time()
        print(f"-------- Time for {meta_batch_size * shots} samples: {t1 - t0:.4f} sec. ----------")
        print(f'Meta Test Error: {meta_test_error / meta_batch_size: .4f}')
        print(f'Meta Test Accuracy: {meta_test_accuracy / meta_batch_size: .4f}\n')
if __name__ == "__main__":
from my_utils.init_utils import seed_torch
seed_torch(2021)
# Net = Reptile_learner(ways=10) # T1
Net = Reptile_learner(ways=4) # T2
if input('Train? y/n\n').lower() == 'y':
# path = r"G:\model_save\meta_learning\Reptile\5shot\Reptile_C30"
# Net.train(save_path=path, shots=5)
# path = r"G:\model_save\meta_learning\Reptile\1shot\Reptile_C30"
# Net.train(save_path=path, shots=1)
# path = r"G:\model_save\meta_learning\Reptile\5shot\Reptile_T2"
# Net.train(save_path=path, shots=5)
path = r"G:\model_save\meta_learning\Reptile\5shot\Reptile_T2"
Net.train(save_path=path, shots=1)
if input('Test? y/n\n').lower() == 'y':
# path = r"G:\model_save\meta_learning\Reptile\5shot\Reptile_C30_ep730"
# Net.test(path, shots=5, inner_test_steps=30)
# path = r"G:\model_save\meta_learning\Reptile\5shot\Reptile_T2_ep732"
# Net.test(path, shots=5, inner_test_steps=30)
path = r"G:\model_save\meta_learning\Reptile\1shot\Reptile_T2_ep702"
Net.test(path, shots=5, inner_test_steps=30)
| [
"visdom.Visdom",
"learn2learn.data.transforms.FusedNWaysKShots",
"numpy.arange",
"Datasets.cwru_data.MAML_Dataset",
"torch.load",
"os.path.exists",
"my_utils.train_utils.accuracy",
"Models.MAML.maml_model.Net4CNN",
"learn2learn.data.transforms.ConsecutiveLabels",
"copy.deepcopy",
"torch.randint"... | [((332, 363), 'visdom.Visdom', 'visdom.Visdom', ([], {'env': '"""yancy_meta"""'}), "(env='yancy_meta')\n", (345, 363), False, 'import visdom\n'), ((10769, 10785), 'my_utils.init_utils.seed_torch', 'seed_torch', (['(2021)'], {}), '(2021)\n', (10779, 10785), False, 'from my_utils.init_utils import seed_torch\n'), ((396, 421), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (419, 421), False, 'import torch\n'), ((1249, 1286), 'torch.from_numpy', 'torch.from_numpy', (['(~adaptation_indices)'], {}), '(~adaptation_indices)\n', (1265, 1286), False, 'import torch\n'), ((1341, 1377), 'torch.from_numpy', 'torch.from_numpy', (['adaptation_indices'], {}), '(adaptation_indices)\n', (1357, 1377), False, 'import torch\n'), ((2151, 2191), 'my_utils.train_utils.accuracy', 'accuracy', (['predictions', 'evaluation_labels'], {}), '(predictions, evaluation_labels)\n', (2159, 2191), False, 'from my_utils.train_utils import accuracy\n'), ((4124, 4167), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (4149, 4167), False, 'import torch\n'), ((9163, 9206), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (9188, 9206), False, 'import torch\n'), ((9554, 9565), 'time.time', 'time.time', ([], {}), '()\n', (9563, 9565), False, 'import time\n'), ((10415, 10426), 'time.time', 'time.time', ([], {}), '()\n', (10424, 10426), False, 'import time\n'), ((1689, 1748), 'torch.randint', 'torch.randint', (['adaptation_data.shape[0]'], {'size': '(batch_size,)'}), '(adaptation_data.shape[0], size=(batch_size,))\n', (1702, 1748), False, 'import torch\n'), ((2381, 2415), 'Datasets.cwru_data.MAML_Dataset', 'MAML_Dataset', ([], {'mode': 'mode', 'ways': 'ways'}), '(mode=mode, ways=ways)\n', (2393, 2415), False, 'from Datasets.cwru_data import MAML_Dataset\n'), ((3445, 3471), 'os.path.exists', 'os.path.exists', (['model_path'], {}), 
'(model_path)\n', (3459, 3471), False, 'import os\n'), ((4665, 4676), 'time.time', 'time.time', ([], {}), '()\n', (4674, 4676), False, 'import time\n'), ((6933, 6944), 'time.time', 'time.time', ([], {}), '()\n', (6942, 6944), False, 'import time\n'), ((8777, 8798), 'torch.load', 'torch.load', (['load_path'], {}), '(load_path)\n', (8787, 8798), False, 'import torch\n'), ((9665, 9689), 'torch.zeros_like', 'torch.zeros_like', (['p.data'], {}), '(p.data)\n', (9681, 9689), False, 'import torch\n'), ((9800, 9825), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (9813, 9825), False, 'import copy\n'), ((667, 769), 'Models.MAML.maml_model.Net4CNN', 'Net4CNN', ([], {'output_size': 'ways', 'hidden_size': 'h_size', 'layers': 'layers', 'channels': '(1)', 'embedding_size': 'feat_size'}), '(output_size=ways, hidden_size=h_size, layers=layers, channels=1,\n embedding_size=feat_size)\n', (674, 769), False, 'from Models.MAML.maml_model import Net4CNN\n'), ((1184, 1207), 'numpy.arange', 'np.arange', (['(shots * ways)'], {}), '(shots * ways)\n', (1193, 1207), True, 'import numpy as np\n'), ((5258, 5282), 'torch.zeros_like', 'torch.zeros_like', (['p.data'], {}), '(p.data)\n', (5274, 5282), False, 'import torch\n'), ((5403, 5428), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (5416, 5428), False, 'import copy\n'), ((6245, 6270), 'copy.deepcopy', 'copy.deepcopy', (['self.model'], {}), '(self.model)\n', (6258, 6270), False, 'import copy\n'), ((2741, 2840), 'learn2learn.data.transforms.FusedNWaysKShots', 'l2l.data.transforms.FusedNWaysKShots', (['dataset', 'new_ways', '(2 * shots)'], {'filter_labels': 'filter_labels'}), '(dataset, new_ways, 2 * shots,\n filter_labels=filter_labels)\n', (2777, 2840), True, 'import learn2learn as l2l\n'), ((2850, 2887), 'learn2learn.data.transforms.LoadData', 'l2l.data.transforms.LoadData', (['dataset'], {}), '(dataset)\n', (2878, 2887), True, 'import learn2learn as l2l\n'), ((2989, 3043), 
'learn2learn.data.transforms.RemapLabels', 'l2l.data.transforms.RemapLabels', (['dataset'], {'shuffle': '(True)'}), '(dataset, shuffle=True)\n', (3020, 3043), True, 'import learn2learn as l2l\n'), ((3186, 3232), 'learn2learn.data.transforms.ConsecutiveLabels', 'l2l.data.transforms.ConsecutiveLabels', (['dataset'], {}), '(dataset)\n', (3223, 3232), True, 'import learn2learn as l2l\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
r"""Provide the Cartesian CoM acceleration task.
The CoM task tries to impose a desired position of the CoM with respect to the world frame.
.. math:: ||J_{CoM} \dot{q} - (K_p (x_d - x) + \dot{x}_d)||^2
where :math:`J_{CoM}` is the CoM Jacobian, :math:`\dot{q}` are the joint velocities being optimized, :math:`K_p`
is the stiffness gain, :math:`x_d` and :math:`x` are the desired and current cartesian CoM position
respectively, and :math:`\dot{x}_d` is the desired linear velocity of the CoM.
This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=J_{CoM}`,
:math:`x=\dot{q}`, and :math:`b = K_p (x_d - x) + \dot{x}_d`.
Note that you can only specify the center of mass linear velocity if you wish.
The CoM acceleration task tries to impose a desired pose, velocity and acceleration profiles for the CoM with respect
to the world frame.
Before presenting the optimization problem, here is a small reminder. The acceleration is the time derivative of
the velocity, i.e. :math:`a = \frac{dv}{dt}` where the cartesian velocities are related to joint velocities by
:math:`v_{CoM} = J_{CoM}(q) \dot{q}` where :math:`J_{CoM}(q)` is the CoM Jacobian, thus deriving that expression wrt
time gives us:
.. math:: a = \frac{d}{dt} v = \frac{d}{dt} J_{CoM}(q) \dot{q} = J_{CoM}(q) \ddot{q} + \dot{J}_{CoM}(q) \dot{q}.
Now, we can formulate our minimization problem as:
.. math:: || J_{CoM}(q) \ddot{q} + \dot{J}_{CoM} \dot{q} - (a_d + K_d (v_d - v) + K_p (x_d - x)) ||^2,
where :math:`\ddot{q}` are the joint accelerations being optimized, :math:`a_d` are the desired cartesian linear
accelerations, :math:`v_d` and :math:`v` are the desired and current cartesian linear velocities,
:math:`J_{CoM}(q) \in \mathbb{R}^{3 \times N}` is the CoM Jacobian, :math:`K_p` and :math:`K_d` are the stiffness and
damping gains respectively, and :math:`x_d` and :math:`x` are the desired and current cartesian CoM position
respectively.
The above formulation is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting
:math:`A = J_{CoM}(q)`, :math:`x = \ddot{q}`, and
:math:`b = - \dot{J}_{CoM} \dot{q} + (a_d + K_d (v_d - v) + K_p (x_d - x))`.
Inverse dynamics
----------------
Once the optimal joint accelerations :math:`\ddot{q}^*` have been computed, we can use inverse dynamics to
compute the corresponding torques to apply on the joints. This is given by:
.. math:: \tau = H(q) \ddot{q} + N(q,\dot{q})
where :math:`H(q)` is the inertia joint matrix, and N(q, \dot{q}) is a vector force that accounts for all the
other non-linear forces acting on the system (Coriolis, centrifugal, gravity, external forces, friction, etc.).
The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
import numpy as np
from pyrobolearn.priorities.tasks import JointAccelerationTask
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, PyRoboLearn"
__credits__ = ["<NAME> (C++)", "<NAME> (insight)", "<NAME> (Python + doc)"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
class CoMAccelerationTask(JointAccelerationTask):
r"""CoM Acceleration Task
The CoM task tries to impose a desired position of the CoM with respect to the world frame.
.. math:: ||J_{CoM} \dot{q} - (K_p (x_d - x) + \dot{x}_d)||^2
where :math:`J_{CoM}` is the CoM Jacobian, :math:`\dot{q}` are the joint velocities being optimized, :math:`K_p`
is the stiffness gain, :math:`x_d` and :math:`x` are the desired and current cartesian CoM position
respectively, and :math:`\dot{x}_d` is the desired linear velocity of the CoM.
This is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting :math:`A=J_{CoM}`,
:math:`x=\dot{q}`, and :math:`b = K_p (x_d - x) + \dot{x}_d`.
Note that you can only specify the center of mass linear velocity if you wish.
The CoM acceleration task tries to impose a desired pose, velocity and acceleration profiles for the CoM wrt the
world frame.
Before presenting the optimization problem, here is a small reminder. The acceleration is the time derivative of
the velocity, i.e. :math:`a = \frac{dv}{dt}` where the cartesian velocities are related to joint velocities by
:math:`v_{CoM} = J_{CoM}(q) \dot{q}` where :math:`J_{CoM}(q)` is the CoM Jacobian, thus deriving that expression
wrt time gives us:
.. math:: a = \frac{d}{dt} v = \frac{d}{dt} J_{CoM}(q) \dot{q} = J_{CoM}(q) \ddot{q} + \dot{J}_{CoM}(q) \dot{q}.
Now, we can formulate our minimization problem as:
.. math:: || J_{CoM}(q) \ddot{q} + \dot{J}_{CoM} \dot{q} - (a_d + K_d (v_d - v) + K_p (x_d - x)) ||^2,
where :math:`\ddot{q}` are the joint accelerations being optimized, :math:`a_d` are the desired cartesian linear
accelerations, :math:`v_d` and :math:`v` are the desired and current cartesian linear velocities,
:math:`J_{CoM}(q) \in \mathbb{R}^{3 \times N}` is the CoM Jacobian, :math:`K_p` and :math:`K_d` are the stiffness
and damping gains respectively, and :math:`x_d` and :math:`x` are the desired and current cartesian CoM position
respectively.
The above formulation is equivalent to the QP objective function :math:`||Ax - b||^2`, by setting
:math:`A = J_{CoM}(q)`, :math:`x = \ddot{q}`, and
:math:`b = - \dot{J}_{CoM} \dot{q} + (a_d + K_d (v_d - v) + K_p (x_d - x))`.
Inverse dynamics
----------------
Once the optimal joint accelerations :math:`\ddot{q}^*` have been computed, we can use inverse dynamics to
compute the corresponding torques to apply on the joints. This is given by:
    .. math:: \tau = H(q) \ddot{q} + N(q,\dot{q})
where :math:`H(q)` is the inertia joint matrix, and N(q, \dot{q}) is a vector force that accounts for all the
other non-linear forces acting on the system (Coriolis, centrifugal, gravity, external forces, friction, etc.).
The implementation of this class is inspired by [1] (which is licensed under the LGPLv2).
References:
- [1] "OpenSoT: A whole-body control library for the compliant humanoid robot COMAN", Rocchi et al., 2015
"""
    def __init__(self, model, desired_position=None, desired_velocity=None, desired_acceleration=None, kp=1., kd=1.,
                 weight=1., constraints=[]):
        """
        Initialize the task.
        Args:
            model (ModelInterface): model interface.
            desired_position (np.array[float[3]], None): desired CoM position. If None, the position feedback
              term is disabled in `_update` (the setter keeps None as-is).
            desired_velocity (np.array[float[3]], None): desired CoM linear velocity. If None, it will be set to 0.
            desired_acceleration (np.array[float[3]], None): desired CoM linear acceleration. If None, it will be set
              to 0.
            kp (float, np.array[float[3,3]]): position gain.
            kd (float, np.array[float[3,3]]): linear velocity gain.
            weight (float, np.array[float[3,3]]): weight scalar or matrix associated to the task.
            constraints (list[Constraint]): list of constraints associated with the task.
        """
        # NOTE(review): `constraints=[]` is a mutable default argument; if the base
        # class ever mutates the list in place it would be shared across instances.
        super(CoMAccelerationTask, self).__init__(model=model, weight=weight, constraints=constraints)
        # define variable
        # The gains go through their property setters, which validate type and shape.
        self.kp = kp
        self.kd = kd
        # define desired references (also validated/normalized by the property setters)
        self.desired_position = desired_position
        self.desired_velocity = desired_velocity
        self.desired_acceleration = desired_acceleration
        # first update: populate self._A / self._b immediately
        self.update()
##############
# Properties #
##############
    @property
    def desired_position(self):
        """Get the desired CoM position."""
        return self._des_pos

    @desired_position.setter
    def desired_position(self, position):
        """Set the desired CoM position.

        Unlike the velocity/acceleration setters, ``None`` is deliberately kept
        as-is here: a ``None`` position disables the position feedback term in
        ``_update``.
        """
        if position is not None:
            if not isinstance(position, (np.ndarray, list, tuple)):
                raise TypeError("Expecting the given desired position to be a np.array, instead got: "
                                "{}".format(type(position)))
            position = np.asarray(position)
            if len(position) != 3:
                raise ValueError("Expecting the given desired position array to be of length 3, but instead got: "
                                 "{}".format(len(position)))
        self._des_pos = position
@property
def desired_velocity(self):
"""Get the desired CoM linear velocity."""
return self._des_vel
@desired_velocity.setter
def desired_velocity(self, velocity):
"""Set the desired CoM linear velocity."""
if velocity is None:
velocity = np.zeros(3)
elif not isinstance(velocity, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired linear velocity to be a np.array, instead got: "
"{}".format(type(velocity)))
velocity = np.asarray(velocity)
if len(velocity) != 3:
raise ValueError("Expecting the given desired linear velocity array to be of length 3, but instead "
"got: {}".format(len(velocity)))
self._des_vel = velocity
@property
def desired_acceleration(self):
"""Get the desired CoM linear acceleration."""
return self._des_acc
@desired_acceleration.setter
def desired_acceleration(self, acceleration):
"""Set the desired CoM linear acceleration."""
if acceleration is None:
acceleration = np.zeros(3)
elif not isinstance(acceleration, (np.ndarray, list, tuple)):
raise TypeError("Expecting the given desired linear acceleration to be a np.array, instead got: "
"{}".format(type(acceleration)))
acceleration = np.asarray(acceleration)
if len(acceleration) != 3:
raise ValueError("Expecting the given desired linear acceleration array to be of length 3, but instead "
"got: {}".format(len(acceleration)))
self._des_acc = acceleration
    @property
    def x_desired(self):
        """Get the desired CoM position (short alias for :attr:`desired_position`)."""
        return self._des_pos

    @x_desired.setter
    def x_desired(self, x_d):
        """Set the desired CoM position (delegates to the validating setter)."""
        self.desired_position = x_d

    @property
    def dx_desired(self):
        """Get the desired CoM linear velocity (short alias for :attr:`desired_velocity`)."""
        return self._des_vel

    @dx_desired.setter
    def dx_desired(self, dx_d):
        """Set the desired CoM linear velocity (delegates to the validating setter)."""
        self.desired_velocity = dx_d

    @property
    def ddx_desired(self):
        """Get the desired CoM linear acceleration (short alias for :attr:`desired_acceleration`)."""
        return self._des_acc

    @ddx_desired.setter
    def ddx_desired(self, ddx_d):
        """Set the desired CoM linear acceleration (delegates to the validating setter)."""
        self.desired_acceleration = ddx_d
@property
def kp(self):
"""Return the position gain."""
return self._kp
@kp.setter
def kp(self, kp):
"""Set the position gain."""
if kp is None:
kp = 1.
if not isinstance(kp, (float, int, np.ndarray)):
raise TypeError("Expecting the given position gain kp to be an int, float, np.array, instead got: "
"{}".format(type(kp)))
if isinstance(kp, np.ndarray) and kp.shape != (3, 3):
raise ValueError("Expecting the given position gain matrix kp to be of shape {}, but instead got "
"shape: {}".format((self.x_size, self.x_size), kp.shape))
self._kp = kp
@property
def kd(self):
"""Return the linear velocity gain."""
return self._kd
@kd.setter
def kd(self, kd):
"""Set the linear velocity gain."""
if kd is None:
kd = 1.
if not isinstance(kd, (float, int, np.ndarray)):
raise TypeError("Expecting the given linear velocity gain kd to be an int, float, np.array, "
"instead got: {}".format(type(kd)))
if isinstance(kd, np.ndarray) and kd.shape != (3, 3):
raise ValueError("Expecting the given linear velocity gain matrix kd to be of shape {}, but "
"instead got shape: {}".format((3, 3), kd.shape))
self._kd = kd
###########
# Methods #
###########
    def set_desired_references(self, x_des, dx_des=None, ddx_des=None, *args, **kwargs):
        """Set the desired references.
        Args:
            x_des (np.array[float[3]], None): desired CoM position. If None, the position feedback term is
              disabled.
            dx_des (np.array[float[3]], None): desired CoM linear velocity. If None, it will be set to 0.
            ddx_des (np.array[float[3]], None): desired CoM linear acceleration. If None, it will be set to 0.
        """
        # Delegates to the alias properties, which route through the validating setters.
        self.x_desired = x_des
        self.dx_desired = dx_des
        self.ddx_desired = ddx_des
    def get_desired_references(self):
        """Return the desired references.
        Returns:
            np.array[float[3]]: desired CoM position.
            np.array[float[3]]: desired CoM linear velocity.
            np.array[float[3]]: desired CoM linear acceleration.
        """
        return self.x_desired, self.dx_desired, self.ddx_desired
def _update(self, x=None):
"""
Update the task by computing the A matrix and b vector that will be used by the task solver.
"""
self._A = self.model.get_com_jacobian(full=False) # shape: (3, N)
jdotqdot = self.model.compute_com_JdotQdot()[:3] # shape: (3,)
vel = self.model.get_com_velocity() # shape: (3,)
self._b = -jdotqdot + self._des_acc + np.dot(self.kd, (self._des_vel - vel)) # shape: (3,)
if self._des_pos is not None:
x = self.model.get_com_position()
self._b = self._b + np.dot(self.kp, (self._des_pos - x)) # shape: (3,)
| [
"numpy.dot",
"numpy.asarray",
"numpy.zeros"
] | [((9151, 9171), 'numpy.asarray', 'np.asarray', (['velocity'], {}), '(velocity)\n', (9161, 9171), True, 'import numpy as np\n'), ((10021, 10045), 'numpy.asarray', 'np.asarray', (['acceleration'], {}), '(acceleration)\n', (10031, 10045), True, 'import numpy as np\n'), ((8324, 8344), 'numpy.asarray', 'np.asarray', (['position'], {}), '(position)\n', (8334, 8344), True, 'import numpy as np\n'), ((8891, 8902), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (8899, 8902), True, 'import numpy as np\n'), ((9745, 9756), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (9753, 9756), True, 'import numpy as np\n'), ((13852, 13888), 'numpy.dot', 'np.dot', (['self.kd', '(self._des_vel - vel)'], {}), '(self.kd, self._des_vel - vel)\n', (13858, 13888), True, 'import numpy as np\n'), ((14023, 14057), 'numpy.dot', 'np.dot', (['self.kp', '(self._des_pos - x)'], {}), '(self.kp, self._des_pos - x)\n', (14029, 14057), True, 'import numpy as np\n')] |
# Copyright (c) 2019 <NAME>. All rights reserved.
import numpy as np
from sdf import Group, Dataset
import scipy.io
# extract strings from the matrix
# extract strings from the matrix
# (idiom fix: named defs instead of lambda assignments, per PEP 8 / E731)
def strMatNormal(a):
    """Join each row of a char matrix into a string, stripping trailing blanks."""
    return [''.join(s).rstrip() for s in a]


def strMatTrans(a):
    """Join each *column* of a char matrix into a string, stripping trailing blanks."""
    return [''.join(s).rstrip() for s in zip(*a)]
def _split_description(comment):
    """Split a variable description into ``(unit, display_unit, comment, info)``.

    A trailing ``[...]`` holds the unit string; inside it, ``:#(key=value)``
    segments carry extra metadata and ``unit|display`` separates a display unit.
    """
    unit = None
    display_unit = None
    info = {}
    if comment.endswith(']'):
        bracket = comment.rfind('[')
        unit = comment[bracket + 1:-1]
        comment = comment[:bracket].strip()
    if unit is not None:
        if ':#' in unit:
            unit, *extras = unit.split(':#')
            for segment in extras:
                key, value = segment[1:-1].split('=')
                info[key] = value
        if '|' in unit:
            unit, display_unit = unit.split('|')
    return unit, display_unit, comment, info
def load(filename, objectname):
    """Load *filename* and return the SDF object addressed by *objectname*.

    ``'/'`` returns the root group; any other slash-separated path is walked
    down from the root, skipping empty segments.
    """
    root = _load_mat(filename)
    if objectname == '/':
        return root
    obj = root
    for segment in objectname.split('/'):
        if segment:
            obj = obj[segment]
    return obj
def _load_mat(filename):
    """Load a Dymola/OpenModelica result ``.mat`` file and build an SDF tree.

    Handles the 'binTrans' and 'binNormal' layouts of Aclass version 1.1;
    version 1.0 files are only partially handled (see NOTE below).
    """
    mat = scipy.io.loadmat(filename, chars_as_strings=False)
    _vars = {}
    _blocks = []
    try:
        fileInfo = strMatNormal(mat['Aclass'])
    except KeyError:
        raise Exception('File structure not supported!')
    if fileInfo[1] == '1.1':
        if fileInfo[3] == 'binTrans':
            # usually files from OpenModelica or Dymola auto saved,
            # all methods rely on this structure since this was the only
            # one understand by earlier versions
            names = strMatTrans(mat['name'])  # names
            descr = strMatTrans(mat['description'])  # descriptions
            cons = mat['data_1']              # constant/parameter values
            traj = mat['data_2']              # time-varying trajectories
            d = mat['dataInfo'][0, :]         # which data matrix each variable lives in
            x = mat['dataInfo'][1, :]         # signed, 1-based column index
        elif fileInfo[3] == 'binNormal':
            # usually files from dymola, save as...,
            # variables are mapped to the structure above ('binTrans')
            names = strMatNormal(mat['name'])  # names
            descr = strMatNormal(mat['description'])  # descriptions
            cons = mat['data_1'].T
            traj = mat['data_2'].T
            d = mat['dataInfo'][:, 0]
            x = mat['dataInfo'][:, 1]
        else:
            raise Exception('File structure not supported!')
        c = np.abs(x) - 1  # column (0-based)
        s = np.sign(x)  # sign
        vars = zip(names, descr, d, c, s)
    elif fileInfo[1] == '1.0':
        # files generated with dymola, save as..., only plotted ...
        # fake the structure of a 1.1 transposed file
        names = strMatNormal(mat['names'])  # names
        _blocks.append(0)
        mat['data_0'] = mat['data'].transpose()
        del mat['data']
        _absc = (names[0], '')
        for i in range(1, len(names)):
            _vars[names[i]] = ('', 0, i, 1)
        # NOTE(review): this branch never assigns `vars`, so the loop below
        # would iterate the `vars` builtin and fail with a TypeError --
        # confirm whether 1.0 files are actually supported.
    else:
        raise Exception('File structure not supported!')
    # build the SDF tree
    g_root = Group('/')
    ds_time = None
    for name, desc, d, c, s in vars:
        unit, display_unit, comment, info = _split_description(desc)
        # Dotted variable names become nested groups; the leaf is the dataset.
        path = name.split('.')
        g_parent = g_root
        for segment in path[:-1]:
            if segment in g_parent:
                g_parent = g_parent[segment]
            else:
                g_child = Group(segment)
                g_parent.groups.append(g_child)
                g_parent = g_child
            pass
        if d == 1:
            # constant/parameter: single value (sign restored from the index)
            data = cons[c, 0] * s
        else:
            # trajectory: full time series
            data = traj[c, :] * s
        if 'type' in info:
            # NOTE(review): `== 'Integer' or 'Boolean'` is always truthy ('Boolean'
            # is a non-empty string), so every variable with a 'type' entry is cast
            # to int32 -- presumably `in ('Integer', 'Boolean')` was intended.
            if info['type'] == 'Integer' or 'Boolean':
                data = np.asarray(data, dtype=np.int32)
        if d == 0:
            # the abscissa (time) dataset; later datasets use it as their scale
            ds = Dataset(path[-1], comment="Simulation time", unit=unit, display_unit=display_unit, data=data)
            ds_time = ds
        elif d == 1:
            ds = Dataset(path[-1], comment=comment, unit=unit, display_unit=display_unit, data=data)
        else:
            ds = Dataset(path[-1], comment=comment, unit=unit, display_unit=display_unit, data=data, scales=[ds_time])
        g_parent.datasets.append(ds)
    return g_root
| [
"numpy.abs",
"numpy.asarray",
"numpy.sign",
"sdf.Dataset",
"sdf.Group"
] | [((3193, 3203), 'sdf.Group', 'Group', (['"""/"""'], {}), "('/')\n", (3198, 3203), False, 'from sdf import Group, Dataset\n'), ((2591, 2601), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (2598, 2601), True, 'import numpy as np\n'), ((2554, 2563), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (2560, 2563), True, 'import numpy as np\n'), ((3975, 4073), 'sdf.Dataset', 'Dataset', (['path[-1]'], {'comment': '"""Simulation time"""', 'unit': 'unit', 'display_unit': 'display_unit', 'data': 'data'}), "(path[-1], comment='Simulation time', unit=unit, display_unit=\n display_unit, data=data)\n", (3982, 4073), False, 'from sdf import Group, Dataset\n'), ((3567, 3581), 'sdf.Group', 'Group', (['segment'], {}), '(segment)\n', (3572, 3581), False, 'from sdf import Group, Dataset\n'), ((3902, 3934), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'np.int32'}), '(data, dtype=np.int32)\n', (3912, 3934), True, 'import numpy as np\n'), ((4135, 4222), 'sdf.Dataset', 'Dataset', (['path[-1]'], {'comment': 'comment', 'unit': 'unit', 'display_unit': 'display_unit', 'data': 'data'}), '(path[-1], comment=comment, unit=unit, display_unit=display_unit,\n data=data)\n', (4142, 4222), False, 'from sdf import Group, Dataset\n'), ((4252, 4357), 'sdf.Dataset', 'Dataset', (['path[-1]'], {'comment': 'comment', 'unit': 'unit', 'display_unit': 'display_unit', 'data': 'data', 'scales': '[ds_time]'}), '(path[-1], comment=comment, unit=unit, display_unit=display_unit,\n data=data, scales=[ds_time])\n', (4259, 4357), False, 'from sdf import Group, Dataset\n')] |
""" Utilies to uniform marker names
"""
import logging
import numpy as np
import pandas as pd
import pathlib
from ..model.mapping import OTHER_FEATHERS
log = logging.getLogger(__name__)
# Directory containing this module; used to locate the bundled markers table.
module = pathlib.Path(__file__).absolute().parent
# Default path to the stock marker definitions shipped with the package.
PATH_TO_MARKERS = str(pathlib.Path.joinpath(module, 'markers.tsv'))
class Markers(object):
    """ cycif markers
    Wraps the stock marker table (``markers.tsv``) and provides lookups from
    alias names to canonical database entries.
    Parameters
    -----------
    path_to_markers: str or None.
        The file path to the `markers.tsv`.
    """
    def __init__(self, path_to_markers=None):
        self.path_to_markers = path_to_markers
        # Fall back to the bundled markers.tsv when no explicit path is given.
        if self.path_to_markers:
            self._path = self.path_to_markers
        else:
            self._path = PATH_TO_MARKERS
        markers_df = pd.read_csv(self._path, sep='\t', dtype=str).fillna('')
        # Columns whose combination uniquely identifies a marker row.
        self.unique_keys = ['name', 'fluor', 'anti', 'duplicate']
        self._check_duplicate(markers_df)
        self._load_stock_markers()
    def _check_duplicate(self, df):
        """ check marker duplicate.
        Raises if two rows share the same (name, fluor, anti, duplicate)
        combination; otherwise stores ``df`` as ``self.markers_df``.
        """
        # check uniqueness of all stock markers
        duplicate_markers = df.duplicated(subset=self.unique_keys, keep=False)
        if duplicate_markers.any():
            raise Exception("Duplicate markers found in `markers.tsv`: %s"
                            % df[duplicate_markers])
        log.info("Loaded %d unique stock markers." % df.shape[0])
        self.markers_df = df
    def _load_stock_markers(self):
        """ Load `markers.tsv` into a python dictionary and convert to
        alias: row-index format.
        """
        # Map every comma-separated alias to the index of its table row.
        # NOTE(review): keys are normalized with lower().strip() while lookups
        # go through format_marker() (which also removes inner whitespace and
        # maps '-' to '_'); aliases containing spaces or dashes may therefore
        # never match -- confirm against the contents of markers.tsv.
        self.markers = {alias.lower().strip(): i for i, v in enumerate(
            self.markers_df['aliases']) for alias in v.split(',')}
        log.info("Converted to %d pairs of `marker: db_marker`."
                 % len(self.markers))
        other_features = OTHER_FEATHERS
        log.info("Loaded %d unique DB column names for non-marker features."
                 % len(other_features))
        # Invert {db_name: [display names]} into {lowercased display name: db_name}.
        self.other_features = \
            {name.lower(): k for k, v in other_features.items()
             for name in v}
    def get_marker_db_entry(self, marker):
        """ Get database ingestion entry for a marker, support various
        alias names.
        Parameters
        ----------
        marker: str
            The name of a marker, which can be common name or alias.
        Returns
        ----------
        Tuple, or None if the name doesn't exist in the `markers.tsv` file.
        """
        marker = format_marker(marker)
        id = self.markers.get(marker, None)
        if id is None:
            # NOTE(review): Logger.warn is deprecated in favor of Logger.warning.
            log.warn(f"The marker name `{marker}` was not recognized!")
            return
        rval = tuple(self.markers_df.loc[id, self.unique_keys])
        return rval
    def get_other_feature_db_name(self, name):
        """ Get formatted database name to a non-marker features.
        Parameters
        ----------
        name: str
            The name of a non-marker feature.
        Returns
        ----------
        str, or None if the name doesn't exist self.other_features.
        """
        name = format_marker(name)
        rval = self.other_features.get(name, None)
        if not rval:
            # NOTE(review): Logger.warn is deprecated in favor of Logger.warning.
            log.warn(f"The feature name `{name}` was not recognized!")
        return rval
    def update_stock_markers(self, new_markers, toplace=None, reload=False):
        """ Update `markers.tsv`.
        Arguments
        ---------
        new_markers: tuple, list or list of lists.
            In (name, fluor, anti, duplicate, aliases) format.
        toplace: None or str, default is None.
            The path to save the updated marker dataframe. When toplace
            is None, it's the original path + '.new'.
        reload: boolean, default is False.
            Whether to reload the updated markers/features.
        """
        if not isinstance(new_markers, (list, tuple)):
            raise ValueError("`new_markers` must be list, tuple or list of "
                             "lists datatype!")
        # Normalize a single entry into a list of entries.
        if not isinstance(new_markers[0], (list, tuple)):
            new_markers = [new_markers]
        df = self.markers_df.copy()
        start = df.shape[0]
        for i, mkr in enumerate(new_markers):
            # A row "matches" when all four unique-key columns agree
            # (missing values in the entry compare as the empty string).
            marker_mask = [(df.loc[:, x] == (y or ''))
                           for x, y in zip(self.unique_keys, mkr)]
            marker_mask = np.logical_and.reduce(marker_mask)
            if marker_mask.any():
                # Existing marker: append only aliases that are not known yet.
                for alias in mkr[-1].split(','):
                    if format_marker(alias) not in self.markers:
                        df.loc[marker_mask, 'aliases'] += ', ' + alias
            else:
                # New marker: append as a fresh row.
                df.loc[start+i] = mkr
        self._check_duplicate(df)
        if not toplace:
            toplace = self._path + '.new'
        df.to_csv(toplace, sep='\t', index=False)
        if reload:
            self._path = toplace
            self._load_stock_markers()
            log.info("Marker/Feature list is updated!")
def format_marker(name):
    """Normalize a marker name: lowercase, drop all whitespace, map '-' to '_'."""
    lowered = name.lower()
    collapsed = ''.join(lowered.split())
    return collapsed.replace('-', '_')
| [
"pandas.read_csv",
"numpy.logical_and.reduce",
"pathlib.Path",
"pathlib.Path.joinpath",
"logging.getLogger"
] | [((161, 188), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (178, 188), False, 'import logging\n'), ((262, 306), 'pathlib.Path.joinpath', 'pathlib.Path.joinpath', (['module', '"""markers.tsv"""'], {}), "(module, 'markers.tsv')\n", (283, 306), False, 'import pathlib\n'), ((199, 221), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (211, 221), False, 'import pathlib\n'), ((4370, 4404), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['marker_mask'], {}), '(marker_mask)\n', (4391, 4404), True, 'import numpy as np\n'), ((723, 767), 'pandas.read_csv', 'pd.read_csv', (['self._path'], {'sep': '"""\t"""', 'dtype': 'str'}), "(self._path, sep='\\t', dtype=str)\n", (734, 767), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
This code uses a user-defined database, image directory, image file
extension, and darkframe to loop through the Tetra database and generate
inputs for the opencv camera calibration routine
"""
################################
#LOAD LIBRARIES
################################
import os
import sys
import cv2
import json
import time
import pathlib
import numpy as np
from PIL import Image
from scipy.io import savemat
from tetra3_Cal import Tetra3
################################
#USER INPUT
################################
# Name of the Tetra star-pattern database (.npz) to load or generate.
db_name = 'test_db'
# Directory containing the calibration images ('' = current directory).
path_to_images = ''
image_file_extension = '.jpg'
# Optional darkframe image inside path_to_images; '' disables subtraction.
darkframe_filename = ''
# Full-angle field-of-view guess in degrees; used for both database generation
# and as the solver's fov_estimate.
estimated_full_angle_fov = 20
verbose = True
################################
#MAIN CODE
################################
# figure out if a database exists for Tetra
if isinstance(db_name, str):
    path = (pathlib.Path(__file__).parent / db_name).with_suffix('.npz')
else:
    # NOTE(review): in this branch `path` is read before it is ever assigned,
    # which would raise NameError -- presumably db_name was meant here.
    path = pathlib.Path(path).with_suffix('.npz')
# if not, create one
if not os.path.exists(path):
    print("\nUnable to find specified database: " + str(path))
    print("  Generating database...")
    t3 = Tetra3() #this initializes stuff
    t3.generate_database(max_fov = estimated_full_angle_fov, save_as=db_name) #this builds a database
    print("  ...complete\n\n")
# init Tetra w/ database
t3 = Tetra3(db_name)
# find darkframe; after this block darkframe_filename is either a full path
# to an existing file or None (subtraction disabled).
if darkframe_filename == '':
    print("\nDarkframe file not provided, proceeding without darkframe subtraction.\n")
    darkframe_filename = None
else:
    darkframe_filename = os.path.join(path_to_images, darkframe_filename)
    if os.path.exists(darkframe_filename):
        print('\nDarkframe found! Applying to calibration images...\n')
    else:
        darkframe_filename = None
        print("\nUnable to find darkframe, proceeding without darkframe subtraction.\n")
# define variables and start processing
path = pathlib.Path(path_to_images)
solution_list = []
# Accumulators seeded with one dummy row each (removed again before calibration).
AllProjs = np.array([[0,0,0]])
AllCents = np.array([[0,0]])
num_images = 0
for impath in path.glob('*'+image_file_extension):
    num_images+=1
    print('Attempting to solve image: ' + str(impath))
    start_time = time.time()
    img = cv2.imread(str(impath), cv2.IMREAD_GRAYSCALE)
    if img is None:
        print("ERROR ["+str(__name__)+"]:Image file "+impath+" does not exist in path. ")
        sys.exit()
    if verbose: print('Loaded image in {} seconds'.format(time.time()-start_time))
    image_size = (img.shape[1],img.shape[0]) # tuple required, input args flipped
    if darkframe_filename is not None:
        darkframe = cv2.imread(darkframe_filename, cv2.IMREAD_GRAYSCALE)
        img = cv2.subtract(img, darkframe)
    solved = t3.solve_from_image(img, fov_estimate=estimated_full_angle_fov) # Adding e.g. fov_estimate=11.4, fov_max_error=.1 may improve performance
    try:
        Vecs = []
        ProjVecs = []
        R = solved['Rmat']
        Cents = np.array(solved['MatchCentroids'])
        Cents[:,[0,1]] = Cents[:,[1,0]]
        #print(Cents) #Swap rows because tetra uses centroids in (y,x)
        AllCents = np.append(AllCents, Cents, axis = 0)
        i=0
        # Fixed +90deg-about-Y then -90deg-about-Z rotations re-expressing the
        # solved inertial-frame vectors in the camera frame.
        angY = 90 * (np.pi/180)
        RotY = np.array([[np.cos(angY), 0, -np.sin(angY)],
                [0,1,0],
                [np.sin(angY), 0, np.cos(angY)]])
        angZ = -90 * (np.pi/180)
        RotZ = np.array([[np.cos(angZ), -np.sin(angZ), 0 ],
                [np.sin(angZ), np.cos(angZ), 0],
                [0,0,1]])
        for tup in solved['MatchVectors']:
            v = np.array(tup[1]).transpose()
            #print(v)
            vcam = np.dot(RotZ, np.dot(RotY, np.dot(R, v)))
            #Project Vector onto z = 1
            proj = np.array([(vcam[0]/vcam[2]),(vcam[1]/vcam[2]), 0])
            #print(proj)
            AllProjs = np.append(AllProjs, [proj], axis = 0)
            ProjVecs.append(proj)
            Vecs.append(vcam)
            i+=1
        imgdict = {'R_inertial_to_camera_guess': R, 'uvd_meas': Cents, 'CAT_FOV':Vecs, 'Projections':ProjVecs, 'NumStars': len(Vecs), 'FOV': solved['FOV']}
        solution_list.append(imgdict)
        print(" ...success! (took " +str(time.time()-start_time)[:8]+" seconds)")
    # NOTE(review): bare `except:` swallows every failure reason (including
    # KeyboardInterrupt); `except Exception as e:` with the error printed
    # would make failed solves diagnosable.
    except:
        print('  ...FAILED to solve \n')
# if some of the images were solved, use the solutions to calibrate the camera
if len(solution_list) > 0:
    print("\n\nFound ("+str(len(solution_list))+") solutions out of (" + str(num_images)+") images\n")
    solved = solution_list[0] # these seem to be fairly consistent, though one day we may want to average all
    fovguess = solved['FOV']
    # Initial focal-length guess in pixels derived from the solved field of view.
    flpix = image_size[0]/(2*np.tan(np.deg2rad(fovguess)/2))
    matGuess = np.array([[flpix, 0,(image_size[0]/2) - 0.5 ], [0, flpix, (image_size[1]/2)-0.5], [0, 0, 1]])
    # Drop the dummy seed rows added before the processing loop.
    AllCents = np.delete(AllCents, 0, axis = 0).astype('float32')
    AllProjs = np.delete(AllProjs, 0, axis = 0).astype('float32')
    RMS_reproj_err_pix, mtx, dist, rvecs, tvecs = cv2.calibrateCamera([AllProjs], [AllCents], image_size, matGuess, None, flags=(cv2.CALIB_USE_INTRINSIC_GUESS+cv2.CALIB_FIX_PRINCIPAL_POINT))
    # NOTE(review): the 4.8e-3 factors presumably encode a 4.8 um pixel pitch --
    # confirm for the actual detector.
    [fovx, fovy, fl, pp, ar] = cv2.calibrationMatrixValues(mtx, image_size, 4.8e-3*image_size[0], 4.8e-3*image_size[1] ) # Note that this may not work without an accurate detector size (Args 3 and 4)
    print("\nReprojection Error (pix, RMS): " + str(RMS_reproj_err_pix))
    print("\nFoV (x,y): "+str(fovx)+", "+str(fovy))
    print("Focal Length (pix): " + str(flpix))
    print("Focal Length (mm): " + str(fl))
    print("\nCamera Matrix: "+ str(mtx))
    print("\nDist: " + str(dist))
    print("\nRvecs: " + str(rvecs))
    print("\nTvecs: " + str(tvecs))
    print("")
    # populate dict
    dist_l=dist.tolist()
    newcameramtx_l = mtx.tolist()
    # in this case, cy is vp and cx is up.
    cam_cal_dict = {'camera_matrix': newcameramtx_l, 'dist_coefs': dist_l, 'resolution':[image_size[0],image_size[1]], 'camera_model':'Brown','k1':dist_l[0][0],'k2':dist_l[0][1],'k3':dist_l[0][4],'p1':dist_l[0][2],'p2':dist_l[0][3],'fx':newcameramtx_l[0][0],'fy':newcameramtx_l[1][1],'up':newcameramtx_l[0][2],'vp':newcameramtx_l[1][2],'skew':newcameramtx_l[0][1], 'RMS_reproj_err_pix':RMS_reproj_err_pix}
    # save
    usr_in = 'generic_cam_params'
    usr_in_split = usr_in.split('.json')
    usr_in = usr_in_split[0]
    # Walk four directories up from the CWD to reach the repo root, then data/cam_config.
    cam_config_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.getcwd())))),'data','cam_config')
    full_cam_file_path = os.path.join(cam_config_dir,usr_in+'.json')
    with open(full_cam_file_path, 'w') as fp:
        json.dump(cam_cal_dict, fp, indent=2)
    print("\n\nCamera parameter file saved to: " + str(full_cam_file_path) +"\n\n")
else: print("\n\n\nNo solutions found (at all). Exiting unsuccessfully...\n\n\n")
| [
"pathlib.Path",
"numpy.sin",
"os.path.join",
"tetra3_Cal.Tetra3",
"cv2.subtract",
"os.path.exists",
"numpy.append",
"cv2.calibrationMatrixValues",
"json.dump",
"numpy.cos",
"cv2.calibrateCamera",
"numpy.dot",
"numpy.delete",
"sys.exit",
"numpy.deg2rad",
"os.getcwd",
"time.time",
"c... | [((1360, 1375), 'tetra3_Cal.Tetra3', 'Tetra3', (['db_name'], {}), '(db_name)\n', (1366, 1375), False, 'from tetra3_Cal import Tetra3\n'), ((1918, 1946), 'pathlib.Path', 'pathlib.Path', (['path_to_images'], {}), '(path_to_images)\n', (1930, 1946), False, 'import pathlib\n'), ((1977, 1998), 'numpy.array', 'np.array', (['[[0, 0, 0]]'], {}), '([[0, 0, 0]])\n', (1985, 1998), True, 'import numpy as np\n'), ((2008, 2026), 'numpy.array', 'np.array', (['[[0, 0]]'], {}), '([[0, 0]])\n', (2016, 2026), True, 'import numpy as np\n'), ((1025, 1045), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1039, 1045), False, 'import os\n'), ((1159, 1167), 'tetra3_Cal.Tetra3', 'Tetra3', ([], {}), '()\n', (1165, 1167), False, 'from tetra3_Cal import Tetra3\n'), ((1572, 1620), 'os.path.join', 'os.path.join', (['path_to_images', 'darkframe_filename'], {}), '(path_to_images, darkframe_filename)\n', (1584, 1620), False, 'import os\n'), ((1628, 1662), 'os.path.exists', 'os.path.exists', (['darkframe_filename'], {}), '(darkframe_filename)\n', (1642, 1662), False, 'import os\n'), ((2195, 2206), 'time.time', 'time.time', ([], {}), '()\n', (2204, 2206), False, 'import time\n'), ((4877, 4976), 'numpy.array', 'np.array', (['[[flpix, 0, image_size[0] / 2 - 0.5], [0, flpix, image_size[1] / 2 - 0.5],\n [0, 0, 1]]'], {}), '([[flpix, 0, image_size[0] / 2 - 0.5], [0, flpix, image_size[1] / 2 -\n 0.5], [0, 0, 1]])\n', (4885, 4976), True, 'import numpy as np\n'), ((5154, 5298), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['[AllProjs]', '[AllCents]', 'image_size', 'matGuess', 'None'], {'flags': '(cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_PRINCIPAL_POINT)'}), '([AllProjs], [AllCents], image_size, matGuess, None,\n flags=cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_FIX_PRINCIPAL_POINT)\n', (5173, 5298), False, 'import cv2\n'), ((5326, 5422), 'cv2.calibrationMatrixValues', 'cv2.calibrationMatrixValues', (['mtx', 'image_size', '(0.0048 * image_size[0])', '(0.0048 * image_size[1])'], 
{}), '(mtx, image_size, 0.0048 * image_size[0], 0.0048 *\n image_size[1])\n', (5353, 5422), False, 'import cv2\n'), ((6677, 6723), 'os.path.join', 'os.path.join', (['cam_config_dir', "(usr_in + '.json')"], {}), "(cam_config_dir, usr_in + '.json')\n", (6689, 6723), False, 'import os\n'), ((2398, 2408), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2406, 2408), False, 'import sys\n'), ((2650, 2702), 'cv2.imread', 'cv2.imread', (['darkframe_filename', 'cv2.IMREAD_GRAYSCALE'], {}), '(darkframe_filename, cv2.IMREAD_GRAYSCALE)\n', (2660, 2702), False, 'import cv2\n'), ((2721, 2749), 'cv2.subtract', 'cv2.subtract', (['img', 'darkframe'], {}), '(img, darkframe)\n', (2733, 2749), False, 'import cv2\n'), ((3020, 3054), 'numpy.array', 'np.array', (["solved['MatchCentroids']"], {}), "(solved['MatchCentroids'])\n", (3028, 3054), True, 'import numpy as np\n'), ((3198, 3232), 'numpy.append', 'np.append', (['AllCents', 'Cents'], {'axis': '(0)'}), '(AllCents, Cents, axis=0)\n', (3207, 3232), True, 'import numpy as np\n'), ((6775, 6812), 'json.dump', 'json.dump', (['cam_cal_dict', 'fp'], {'indent': '(2)'}), '(cam_cal_dict, fp, indent=2)\n', (6784, 6812), False, 'import json\n'), ((957, 975), 'pathlib.Path', 'pathlib.Path', (['path'], {}), '(path)\n', (969, 975), False, 'import pathlib\n'), ((3856, 3907), 'numpy.array', 'np.array', (['[vcam[0] / vcam[2], vcam[1] / vcam[2], 0]'], {}), '([vcam[0] / vcam[2], vcam[1] / vcam[2], 0])\n', (3864, 3907), True, 'import numpy as np\n'), ((3963, 3998), 'numpy.append', 'np.append', (['AllProjs', '[proj]'], {'axis': '(0)'}), '(AllProjs, [proj], axis=0)\n', (3972, 3998), True, 'import numpy as np\n'), ((4987, 5017), 'numpy.delete', 'np.delete', (['AllCents', '(0)'], {'axis': '(0)'}), '(AllCents, 0, axis=0)\n', (4996, 5017), True, 'import numpy as np\n'), ((5053, 5083), 'numpy.delete', 'np.delete', (['AllProjs', '(0)'], {'axis': '(0)'}), '(AllProjs, 0, axis=0)\n', (5062, 5083), True, 'import numpy as np\n'), ((879, 901), 'pathlib.Path', 'pathlib.Path', 
(['__file__'], {}), '(__file__)\n', (891, 901), False, 'import pathlib\n'), ((2471, 2482), 'time.time', 'time.time', ([], {}), '()\n', (2480, 2482), False, 'import time\n'), ((3317, 3329), 'numpy.cos', 'np.cos', (['angY'], {}), '(angY)\n', (3323, 3329), True, 'import numpy as np\n'), ((3392, 3404), 'numpy.sin', 'np.sin', (['angY'], {}), '(angY)\n', (3398, 3404), True, 'import numpy as np\n'), ((3409, 3421), 'numpy.cos', 'np.cos', (['angY'], {}), '(angY)\n', (3415, 3421), True, 'import numpy as np\n'), ((3492, 3504), 'numpy.cos', 'np.cos', (['angZ'], {}), '(angZ)\n', (3498, 3504), True, 'import numpy as np\n'), ((3544, 3556), 'numpy.sin', 'np.sin', (['angZ'], {}), '(angZ)\n', (3550, 3556), True, 'import numpy as np\n'), ((3558, 3570), 'numpy.cos', 'np.cos', (['angZ'], {}), '(angZ)\n', (3564, 3570), True, 'import numpy as np\n'), ((3670, 3686), 'numpy.array', 'np.array', (['tup[1]'], {}), '(tup[1])\n', (3678, 3686), True, 'import numpy as np\n'), ((3774, 3786), 'numpy.dot', 'np.dot', (['R', 'v'], {}), '(R, v)\n', (3780, 3786), True, 'import numpy as np\n'), ((4837, 4857), 'numpy.deg2rad', 'np.deg2rad', (['fovguess'], {}), '(fovguess)\n', (4847, 4857), True, 'import numpy as np\n'), ((3335, 3347), 'numpy.sin', 'np.sin', (['angY'], {}), '(angY)\n', (3341, 3347), True, 'import numpy as np\n'), ((3507, 3519), 'numpy.sin', 'np.sin', (['angZ'], {}), '(angZ)\n', (3513, 3519), True, 'import numpy as np\n'), ((6615, 6626), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6624, 6626), False, 'import os\n'), ((4345, 4356), 'time.time', 'time.time', ([], {}), '()\n', (4354, 4356), False, 'import time\n')] |
import os
import numpy as np
from setuptools import find_packages, setup
from setuptools.extension import Extension
from Cython.Build import cythonize
# Cython extension for the fast retina model, compiled against the NumPy headers.
extensions = [
    Extension('pulse2percept.fast_retina', ['pulse2percept/fast_retina.pyx'],
              include_dirs=[np.get_include()],
              extra_compile_args=['-O3'])
]
# Get version and release info, which is all stored in pulse2percept/version.py
ver_file = os.path.join('pulse2percept', 'version.py')
with open(ver_file) as f:
    exec(f.read())
# NAME, MAINTAINER, VERSION, REQUIRES, etc. are injected into this namespace
# by the exec() above.
opts = dict(name=NAME,
            maintainer=MAINTAINER,
            maintainer_email=MAINTAINER_EMAIL,
            description=DESCRIPTION,
            long_description=LONG_DESCRIPTION,
            url=URL,
            download_url=DOWNLOAD_URL,
            license=LICENSE,
            classifiers=CLASSIFIERS,
            author=AUTHOR,
            author_email=AUTHOR_EMAIL,
            platforms=PLATFORMS,
            version=VERSION,
            packages=find_packages(),
            ext_modules=cythonize(extensions),
            install_requires=REQUIRES)
if __name__ == '__main__':
    setup(**opts)
| [
"Cython.Build.cythonize",
"setuptools.setup",
"numpy.get_include",
"os.path.join",
"setuptools.find_packages"
] | [((428, 471), 'os.path.join', 'os.path.join', (['"""pulse2percept"""', '"""version.py"""'], {}), "('pulse2percept', 'version.py')\n", (440, 471), False, 'import os\n'), ((1118, 1131), 'setuptools.setup', 'setup', ([], {}), '(**opts)\n', (1123, 1131), False, 'from setuptools import find_packages, setup\n'), ((982, 997), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (995, 997), False, 'from setuptools import find_packages, setup\n'), ((1023, 1044), 'Cython.Build.cythonize', 'cythonize', (['extensions'], {}), '(extensions)\n', (1032, 1044), False, 'from Cython.Build import cythonize\n'), ((273, 289), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (287, 289), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# encoding: utf-8
"""
my_test2.py
Created by <NAME> on 2011-06-06.
Copyright (c) 2011 University of Strathclyde. All rights reserved.
"""
from __future__ import division
import sys
import os
import numpy as np
import scipy.integrate as integral
def test(N, w):
# Parameters
pmin = 0
pmax = 1
# Types:
types = [np.random.uniform(pmin, pmax) for i in range(N)]
# Reputations:
rep = [np.random.uniform(pmin, pmax) for i in range(N)]
# Bids:
inv_prices = ((N-1)/pmax / (1-c/pmax)**(N-1) * integral.quad(lambda x,n=N: (1-x/pmax)**(N-2) / x, c, pmax)[0] for c in types)
prices = map(lambda x: 1/x, inv_prices)
my_prices = [max([prices[i], 0.5 + (1-w)/w * (0.5-rep[i])]) for i in range(N)]
# Compound bids:
my_bids = map(lambda x,y: w*x + (1-w)*y, my_prices, rep)
bids = map(lambda x,y: w*x + (1-w)*y, prices, rep)
# Utilities:
my_utility = []
if my_bids[0] < my_bids[1]:
my_utility.append(my_prices[0]-types[0])
my_utility.append(0)
else:
my_utility.append(0)
my_utility.append(my_prices[1]-types[1])
utility = []
if bids[0] < bids[1]:
utility.append(prices[0]-types[0])
utility.append(0)
else:
utility.append(0)
utility.append(prices[1]-types[1])
# Results:
print("Types and reputations")
print("Bidder 1: t1={0}, r1={1}".format(types[0], rep[0]))
print("Bidder 2: t2={0}, r2={1}".format(types[1], rep[1]))
print("\nMy bidding strategy")
print("Bidder 1: p1={0}, b1={1}, u1={2}".format(my_prices[0], my_bids[0], my_utility[0]))
print("Bidder 2: p2={0}, b2={1}, u2={2}".format(my_prices[1], my_bids[1], my_utility[1]))
print("\nStandard FPA bidding strategy")
print("Bidder 1: p1={0}, b1={1}, u1={2}".format(prices[0], bids[0], utility[0]))
print("Bidder 2: p2={0}, b2={1}, u2={2}".format(prices[1], bids[1], utility[1]))
if __name__ == '__main__':
test(2, 0.1)
| [
"numpy.random.uniform",
"scipy.integrate.quad"
] | [((340, 369), 'numpy.random.uniform', 'np.random.uniform', (['pmin', 'pmax'], {}), '(pmin, pmax)\n', (357, 369), True, 'import numpy as np\n'), ((413, 442), 'numpy.random.uniform', 'np.random.uniform', (['pmin', 'pmax'], {}), '(pmin, pmax)\n', (430, 442), True, 'import numpy as np\n'), ((519, 587), 'scipy.integrate.quad', 'integral.quad', (['(lambda x, n=N: (1 - x / pmax) ** (N - 2) / x)', 'c', 'pmax'], {}), '(lambda x, n=N: (1 - x / pmax) ** (N - 2) / x, c, pmax)\n', (532, 587), True, 'import scipy.integrate as integral\n')] |
from collections import Counter
from shutil import copyfile
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.decomposition import TruncatedSVD
import _pickle as pickle
import re
import path
import os
import numpy as np
import sys
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
train_data=[]
test_data=[]
class_train_flag=[]
class_test_flag=[]
num_files=[]
num_classes = 0
vocabulary = set()
path_test = '../q2data/test'
path_train = '../q2data/train'
def read_freq_file(path):
    """Read a text file and return a Counter of its digit-free word tokens."""
    with open(path, errors='ignore') as handle:
        text = handle.read()
    # \b\w+\b pulls out word tokens; drop any token containing a digit.
    tokens = re.findall(r'\b\w+\b', text)
    clean = (tok for tok in tokens if not any(ch.isdigit() for ch in tok))
    return Counter(word.strip() for word in clean)
def create_test_data():
    """Split the corpus on disk into train/test (roughly 80/20 per class).

    Side effects: empties each per-class folder under ``path_test``, copies
    the last ~20% of each class's numbered files from ``path_train`` into it,
    then fills the module-level ``train_data``/``test_data`` (one word Counter
    per document) and the parallel ``class_train_flag``/``class_test_flag``
    label lists.
    """
    global num_classes
    global num_files
    global train_data
    global test_data
    global class_train_flag
    global class_test_flag
    # One sub-directory per class under path_train.
    num_classes = len([f for f in os.listdir(os.path.join(path_train)) ])
    num_files = [0]*num_classes
    for class_no in range(0,num_classes):
        # Clear any stale files from this class's test folder first.
        for f in os.listdir(os.path.join(path_test,str(class_no).zfill(1))):
            # print(f)
            os.remove(os.path.join(path_test,str(class_no).zfill(1), f))
        num_files[class_no] = len([f for f in os.listdir(os.path.join(path_train,str(class_no).zfill(1))) \
                if os.path.isfile(os.path.join(path_train,str(class_no).zfill(1), f))])
        # Copy the last ~20% of files (by numeric name, e.g. 017.txt) to test.
        for file_no in range(int(num_files[class_no]*0.8)+1,num_files[class_no]+1):
            copyfile(os.path.join(path_train,str(class_no).zfill(1),str(file_no).zfill(3)+'.txt'),\
                os.path.join(path_test,str(class_no).zfill(1),str(file_no).zfill(3)+'.txt'))
        for f in os.listdir(os.path.join(path_test,str(class_no).zfill(1))):
            test_data.append(read_freq_file(os.path.join(path_test,str(class_no).zfill(1),f)))
            class_test_flag.append(class_no)
        # The remaining ~80% become the training documents.
        for file_no in range(1,int(num_files[class_no]*0.8)+1):
            train_data.append(read_freq_file(os.path.join(path_train,str(class_no).zfill(1),str(file_no).zfill(3)+'.txt')))
            class_train_flag.append(class_no)
def build_lexicon(train_data):
    """Return the vocabulary over all documents, minus English stopwords.

    Parameters
    ----------
    train_data : iterable of Counter
        One word-frequency Counter per document.

    Returns
    -------
    list of str
        The non-stopword vocabulary (set order, i.e. unordered).
    """
    lexicon = set()
    for doc in train_data:
        lexicon.update([x for x in doc])
    # PERF FIX: the original evaluated stopwords.words('english') (and an O(n)
    # list membership test) once PER vocabulary word. Build the set once.
    stop_set = set(stopwords.words('english'))
    filtered_words = [word for word in lexicon if word not in stop_set]
    return filtered_words
def tf(term, document):
    """Term frequency: the count of *term* in *document* (0 when absent)."""
    return document.get(term, 0)
def find_dim(threshold, Vt, s):
    """Pick the smallest number of SVD components whose cumulative share of
    the spectrum exceeds *threshold*, and return the projection matrix.

    Parameters
    ----------
    threshold : float
        Fraction (0..1) of the summed singular values to retain.
    Vt : matrix
        Right singular vectors (rows).
    s : array
        Singular values.

    Returns
    -------
    matrix
        Vt[:num_dim, :].T — project data onto it via matrix multiplication.
    """
    shares = (s / s.sum()).cumsum()
    num_dim = 0
    for share in shares:
        num_dim += 1
        if share > threshold:
            break
    print('Number of Dimensions for threshold-> ', threshold, '\t', num_dim)
    return Vt[0:num_dim, :].T
def sign(num):
    """Return +1 when the scalar *num* is strictly positive, else -1.

    BUG FIX: ``np.asscalar`` was deprecated in NumPy 1.16 and removed in
    1.23; ``np.asarray(num).item()`` is the supported replacement and also
    accepts plain Python numbers.
    """
    if np.asarray(num).item() > 0:
        return 1
    else:
        return -1
def OVAPerceptronTraining(x_train, y_train):
    """Train one-vs-all perceptrons: one weight vector per class.

    Makes a single pass over the data; on each misclassification the true
    class's weights are rewarded and the predicted class's punished.
    Relies on the module-level ``num_classes``.

    Parameters
    ----------
    x_train : list of row matrices (feature vectors)
    y_train : list of int class labels

    Returns
    -------
    list of arrays — the per-class weight vectors.
    """
    num_obs = len(x_train)
    a, num_feat = x_train[0].shape
    output = []
    for i in range(0, num_classes):
        output.append(np.zeros(num_feat))
    c1 = 0  # mistakes (updates performed)
    c2 = 0  # already-correct predictions
    for i in range(0, num_obs):
        actual = y_train[i]
        pred = 0
        # BUG FIX: np.asscalar was removed in NumPy 1.23; .item() replaces it.
        prod_max = np.dot(x_train[i], output[0].T).item()
        for class_no in range(0, num_classes):
            prod = np.dot(x_train[i], output[class_no].T).item()
            if prod > prod_max:
                pred = class_no
                prod_max = prod
        if pred != actual:
            # Standard perceptron update on a mistake.
            output[actual] = output[actual] + x_train[i]
            output[pred] = output[pred] - x_train[i]
            c1 += 1
        else:
            c2 += 1
    print(c1, c2)
    return output
def OVAPerceptronTesting(output, x_test, y_test):
    """Evaluate one-vs-all perceptron weights; return accuracy in [0, 1].

    Picks, per test point, the class whose weight vector yields the largest
    dot product. Relies on the module-level ``num_classes``.
    """
    correct_v = 0
    incorrect_v = 0
    for ind in range(0, len(x_test)):
        pred = 0
        actual = y_test[ind]
        # BUG FIX: np.asscalar was removed in NumPy 1.23; .item() replaces it.
        prod_max = np.dot(x_test[ind], output[0].T).item()
        for class_number in range(0, num_classes):
            prod = np.dot(x_test[ind], output[class_number].T).item()
            if prod > prod_max:
                pred = class_number
                prod_max = prod
        if pred == actual:
            correct_v += 1
        else:
            incorrect_v += 1
    print(correct_v, incorrect_v)
    return float(float(correct_v) / float(incorrect_v + correct_v))
def cosine_similarity(x_train, y_train, x_test, y_test):
    """Classify each test doc by majority vote of its 10 nearest training
    docs under cosine similarity; return accuracy in [0, 1].

    Relies on the module-level ``num_classes``.
    """
    correct_v = 0
    incorrect_v = 0
    for test_doc in range(0, len(x_test)):
        b = x_test[test_doc]
        # PERF FIX: norm(b) is invariant over the training loop — hoisted
        # (the original recomputed it for every training document).
        modb = np.linalg.norm(b)
        cosines = []
        for train_doc in range(0, len(x_train)):
            a = x_train[train_doc]
            moda = np.linalg.norm(a)
            cosines.append((y_train[train_doc], np.dot(a, b.T) / moda / modb))
        top_10 = sorted(cosines, key=lambda x: x[1], reverse=True)[0:10]
        # Majority vote over the 10 most similar training documents.
        count = [0] * num_classes
        for i in top_10:
            count[i[0]] += 1
        pred = count.index(max(count))
        if pred == y_test[test_doc]:
            correct_v += 1
        else:
            incorrect_v += 1
    return float(float(correct_v) / float(incorrect_v + correct_v))
def perform_tfidf(train_data, test_data, vocabulary):
    """Build a mean-centred TF-IDF matrix over train+test documents and
    decompose it with SVD.

    Returns
    -------
    (s, V) : singular values and right singular vectors (V is really Vt).
    """
    rows = [[tf(word, doc) for word in vocabulary]
            for doc in train_data + test_data]
    counts = np.array(rows)
    transformer = TfidfTransformer(norm="l2")
    transformer.fit(counts)
    weighted = np.matrix(transformer.transform(counts).toarray())
    # Centre each column so the SVD captures variance, not the mean.
    weighted = weighted - np.matrix(np.mean(weighted, 0))
    U, s, V = np.linalg.svd(weighted, full_matrices=False)
    return s, V
def perform_tfidf_validation(train_data, test_data, vocabulary):
    """Fit a TF-IDF transformer on the training documents only.

    ``test_data`` is unused; the parameter is kept for signature
    compatibility with existing callers.

    Returns
    -------
    (tf_idf_matrix, tfidf) : the mean-centred train matrix and the fitted
    transformer (reuse it on the test set via ``perform_transform``).
    """
    doc_term_matrix = []
    for doc in train_data:
        tf_vector = [tf(word, doc) for word in vocabulary]
        doc_term_matrix.append(tf_vector)
    doc_term_matrix = np.array(doc_term_matrix)
    tfidf = TfidfTransformer(norm="l2")
    tfidf.fit(doc_term_matrix)
    # CLEANUP: removed the original's doc_term_matrix[0:len(train_data)]
    # slice — a no-op, since the matrix is built from train_data alone.
    tf_idf_matrix = np.matrix(tfidf.transform(doc_term_matrix).toarray())
    tf_idf_matrix -= np.matrix((np.mean(tf_idf_matrix, 0)))
    return tf_idf_matrix, tfidf
def perform_transform(test_data, tfidf, vocabulary):
    """Apply an already-fitted TF-IDF transformer to *test_data* and
    mean-centre the result.

    Returns the centred tf-idf matrix (np.matrix, one row per document).
    """
    rows = [[tf(word, doc) for word in vocabulary] for doc in test_data]
    counts = np.array(rows)
    weighted = np.matrix(tfidf.transform(counts).toarray())
    weighted -= np.matrix(np.mean(weighted, 0))
    return weighted
def dimension_reduction(V, Matrix):
    # Project the rows of Matrix onto the retained SVD directions in V.
    # NOTE: '*' is matrix multiplication here because callers pass np.matrix
    # objects (it would be elementwise for plain ndarrays).
    return Matrix*V
def perform_tfidf_query(train_data):
    """Build vocabulary and a mean-centred TF-IDF matrix from *train_data*,
    then run an SVD on it.

    Returns
    -------
    (tf_idf_matrix, tfidf, vocabulary, V, s) : centred matrix, fitted
    transformer, word list, right singular vectors (really Vt), and
    singular values.
    """
    vocabulary = build_lexicon(train_data)
    rows = [[tf(word, doc) for word in vocabulary] for doc in train_data]
    counts = np.array(rows)
    tfidf = TfidfTransformer(norm="l2")
    tfidf.fit(counts)
    weighted = np.matrix(tfidf.transform(counts).toarray())
    weighted -= np.matrix(np.mean(weighted, 0))
    U, s, V = np.linalg.svd(weighted, full_matrices=False)
    return weighted, tfidf, vocabulary, V, s
def cosine_similarity_query(x_train, y_train, x_test):
    """Predict a class for the FIRST document in *x_test* by a 10-NN cosine
    vote against the training documents.

    NOTE: the return sits inside the loop over x_test, so only x_test[0] is
    ever classified — this matches the single-query caller (mode '4').
    Returns 0 when x_test is empty. Relies on module-level ``num_classes``.
    """
    for test_doc in range(0, len(x_test)):
        b = x_test[test_doc]
        # PERF FIX: norm(b) is invariant over the training loop — hoisted.
        modb = np.linalg.norm(b)
        cosines = []
        for train_doc in range(0, len(x_train)):
            a = x_train[train_doc]
            moda = np.linalg.norm(a)
            cosines.append((y_train[train_doc], np.dot(a, b.T) / moda / modb))
        top_10 = sorted(cosines, key=lambda x: x[1], reverse=True)[0:10]
        count = [0] * num_classes
        for i in top_10:
            count[i[0]] += 1
        pred = count.index(max(count))
        return pred
    return 0
def create_test_data_query():
    """Load the ENTIRE corpus as training data (no test split).

    Used by the query mode ('4'), where the test document comes from the
    command line instead of a held-out split. Fills the module-level
    ``train_data`` (word Counter per document) and ``class_train_flag``.
    """
    global num_classes
    global num_files
    global train_data
    global test_data
    global class_train_flag
    global class_test_flag
    # One sub-directory per class under path_train.
    num_classes = len([f for f in os.listdir(os.path.join(path_train)) ])
    num_files = [0]*num_classes
    for class_no in range(0,num_classes):
        num_files[class_no] = len([f for f in os.listdir(os.path.join(path_train,str(class_no).zfill(1))) \
                if os.path.isfile(os.path.join(path_train,str(class_no).zfill(1), f))])
        # Every numbered file of the class becomes a training document.
        for file_no in range(1,int(num_files[class_no])+1):
            train_data.append(read_freq_file(os.path.join(path_train,str(class_no).zfill(1),str(file_no).zfill(3)+'.txt')))
            class_train_flag.append(class_no)
# if __name__ == "__main__":
# Command-line dispatch on sys.argv[1]:
#   '1' -> build train/test split, fit TF-IDF + SVD, pickle everything
#   '2' -> report dimensionality kept per variance threshold
#   '3' -> sweep thresholds; train/evaluate perceptron + cosine kNN; plot
#   '4' -> classify a single query file against the whole corpus
if sys.argv[1]=='1':
    if len(sys.argv)>=3:
        path_train = sys.argv[2]
    if len(sys.argv)>=4:
        path_test = sys.argv[3]
    create_test_data()
    vocabulary = build_lexicon(train_data)
    s, Vt = perform_tfidf(train_data, test_data, vocabulary)
    B, tfidf = perform_tfidf_validation(train_data, test_data, vocabulary)
    B_test = perform_transform(test_data, tfidf, vocabulary)
    # Persist the expensive SVD results for modes '2' and '3'.
    with open("Save_SVD.data",'wb') as fp:
        pickle.dump(s,fp)
        pickle.dump(Vt,fp)
        pickle.dump(B,fp)
        pickle.dump(B_test,fp)
elif sys.argv[1]=='2':
    print("Threshold\tNum Dim")
    with open("Save_SVD.data",'rb') as fp:
        s=pickle.load(fp)
        Vt=pickle.load(fp)
    for threshold in range(50,110,5):
        find_dim(float(threshold/100),Vt , s)
elif sys.argv[1]=='3':
    if len(sys.argv)>=3:
        path_train = sys.argv[2]
    if len(sys.argv)>=4:
        path_test = sys.argv[3]
    create_test_data()
    vocabulary = build_lexicon(train_data)
    with open("Save_SVD.data",'rb') as fp:
        s=pickle.load(fp)
        Vt=pickle.load(fp)
        B=pickle.load(fp)
        B_test=pickle.load(fp)
    X_plot=[]
    Y_plotp=[]
    Y_plotc=[]
    print('threshold\taccuracy_perceptron\taccuracy_cosine')
    for threshold in range(60,101,5):
        # Reduce train/test matrices to the dimensionality this threshold keeps.
        V = find_dim(float(threshold/100),Vt , s)
        B_transformed = dimension_reduction(V, B)
        B_test_transformed = dimension_reduction(V, B_test)
        x_train=[]
        y_train=[]
        for ind in range(0,len(B_transformed)):
            # x_train.append(np.append(np.squeeze(B_transformed[ind]),np.ones((1,1)), axis=1))
            x_train.append(np.squeeze(B_transformed[ind]))
            y_train.append(class_train_flag[ind])
        model = OVAPerceptronTraining(x_train, y_train)
        x_test=[]
        y_test=[]
        for ind in range(0,len(B_test_transformed)):
            # x_test.append(np.append(np.squeeze(B_test_transformed[ind]),np.ones((1,1)), axis=1))
            x_test.append(np.squeeze(B_test_transformed[ind]))
            # x_test.append(np.squeeze(B_test_transformed[ind]))
            y_test.append(class_test_flag[ind])
        accuracy_perceptron = OVAPerceptronTesting(model, x_test, y_test)
        accuracy_cosine = cosine_similarity(x_train, y_train, x_test, y_test)
        print(threshold,'\t', accuracy_perceptron,'\t', accuracy_cosine)
        X_plot.append(threshold)
        Y_plotp.append(accuracy_perceptron*100)
        Y_plotc.append(accuracy_cosine*100)
    # Plot both accuracy curves (percent) against the threshold.
    plt.plot(X_plot, Y_plotp, color = 'blue', label='Perceptron Accuracy')
    plt.plot(X_plot, Y_plotc, color = 'black', label='Cosine Sim Accuracy')
    for i in range(0,len(X_plot)):
        plt.plot(X_plot[i], Y_plotp[i], 'ro')
    for i in range(0,len(X_plot)):
        plt.plot(X_plot[i], Y_plotc[i], 'ro')
    plt.axis([50, 110, 60, 110])
    plt.legend(loc='best')
    plt.xlabel('Threshold (in %)', fontsize=18)
    plt.ylabel('Accuracy', fontsize=18)
    plt.show()
elif sys.argv[1]=='4':
    # Mode 4: argv[3] is the QUERY FILE path (not a directory here),
    # argv[4] the known class label for comparison.
    if len(sys.argv)>=3:
        path_train = sys.argv[2]
    if len(sys.argv)>=4:
        path_test = sys.argv[3]
    if len(sys.argv)>=5:
        class_already = int(sys.argv[4])
    create_test_data_query()
    query_file = read_freq_file(path_test)
    tfidf_matrix, tfidf, vocabulary, Vt, s = perform_tfidf_query(train_data)
    query_tfidf = perform_transform([query_file], tfidf, vocabulary)
    V = find_dim(float(95/100),Vt , s)
    train_data = dimension_reduction(V,tfidf_matrix)
    query_tfidf = dimension_reduction(V,query_tfidf)
    predicted_class = cosine_similarity_query(train_data, class_train_flag, query_tfidf)
print("Predicted Class-> ",predicted_class,'\n',"Query Class-> ", class_already) | [
"numpy.linalg.svd",
"numpy.mean",
"numpy.linalg.norm",
"numpy.asscalar",
"os.path.join",
"_pickle.load",
"re.findall",
"sklearn.feature_extraction.text.TfidfTransformer",
"matplotlib.pyplot.show",
"_pickle.dump",
"matplotlib.pyplot.legend",
"nltk.corpus.stopwords.words",
"numpy.squeeze",
"... | [((5324, 5349), 'numpy.array', 'np.array', (['doc_term_matrix'], {}), '(doc_term_matrix)\n', (5332, 5349), True, 'import numpy as np\n'), ((5360, 5387), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (5376, 5387), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((5556, 5605), 'numpy.linalg.svd', 'np.linalg.svd', (['tf_idf_matrix'], {'full_matrices': '(False)'}), '(tf_idf_matrix, full_matrices=False)\n', (5569, 5605), True, 'import numpy as np\n'), ((5881, 5906), 'numpy.array', 'np.array', (['doc_term_matrix'], {}), '(doc_term_matrix)\n', (5889, 5906), True, 'import numpy as np\n'), ((5916, 5943), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (5932, 5943), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((6396, 6421), 'numpy.array', 'np.array', (['doc_term_matrix'], {}), '(doc_term_matrix)\n', (6404, 6421), True, 'import numpy as np\n'), ((6861, 6886), 'numpy.array', 'np.array', (['doc_term_matrix'], {}), '(doc_term_matrix)\n', (6869, 6886), True, 'import numpy as np\n'), ((6896, 6923), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (6912, 6923), False, 'from sklearn.feature_extraction.text import TfidfTransformer\n'), ((7089, 7138), 'numpy.linalg.svd', 'np.linalg.svd', (['tf_idf_matrix'], {'full_matrices': '(False)'}), '(tf_idf_matrix, full_matrices=False)\n', (7102, 7138), True, 'import numpy as np\n'), ((673, 696), 're.findall', 're.findall', (['regex', 'shop'], {}), '(regex, shop)\n', (683, 696), False, 'import re\n'), ((3177, 3193), 'numpy.asscalar', 'np.asscalar', (['num'], {}), '(num)\n', (3188, 3193), True, 'import numpy as np\n'), ((5516, 5541), 'numpy.mean', 'np.mean', (['tf_idf_matrix', '(0)'], {}), '(tf_idf_matrix, 0)\n', (5523, 5541), True, 'import numpy as np\n'), ((6124, 6149), 
'numpy.mean', 'np.mean', (['tf_idf_matrix', '(0)'], {}), '(tf_idf_matrix, 0)\n', (6131, 6149), True, 'import numpy as np\n'), ((6520, 6545), 'numpy.mean', 'np.mean', (['tf_idf_matrix', '(0)'], {}), '(tf_idf_matrix, 0)\n', (6527, 6545), True, 'import numpy as np\n'), ((7050, 7075), 'numpy.mean', 'np.mean', (['tf_idf_matrix', '(0)'], {}), '(tf_idf_matrix, 0)\n', (7057, 7075), True, 'import numpy as np\n'), ((8841, 8859), '_pickle.dump', 'pickle.dump', (['s', 'fp'], {}), '(s, fp)\n', (8852, 8859), True, 'import _pickle as pickle\n'), ((8864, 8883), '_pickle.dump', 'pickle.dump', (['Vt', 'fp'], {}), '(Vt, fp)\n', (8875, 8883), True, 'import _pickle as pickle\n'), ((8888, 8906), '_pickle.dump', 'pickle.dump', (['B', 'fp'], {}), '(B, fp)\n', (8899, 8906), True, 'import _pickle as pickle\n'), ((8911, 8934), '_pickle.dump', 'pickle.dump', (['B_test', 'fp'], {}), '(B_test, fp)\n', (8922, 8934), True, 'import _pickle as pickle\n'), ((3387, 3405), 'numpy.zeros', 'np.zeros', (['num_feat'], {}), '(num_feat)\n', (3395, 3405), True, 'import numpy as np\n'), ((3505, 3536), 'numpy.dot', 'np.dot', (['x_train[i]', 'output[0].T'], {}), '(x_train[i], output[0].T)\n', (3511, 3536), True, 'import numpy as np\n'), ((4068, 4092), 'numpy.dot', 'np.dot', (['x_test[ind]', 'w.T'], {}), '(x_test[ind], w.T)\n', (4074, 4092), True, 'import numpy as np\n'), ((4671, 4688), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (4685, 4688), True, 'import numpy as np\n'), ((4699, 4716), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (4713, 4716), True, 'import numpy as np\n'), ((7424, 7441), 'numpy.linalg.norm', 'np.linalg.norm', (['a'], {}), '(a)\n', (7438, 7441), True, 'import numpy as np\n'), ((7452, 7469), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (7466, 7469), True, 'import numpy as np\n'), ((9033, 9048), '_pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (9044, 9048), True, 'import _pickle as pickle\n'), ((9057, 9072), '_pickle.load', 'pickle.load', 
(['fp'], {}), '(fp)\n', (9068, 9072), True, 'import _pickle as pickle\n'), ((10664, 10732), 'matplotlib.pyplot.plot', 'plt.plot', (['X_plot', 'Y_plotp'], {'color': '"""blue"""', 'label': '"""Perceptron Accuracy"""'}), "(X_plot, Y_plotp, color='blue', label='Perceptron Accuracy')\n", (10672, 10732), True, 'import matplotlib.pyplot as plt\n'), ((10736, 10805), 'matplotlib.pyplot.plot', 'plt.plot', (['X_plot', 'Y_plotc'], {'color': '"""black"""', 'label': '"""Cosine Sim Accuracy"""'}), "(X_plot, Y_plotc, color='black', label='Cosine Sim Accuracy')\n", (10744, 10805), True, 'import matplotlib.pyplot as plt\n'), ((10956, 10984), 'matplotlib.pyplot.axis', 'plt.axis', (['[50, 110, 60, 110]'], {}), '([50, 110, 60, 110])\n', (10964, 10984), True, 'import matplotlib.pyplot as plt\n'), ((10986, 11008), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (10996, 11008), True, 'import matplotlib.pyplot as plt\n'), ((11011, 11054), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Threshold (in %)"""'], {'fontsize': '(18)'}), "('Threshold (in %)', fontsize=18)\n", (11021, 11054), True, 'import matplotlib.pyplot as plt\n'), ((11056, 11091), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {'fontsize': '(18)'}), "('Accuracy', fontsize=18)\n", (11066, 11091), True, 'import matplotlib.pyplot as plt\n'), ((11093, 11103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11101, 11103), True, 'import matplotlib.pyplot as plt\n'), ((2254, 2280), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2269, 2280), False, 'from nltk.corpus import stopwords\n'), ((3600, 3638), 'numpy.dot', 'np.dot', (['x_train[i]', 'output[class_no].T'], {}), '(x_train[i], output[class_no].T)\n', (3606, 3638), True, 'import numpy as np\n'), ((4186, 4210), 'numpy.dot', 'np.dot', (['x_test[ind]', 'w.T'], {}), '(x_test[ind], w.T)\n', (4192, 4210), True, 'import numpy as np\n'), ((9376, 9391), '_pickle.load', 
'pickle.load', (['fp'], {}), '(fp)\n', (9387, 9391), True, 'import _pickle as pickle\n'), ((9400, 9415), '_pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (9411, 9415), True, 'import _pickle as pickle\n'), ((9423, 9438), '_pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (9434, 9438), True, 'import _pickle as pickle\n'), ((9451, 9466), '_pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (9462, 9466), True, 'import _pickle as pickle\n'), ((10843, 10880), 'matplotlib.pyplot.plot', 'plt.plot', (['X_plot[i]', 'Y_plotp[i]', '"""ro"""'], {}), "(X_plot[i], Y_plotp[i], 'ro')\n", (10851, 10880), True, 'import matplotlib.pyplot as plt\n'), ((10916, 10953), 'matplotlib.pyplot.plot', 'plt.plot', (['X_plot[i]', 'Y_plotc[i]', '"""ro"""'], {}), "(X_plot[i], Y_plotc[i], 'ro')\n", (10924, 10953), True, 'import matplotlib.pyplot as plt\n'), ((1001, 1025), 'os.path.join', 'os.path.join', (['path_train'], {}), '(path_train)\n', (1013, 1025), False, 'import os\n'), ((7918, 7942), 'os.path.join', 'os.path.join', (['path_train'], {}), '(path_train)\n', (7930, 7942), False, 'import os\n'), ((9912, 9942), 'numpy.squeeze', 'np.squeeze', (['B_transformed[ind]'], {}), '(B_transformed[ind])\n', (9922, 9942), True, 'import numpy as np\n'), ((10215, 10250), 'numpy.squeeze', 'np.squeeze', (['B_test_transformed[ind]'], {}), '(B_test_transformed[ind])\n', (10225, 10250), True, 'import numpy as np\n'), ((4756, 4770), 'numpy.dot', 'np.dot', (['a', 'b.T'], {}), '(a, b.T)\n', (4762, 4770), True, 'import numpy as np\n'), ((7509, 7523), 'numpy.dot', 'np.dot', (['a', 'b.T'], {}), '(a, b.T)\n', (7515, 7523), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
import os
import time
import h5py
import sys
import numpy as np
import pandas as pd
from pprint import pprint
import matplotlib.pyplot as plt
from pygama import DataSet, read_lh5, get_lh5_header
import pygama.analysis.histograms as pgh
# new viewer for waveforms for llama / scarf tests.
# Largely "inspired" by Clint's processing.py
def main():
    """
    Entry point: take the data file name from the first CLI argument
    (the fancy run-number handling is deliberately skipped) and hand it
    to the waveform plotter.
    """
    if len(sys.argv) < 2:
        print("You have to give a file name as argument!")
        exit(0)
    filename = sys.argv[1]
    plotWFS(filename)
def plotWFS(filename):
    """Interactively plot waveforms from an lh5 file, one event at a time.

    Prompts for the channel numbers to overlay, pre-loads up to 2000
    waveforms into memory, then loops forever asking for an event index
    and redrawing the selected channels' traces.
    """
    print("enter channels to plot, e.g. 0 2 3 for plotting channels 0, 2 and 3 at the same time.")
    user = input("enter channels> ")
    channels = list(map(int,user.strip().split()))
    print("will plot the following channels:")
    print(channels)
    hf = h5py.File(filename, "r")
    nevt = hf['/daqdata/waveform/values/cumulative_length'].size
    wfs = []
    wfidx = hf["/daqdata/waveform/values/cumulative_length"] # where each wf starts
    wfdata = hf["/daqdata/waveform/values/flattened_data"] # adc values
    # Cap how many waveforms are kept in memory.
    chunksize = 2000 if nevt > 2000 else nevt - 1
    wfsel = np.arange(chunksize)
    for iwf in wfsel:
        ilo = wfidx[iwf]
        ihi = wfidx[iwf+1] if iwf+1 < nevt else nevt
        wfs.append(wfdata[ilo : ihi])
    wfs = np.vstack(wfs)
    plt.ion() #make plot non-blocking
    while True:
        index = int(input("enter index> "))
        wfx = getWFEvent(hf, wfs, index, channels)
        plt.clf()
        for q in wfx:
            plt.plot(q)
        #for i in range(wfx.shape[0]):
        #    wf = wfx[i,:]
        #    plt.plot(np.arange(len(wf)), wf)
        plt.tight_layout()
        plt.show()
        plt.pause(0.001)
    # NOTE(review): unreachable — the while True above never exits normally.
    hf.close()
def getWFEvent(hf, waveforms, index, channels):
    """Collect the *index*-th waveform of every requested channel.

    Walks the event list in order; for each channel in *channels* a
    per-channel event counter is kept, and the waveform whose counter
    equals *index* is selected. Events from channels not in *channels*
    are skipped. Returns once every requested channel has contributed.
    """
    seen = [0] * len(channels)
    chIDs = hf["/daqdata/channel"]
    picked = []
    pos = 0
    while True:
        try:
            slot = channels.index(chIDs[pos])
        except ValueError:
            # Event belongs to a channel we were not asked to plot.
            pos += 1
            continue
        if seen[slot] == index:
            picked.append(waveforms[pos])
        seen[slot] += 1
        pos += 1
        if len(picked) == len(channels):
            return picked
def testmethod(filename):
    """Scratch helper: dump lh5 file structure/header info and plot timestamps."""
    hf = h5py.File(filename)
    ts = hf['/daqdata/timestamp']
    # Spot-check a few arbitrary timestamp entries.
    print(ts.size, ts[137], ts[138], ts[707])
    #here data from the daq is stored
    print(hf['/daqdata'])
    #here header info is stored --> stuff that does not change btw events
    # see http://docs.h5py.org/en/stable/high/attr.html#attributes
    print(hf['/header'].attrs.get("file_name"), hf['/header'].attrs.get("nsamples"))
    print(hf['/header'].attrs.get("file_name"))
    print("The following metadata is available in the header:")
    print(hf['/header'].attrs.keys())
    plt.plot(ts)
    plt.show()
def test2():
    """Interactive scratch helper: echo each input line and print its third integer."""
    while True:
        user = input("tell me something> ")
        print(user)
        numbers = [int(token) for token in user.strip().split()]
        print(numbers[2])
def plot_data(filename):
    """
    read the lh5 output.
    plot waveforms
    Mostly written by Clint
    """
    #filename = "/mnt/e15/schwarz/testdata_pg/scarf/tier1/t1_run2002.lh5"
    df = get_lh5_header(filename)
    #df = read_lh5(filename)
    print(df)
    #exit()
    hf = h5py.File(filename)
    # # 1. energy histogram
    # wf_max = hf['/daqdata/wf_max'][...]  # slice reads into memory
    # wf_bl = hf['/daqdata/baseline'][...]
    # wf_max = wf_max - wf_bl
    # xlo, xhi, xpb = 0, 5000, 10
    # hist, bins = pgh.get_hist(wf_max, range=(xlo, xhi), dx=xpb)
    # plt.semilogy(bins, hist, ls='steps', c='b')
    # plt.xlabel("Energy (uncal)", ha='right', x=1)
    # plt.ylabel("Counts", ha='right', y=1)
    # # plt.show()
    # # exit()
    # plt.cla()
    # 2. energy vs time
    # ts = hf['/daqdata/timestamp']
    # plt.plot(ts, wf_max, '.b')
    # plt.show()
    # 3. waveforms
    nevt = hf['/daqdata/waveform/values/cumulative_length'].size
    # create a waveform block compatible w/ pygama
    # and yeah, i know, for loops are inefficient. i'll optimize when it matters
    wfs = []
    wfidx = hf["/daqdata/waveform/values/cumulative_length"] # where each wf starts
    wfdata = hf["/daqdata/waveform/values/flattened_data"] # adc values
    # Take the first 2000 events only.
    wfsel = np.arange(2000)
    for iwf in wfsel:
        ilo = wfidx[iwf]
        ihi = wfidx[iwf+1] if iwf+1 < nevt else nevt
        wfs.append(wfdata[ilo : ihi])
    wfs = np.vstack(wfs)
    print("Shape of the waveforms:")
    print(wfs.shape) # wfs on each row. will work w/ pygama.
    # plot waveforms, flip polarity for fun
    for i in range(wfs.shape[0]):
        wf = wfs[i,:]
        plt.plot(np.arange(len(wf)), wf)
    plt.xlabel("clock ticks", ha='right', x=1)
    plt.ylabel("adc", ha='right', y=1)
    plt.tight_layout()
    plt.show()
    # plt.savefig(f"testdata_evt{ievt}.png")
    hf.close()
# Script entry point.
if __name__=="__main__":
    main()
| [
"h5py.File",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"pygama.get_lh5_header",
"matplotlib.pyplot.ion",
"numpy.arange",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.tight_layout",
"numpy.vstack"
] | [((1093, 1117), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (1102, 1117), False, 'import h5py\n'), ((1414, 1434), 'numpy.arange', 'np.arange', (['chunksize'], {}), '(chunksize)\n', (1423, 1434), True, 'import numpy as np\n'), ((1584, 1598), 'numpy.vstack', 'np.vstack', (['wfs'], {}), '(wfs)\n', (1593, 1598), True, 'import numpy as np\n'), ((1604, 1613), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1611, 1613), True, 'import matplotlib.pyplot as plt\n'), ((2590, 2609), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (2599, 2609), False, 'import h5py\n'), ((3134, 3146), 'matplotlib.pyplot.plot', 'plt.plot', (['ts'], {}), '(ts)\n', (3142, 3146), True, 'import matplotlib.pyplot as plt\n'), ((3151, 3161), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3159, 3161), True, 'import matplotlib.pyplot as plt\n'), ((3536, 3560), 'pygama.get_lh5_header', 'get_lh5_header', (['filename'], {}), '(filename)\n', (3550, 3560), False, 'from pygama import DataSet, read_lh5, get_lh5_header\n'), ((3645, 3664), 'h5py.File', 'h5py.File', (['filename'], {}), '(filename)\n', (3654, 3664), False, 'import h5py\n'), ((4657, 4672), 'numpy.arange', 'np.arange', (['(2000)'], {}), '(2000)\n', (4666, 4672), True, 'import numpy as np\n'), ((4822, 4836), 'numpy.vstack', 'np.vstack', (['wfs'], {}), '(wfs)\n', (4831, 4836), True, 'import numpy as np\n'), ((5091, 5133), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""clock ticks"""'], {'ha': '"""right"""', 'x': '(1)'}), "('clock ticks', ha='right', x=1)\n", (5101, 5133), True, 'import matplotlib.pyplot as plt\n'), ((5138, 5172), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""adc"""'], {'ha': '"""right"""', 'y': '(1)'}), "('adc', ha='right', y=1)\n", (5148, 5172), True, 'import matplotlib.pyplot as plt\n'), ((5177, 5195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5193, 5195), True, 'import matplotlib.pyplot as plt\n'), ((5200, 5210), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5208, 5210), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1769), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1767, 1769), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1955), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1953, 1955), True, 'import matplotlib.pyplot as plt\n'), ((1964, 1974), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1972, 1974), True, 'import matplotlib.pyplot as plt\n'), ((1983, 1999), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (1992, 1999), True, 'import matplotlib.pyplot as plt\n'), ((1804, 1815), 'matplotlib.pyplot.plot', 'plt.plot', (['q'], {}), '(q)\n', (1812, 1815), True, 'import matplotlib.pyplot as plt\n')] |
import math
import os
import pathlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import garch as g
import garch_const_lambda as gc
import pricing as p
def compare(theta_hat,
            risk_free_rate,
            asset_price_t,
            t=0,
            T=90,
            moneyness=1.0,
            conditional_volatility_ratio=1.0):
    """Compare GARCH Monte-Carlo call pricing/hedging against Black-Scholes.

    Parameters
    ----------
    theta_hat : tuple (a0, a1, b1, risk_premium, sigma_0)
        Estimated GARCH(1,1) parameters.
    risk_free_rate : float
        Per-period (daily) risk-free rate.
    asset_price_t : float
        Current asset price. NOTE(review): this argument is immediately
        overwritten below from strike_price * moneyness — confirm intended.
    t, T : int
        Current time and maturity; num_periods = T - t.
    moneyness : float
        Asset price / strike price ratio used to set the starting price.
    conditional_volatility_ratio : float
        Starting conditional volatility as a multiple of the GARCH
        stationary volatility.

    Returns
    -------
    dict with prices, deltas, percent biases (GARCH vs BS) and the GARCH
    implied volatility (annualised via sqrt(iv * 365)).
    """
    # some consts
    a0, a1, b1, risk_premium, sigma_0 = theta_hat
    num_periods = T - t
    num_simulations = 50000
    strike_price = 100
    asset_price_t = strike_price * moneyness
    # some relevant computation
    BS_sigma_sq = a0 / (1 - a1 - b1) # Black scholes' sigma squared
    GARCH_stationary_variance = a0 / (1 - (1 + risk_premium ** 2) * a1 - b1) # stationary variance under GARCH and Local Risk Neutralization
    GARCH_sigma_t = conditional_volatility_ratio * np.sqrt(GARCH_stationary_variance) # conditional variance
    # Monte-Carlo simulation of terminal asset prices under the GARCH model.
    GARCH_asset_price_array_T, _, _ = p.compute_GARCH_price(theta=theta_hat,
                                                            num_periods=num_periods,
                                                            init_price=asset_price_t,
                                                            init_sigma=GARCH_sigma_t,
                                                            risk_free_rate=risk_free_rate,
                                                            num_simulations=num_simulations)
    GARCH_asset_price_T = np.mean(GARCH_asset_price_array_T)
    # strike_price = GARCH_asset_price_T / moneyness
    # GARCH option pricing
    GARCH_call_price, GARCH_call_price_array = p.compute_GARCH_call_price(sim_price_array=GARCH_asset_price_array_T,
                                                                          strike_price=strike_price,
                                                                          num_periods=num_periods,
                                                                          risk_free_rate=risk_free_rate)
    GARCH_call_delta, GARCH_call_delta_array = p.compute_GARCH_delta(sim_price_array=GARCH_asset_price_array_T,
                                                                     strike_price=strike_price,
                                                                     num_periods=num_periods,
                                                                     current_price=asset_price_t,
                                                                     risk_free_rate=risk_free_rate)
    # Closed-form Black-Scholes benchmarks.
    BS_call_price = p.compute_BS_call_price(sigma_sq=BS_sigma_sq,
                                            current_price=asset_price_t,
                                            strike_price=strike_price,
                                            risk_free_rate=risk_free_rate,
                                            num_periods=num_periods)
    BS_call_delta = p.compute_BS_delta(sigma_sq=BS_sigma_sq,
                                       current_price=asset_price_t,
                                       strike_price=strike_price,
                                       risk_free_rate=risk_free_rate,
                                       num_periods=num_periods)
    # construct comparison result
    # option price
    price_bias_percent_mean = np.mean((GARCH_call_price_array - BS_call_price) / BS_call_price * 100)
    price_bias_percent_std = np.std((GARCH_call_price_array - BS_call_price) / BS_call_price)
    # option delta
    delta_bias_percent_mean = np.mean((GARCH_call_delta_array - BS_call_delta) / BS_call_delta * 100)
    delta_bias_percent_std = np.std((GARCH_call_delta_array - BS_call_delta) / BS_call_delta)
    # implied volatility
    GARCH_iv = p.compute_BS_implied_volatility(call_option_price=GARCH_call_price,
                                               current_price=asset_price_t,
                                               strike_price=strike_price,
                                               risk_free_rate=risk_free_rate,
                                               num_periods=num_periods,
                                               tolerance=1e-5,
                                               max_iterations=1e5)
    # result as dict for easy DF later
    result = {
        "t": t,
        "T": T,
        "moneyness": moneyness,
        "conditional_volatility_ratio": conditional_volatility_ratio,
        "GARCH_asset_price": GARCH_asset_price_T,
        # "strike_price": strike_price,
        # "GARCH_stationary_sigma": np.sqrt(GARCH_stationary_variance),
        # "GARCH_sigma_t": GARCH_sigma_t,
        "price_BS": BS_call_price,
        "price_GARCH": GARCH_call_price,
        "price_bias_mean": price_bias_percent_mean,
        "price_bias_std": price_bias_percent_std,
        "delta_BS": BS_call_delta,
        "delta_GARCH": GARCH_call_delta,
        "delta_bias_mean": delta_bias_percent_mean,
        "delta_bias_std": delta_bias_percent_std,
        "iv_GARCH": np.sqrt(GARCH_iv * 365)
    }
    return result
def main():
    """Compare GARCH(1,1) and Black-Scholes call option pricing on BMW data.

    Loads daily closing prices, estimates GARCH(1,1) parameters from the log
    returns, then sweeps option length, moneyness and conditional-volatility
    ratio, pricing each setting under both models via compare(). Results are
    printed, saved as a tab-separated CSV, and implied-volatility smiles are
    plotted for low (0.8) and high (1.2) conditional volatility.

    Returns the full (un-pivoted) result DataFrame.
    """
    # get data, using BMW data
    data_path = 'data/BMW.csv'
    # German CSV conventions: ';' field separator and ',' decimal mark
    price_df = pd.read_csv(data_path,
                           sep=';',
                           decimal=',',
                           parse_dates=['Datum'])
    price_array = price_df['Schlusskurs'].astype(float).values
    # file appears newest-first; flip to chronological order, keep last 500 days
    price_array = np.flip(price_array)[-500:]
    date_array = np.flip(price_df['Datum'].values)[-500:]
    print(f'Start date: {np.min(date_array)}')
    print(f'End date: {np.max(date_array)}')
    print(f'Duration: {len(date_array)}')
    return_array = g.compute_log_return(price_array)
    # define some consts
    RISK_PREMIUM_AS_CONSTANT = True
    risk_free_rate = 0.05 / 365  # daily risk-free rate
    risk_premium = 0.001
    # GARCH(1, 1) parameter estimation
    # GARCH(1, 1) parameter: (alpha_0, alpha_1, beta_1, lambda, sigma)
    if RISK_PREMIUM_AS_CONSTANT:
        theta_hat = gc.estimate_GARCH_11_theta(return_array, risk_premium, risk_free_rate)
        a0, a1, b1, sigma_0 = theta_hat
        theta_hat = (a0, a1, b1, risk_premium, sigma_0)  # rearrange for easier input in later functions
    else:
        theta_hat = g.estimate_GARCH_11_theta(return_array, risk_free_rate)
    # set up context for comparison
    t = 0  # current time wrt call option creation date
    T_comp = (30, 90, 180)  # option lengths
    moneyness_comp = np.arange(0.7, 1.3, 0.001)  # asset price at T / strike price
    volatility_ratio = (0.8, 1.0, 1.2)  # sqrt(h_t) / stationary sigma
    # Cartesian product of all comparison settings
    settings = []
    for _T in T_comp:
        for _m in moneyness_comp:
            for _v in volatility_ratio:
                settings.append((_T, _m, _v))
    result_list = []
    for setting in settings:
        T, moneyness, conditional_volatility_ratio = setting
        num_periods = T - t # time-to-maturity
        # use the historical price num_periods days before the series end as "today"
        asset_price_t = price_array[-num_periods]
        res = compare(theta_hat=theta_hat,
                      risk_free_rate=risk_free_rate,
                      asset_price_t=asset_price_t,
                      t=t,
                      T=T,
                      moneyness=moneyness,
                      conditional_volatility_ratio=conditional_volatility_ratio)
        print(res)
        result_list.append(res)
    result_df = pd.DataFrame(result_list,)
    result_df = (result_df
                 .set_index(['T', 'moneyness', 'conditional_volatility_ratio', 'price_BS', 'delta_BS'])
                 )
    # pivot so each conditional-volatility ratio becomes its own column group
    result_df_save = (result_df
                      .reset_index(2)
                      .drop('t', axis=1)
                      .pivot(columns='conditional_volatility_ratio')
                      .reorder_levels([1, 0], axis=1)
                      .sort_index(axis=1, level=0)
                      )
    print(result_df_save.filter(like='0.8', axis=1))
    print(result_df_save.filter(like='1.0', axis=1))
    print(result_df_save.filter(like='1.2', axis=1))
    # save to csv
    data_folder, data_filename = data_path.split('/')
    result_filename = 'result_' + data_filename
    result_path = os.path.join(data_folder, result_filename)
    result_df_save.to_csv(result_path, sep='\t')
    # plot implied volatility
    # prepare data to plot (low and high conditional-volatility subsets)
    iv_df_low_vol = (result_df
                     .reset_index()
                     [result_df.reset_index()['conditional_volatility_ratio'] == 0.8]
                     [["T", "conditional_volatility_ratio", "moneyness", "iv_GARCH"]])
    iv_df_high_vol = (result_df
                      .reset_index()
                      [result_df.reset_index()['conditional_volatility_ratio'] == 1.2]
                      [["T", "conditional_volatility_ratio", "moneyness", "iv_GARCH"]])
    # actual plotting
    # low vol
    low_fig_filename = result_filename.split('.')[0] + '_low_cond_vol.png'
    low_fig_filepath = os.path.join(data_folder, low_fig_filename)
    fig, ax = plt.subplots(figsize=(8, 6))
    sns.lineplot(data=iv_df_low_vol,
                 x="moneyness",
                 y="iv_GARCH",
                 hue='T',
                 ci=None,
                 ax=ax)
    ax.set_xlabel('Moneyness')
    ax.set_ylabel('GARCH Implied Volatility by Black-Scholes formula')
    plt.savefig(low_fig_filepath, dpi=200)
    # high vol
    high_fig_filename = result_filename.split('.')[0] + '_high_cond_vol.png'
    high_fig_filepath = os.path.join(data_folder, high_fig_filename)
    fig, ax = plt.subplots(figsize=(8, 6))
    sns.lineplot(data=iv_df_high_vol,
                 x="moneyness",
                 y="iv_GARCH",
                 hue='T',
                 ci=None,
                 ax=ax)
    ax.set_xlabel('Moneyness')
    ax.set_ylabel('GARCH Implied Volatility by Black-Scholes formula')
    plt.savefig(high_fig_filepath, dpi=200)
    return result_df
if __name__ == "__main__":
main() | [
"seaborn.lineplot",
"pandas.read_csv",
"pricing.compute_GARCH_delta",
"garch_const_lambda.estimate_GARCH_11_theta",
"numpy.mean",
"numpy.arange",
"pricing.compute_GARCH_call_price",
"os.path.join",
"pandas.DataFrame",
"numpy.std",
"numpy.max",
"pricing.compute_BS_call_price",
"matplotlib.pyp... | [((981, 1169), 'pricing.compute_GARCH_price', 'p.compute_GARCH_price', ([], {'theta': 'theta_hat', 'num_periods': 'num_periods', 'init_price': 'asset_price_t', 'init_sigma': 'GARCH_sigma_t', 'risk_free_rate': 'risk_free_rate', 'num_simulations': 'num_simulations'}), '(theta=theta_hat, num_periods=num_periods, init_price=\n asset_price_t, init_sigma=GARCH_sigma_t, risk_free_rate=risk_free_rate,\n num_simulations=num_simulations)\n', (1002, 1169), True, 'import pricing as p\n'), ((1487, 1521), 'numpy.mean', 'np.mean', (['GARCH_asset_price_array_T'], {}), '(GARCH_asset_price_array_T)\n', (1494, 1521), True, 'import numpy as np\n'), ((1654, 1815), 'pricing.compute_GARCH_call_price', 'p.compute_GARCH_call_price', ([], {'sim_price_array': 'GARCH_asset_price_array_T', 'strike_price': 'strike_price', 'num_periods': 'num_periods', 'risk_free_rate': 'risk_free_rate'}), '(sim_price_array=GARCH_asset_price_array_T,\n strike_price=strike_price, num_periods=num_periods, risk_free_rate=\n risk_free_rate)\n', (1680, 1815), True, 'import pricing as p\n'), ((2076, 2261), 'pricing.compute_GARCH_delta', 'p.compute_GARCH_delta', ([], {'sim_price_array': 'GARCH_asset_price_array_T', 'strike_price': 'strike_price', 'num_periods': 'num_periods', 'current_price': 'asset_price_t', 'risk_free_rate': 'risk_free_rate'}), '(sim_price_array=GARCH_asset_price_array_T,\n strike_price=strike_price, num_periods=num_periods, current_price=\n asset_price_t, risk_free_rate=risk_free_rate)\n', (2097, 2261), True, 'import pricing as p\n'), ((2549, 2715), 'pricing.compute_BS_call_price', 'p.compute_BS_call_price', ([], {'sigma_sq': 'BS_sigma_sq', 'current_price': 'asset_price_t', 'strike_price': 'strike_price', 'risk_free_rate': 'risk_free_rate', 'num_periods': 'num_periods'}), '(sigma_sq=BS_sigma_sq, current_price=asset_price_t,\n strike_price=strike_price, risk_free_rate=risk_free_rate, num_periods=\n num_periods)\n', (2572, 2715), True, 'import pricing as p\n'), ((2903, 3064), 
'pricing.compute_BS_delta', 'p.compute_BS_delta', ([], {'sigma_sq': 'BS_sigma_sq', 'current_price': 'asset_price_t', 'strike_price': 'strike_price', 'risk_free_rate': 'risk_free_rate', 'num_periods': 'num_periods'}), '(sigma_sq=BS_sigma_sq, current_price=asset_price_t,\n strike_price=strike_price, risk_free_rate=risk_free_rate, num_periods=\n num_periods)\n', (2921, 3064), True, 'import pricing as p\n'), ((3300, 3371), 'numpy.mean', 'np.mean', (['((GARCH_call_price_array - BS_call_price) / BS_call_price * 100)'], {}), '((GARCH_call_price_array - BS_call_price) / BS_call_price * 100)\n', (3307, 3371), True, 'import numpy as np\n'), ((3401, 3465), 'numpy.std', 'np.std', (['((GARCH_call_price_array - BS_call_price) / BS_call_price)'], {}), '((GARCH_call_price_array - BS_call_price) / BS_call_price)\n', (3407, 3465), True, 'import numpy as np\n'), ((3515, 3586), 'numpy.mean', 'np.mean', (['((GARCH_call_delta_array - BS_call_delta) / BS_call_delta * 100)'], {}), '((GARCH_call_delta_array - BS_call_delta) / BS_call_delta * 100)\n', (3522, 3586), True, 'import numpy as np\n'), ((3616, 3680), 'numpy.std', 'np.std', (['((GARCH_call_delta_array - BS_call_delta) / BS_call_delta)'], {}), '((GARCH_call_delta_array - BS_call_delta) / BS_call_delta)\n', (3622, 3680), True, 'import numpy as np\n'), ((3726, 3960), 'pricing.compute_BS_implied_volatility', 'p.compute_BS_implied_volatility', ([], {'call_option_price': 'GARCH_call_price', 'current_price': 'asset_price_t', 'strike_price': 'strike_price', 'risk_free_rate': 'risk_free_rate', 'num_periods': 'num_periods', 'tolerance': '(1e-05)', 'max_iterations': '(100000.0)'}), '(call_option_price=GARCH_call_price,\n current_price=asset_price_t, strike_price=strike_price, risk_free_rate=\n risk_free_rate, num_periods=num_periods, tolerance=1e-05,\n max_iterations=100000.0)\n', (3757, 3960), True, 'import pricing as p\n'), ((5140, 5207), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {'sep': '""";"""', 'decimal': '""","""', 
'parse_dates': "['Datum']"}), "(data_path, sep=';', decimal=',', parse_dates=['Datum'])\n", (5151, 5207), True, 'import pandas as pd\n'), ((5619, 5652), 'garch.compute_log_return', 'g.compute_log_return', (['price_array'], {}), '(price_array)\n', (5639, 5652), True, 'import garch as g\n'), ((6409, 6435), 'numpy.arange', 'np.arange', (['(0.7)', '(1.3)', '(0.001)'], {}), '(0.7, 1.3, 0.001)\n', (6418, 6435), True, 'import numpy as np\n'), ((7303, 7328), 'pandas.DataFrame', 'pd.DataFrame', (['result_list'], {}), '(result_list)\n', (7315, 7328), True, 'import pandas as pd\n'), ((8095, 8137), 'os.path.join', 'os.path.join', (['data_folder', 'result_filename'], {}), '(data_folder, result_filename)\n', (8107, 8137), False, 'import os\n'), ((8865, 8908), 'os.path.join', 'os.path.join', (['data_folder', 'low_fig_filename'], {}), '(data_folder, low_fig_filename)\n', (8877, 8908), False, 'import os\n'), ((8923, 8951), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (8935, 8951), True, 'import matplotlib.pyplot as plt\n'), ((8956, 9047), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'iv_df_low_vol', 'x': '"""moneyness"""', 'y': '"""iv_GARCH"""', 'hue': '"""T"""', 'ci': 'None', 'ax': 'ax'}), "(data=iv_df_low_vol, x='moneyness', y='iv_GARCH', hue='T', ci=\n None, ax=ax)\n", (8968, 9047), True, 'import seaborn as sns\n'), ((9234, 9272), 'matplotlib.pyplot.savefig', 'plt.savefig', (['low_fig_filepath'], {'dpi': '(200)'}), '(low_fig_filepath, dpi=200)\n', (9245, 9272), True, 'import matplotlib.pyplot as plt\n'), ((9393, 9437), 'os.path.join', 'os.path.join', (['data_folder', 'high_fig_filename'], {}), '(data_folder, high_fig_filename)\n', (9405, 9437), False, 'import os\n'), ((9452, 9480), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (9464, 9480), True, 'import matplotlib.pyplot as plt\n'), ((9485, 9577), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'iv_df_high_vol', 'x': 
'"""moneyness"""', 'y': '"""iv_GARCH"""', 'hue': '"""T"""', 'ci': 'None', 'ax': 'ax'}), "(data=iv_df_high_vol, x='moneyness', y='iv_GARCH', hue='T', ci=\n None, ax=ax)\n", (9497, 9577), True, 'import seaborn as sns\n'), ((9764, 9803), 'matplotlib.pyplot.savefig', 'plt.savefig', (['high_fig_filepath'], {'dpi': '(200)'}), '(high_fig_filepath, dpi=200)\n', (9775, 9803), True, 'import matplotlib.pyplot as plt\n'), ((885, 919), 'numpy.sqrt', 'np.sqrt', (['GARCH_stationary_variance'], {}), '(GARCH_stationary_variance)\n', (892, 919), True, 'import numpy as np\n'), ((4996, 5019), 'numpy.sqrt', 'np.sqrt', (['(GARCH_iv * 365)'], {}), '(GARCH_iv * 365)\n', (5003, 5019), True, 'import numpy as np\n'), ((5375, 5395), 'numpy.flip', 'np.flip', (['price_array'], {}), '(price_array)\n', (5382, 5395), True, 'import numpy as np\n'), ((5420, 5453), 'numpy.flip', 'np.flip', (["price_df['Datum'].values"], {}), "(price_df['Datum'].values)\n", (5427, 5453), True, 'import numpy as np\n'), ((5947, 6017), 'garch_const_lambda.estimate_GARCH_11_theta', 'gc.estimate_GARCH_11_theta', (['return_array', 'risk_premium', 'risk_free_rate'], {}), '(return_array, risk_premium, risk_free_rate)\n', (5973, 6017), True, 'import garch_const_lambda as gc\n'), ((6192, 6247), 'garch.estimate_GARCH_11_theta', 'g.estimate_GARCH_11_theta', (['return_array', 'risk_free_rate'], {}), '(return_array, risk_free_rate)\n', (6217, 6247), True, 'import garch as g\n'), ((5486, 5504), 'numpy.min', 'np.min', (['date_array'], {}), '(date_array)\n', (5492, 5504), True, 'import numpy as np\n'), ((5531, 5549), 'numpy.max', 'np.max', (['date_array'], {}), '(date_array)\n', (5537, 5549), True, 'import numpy as np\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import librosa
import numpy as np
import paddle
import torch
from parallel_wavegan.losses import stft_loss as sl
from scipy import signal
from paddlespeech.t2s.modules.stft_loss import MultiResolutionSTFTLoss
from paddlespeech.t2s.modules.stft_loss import STFT
def test_stft():
    """Print magnitude spectrograms from paddlespeech STFT, torch.stft and
    librosa.stft on the same random batch so they can be compared by eye."""
    n_fft, hop, win = 1024, 256, 1024
    paddle_stft = STFT(n_fft=n_fft, hop_length=hop, win_length=win)
    signal_batch = paddle.uniform([4, 46080])
    mag_paddle = paddle_stft.magnitude(signal_batch)
    # torch.stft needs an explicit window to match the Hann-windowed paddle STFT
    hann = signal.get_window('hann', win, fftbins=True)
    spec_torch = torch.stft(
        torch.as_tensor(signal_batch.numpy()),
        n_fft=n_fft,
        hop_length=hop,
        win_length=win,
        window=torch.as_tensor(hann))
    # magnitude from the (real, imag) last axis of the torch output
    mag_torch = (spec_torch**2).sum(-1).sqrt()
    mag_librosa = np.abs(
        librosa.stft(signal_batch.numpy()[0], n_fft=n_fft, hop_length=hop, win_length=win))
    print(mag_torch.shape)
    print(mag_paddle.numpy()[0])
    print(mag_torch.data.cpu().numpy()[0])
    print(mag_librosa)
def test_torch_stft():
    """Print a frame range of torch.stft vs librosa.stft output for comparison.

    NOTE: torch.stft applies no window unless one is passed explicitly.
    """
    waveform = np.random.uniform(-1.0, 1.0, size=(46080, ))
    hann = signal.get_window('hann', 1024, fftbins=True)
    spec_torch = torch.stft(
        torch.as_tensor(waveform),
        n_fft=1024,
        hop_length=256,
        win_length=1024,
        window=torch.as_tensor(hann))
    spec_librosa = librosa.stft(
        waveform, n_fft=1024, hop_length=256, win_length=1024, window='hann')
    # real part of frames 30..59 from each implementation
    print(spec_torch[:, :, 0].data.cpu().numpy()[:, 30:60])
    print(spec_librosa.real[:, 30:60])
    # print(spec_librosa.imag[:, 30:60])
def test_multi_resolution_stft_loss():
    """Cross-check paddlespeech's MultiResolutionSTFTLoss against the
    parallel_wavegan (torch) implementation on the same random tensors."""
    paddle_loss = MultiResolutionSTFTLoss()
    torch_loss = sl.MultiResolutionSTFTLoss()
    pred = paddle.uniform([4, 46080])
    target = paddle.uniform([4, 46080])
    sc_paddle, mag_paddle = paddle_loss(pred, target)
    sc_torch, mag_torch = torch_loss(torch.as_tensor(pred.numpy()), torch.as_tensor(target.numpy()))
    # spectral-convergence losses, then log-magnitude losses
    print(sc_paddle.numpy())
    print(sc_torch.data.cpu().numpy())
    print(mag_paddle.numpy())
    print(mag_torch.data.cpu().numpy())
| [
"numpy.random.uniform",
"paddlespeech.t2s.modules.stft_loss.STFT",
"scipy.signal.get_window",
"paddlespeech.t2s.modules.stft_loss.MultiResolutionSTFTLoss",
"parallel_wavegan.losses.stft_loss.MultiResolutionSTFTLoss",
"torch.as_tensor",
"paddle.uniform",
"librosa.stft"
] | [((902, 951), 'paddlespeech.t2s.modules.stft_loss.STFT', 'STFT', ([], {'n_fft': '(1024)', 'hop_length': '(256)', 'win_length': '(1024)'}), '(n_fft=1024, hop_length=256, win_length=1024)\n', (906, 951), False, 'from paddlespeech.t2s.modules.stft_loss import STFT\n'), ((960, 986), 'paddle.uniform', 'paddle.uniform', (['[4, 46080]'], {}), '([4, 46080])\n', (974, 986), False, 'import paddle\n'), ((1026, 1071), 'scipy.signal.get_window', 'signal.get_window', (['"""hann"""', '(1024)'], {'fftbins': '(True)'}), "('hann', 1024, fftbins=True)\n", (1043, 1071), False, 'from scipy import signal\n'), ((1543, 1586), 'numpy.random.uniform', 'np.random.uniform', (['(-1.0)', '(1.0)'], {'size': '(46080,)'}), '(-1.0, 1.0, size=(46080,))\n', (1560, 1586), True, 'import numpy as np\n'), ((1601, 1646), 'scipy.signal.get_window', 'signal.get_window', (['"""hann"""', '(1024)'], {'fftbins': '(True)'}), "('hann', 1024, fftbins=True)\n", (1618, 1646), False, 'from scipy import signal\n'), ((1814, 1889), 'librosa.stft', 'librosa.stft', (['x'], {'n_fft': '(1024)', 'hop_length': '(256)', 'win_length': '(1024)', 'window': '"""hann"""'}), "(x, n_fft=1024, hop_length=256, win_length=1024, window='hann')\n", (1826, 1889), False, 'import librosa\n'), ((2062, 2087), 'paddlespeech.t2s.modules.stft_loss.MultiResolutionSTFTLoss', 'MultiResolutionSTFTLoss', ([], {}), '()\n', (2085, 2087), False, 'from paddlespeech.t2s.modules.stft_loss import MultiResolutionSTFTLoss\n'), ((2099, 2127), 'parallel_wavegan.losses.stft_loss.MultiResolutionSTFTLoss', 'sl.MultiResolutionSTFTLoss', ([], {}), '()\n', (2125, 2127), True, 'from parallel_wavegan.losses import stft_loss as sl\n'), ((2137, 2163), 'paddle.uniform', 'paddle.uniform', (['[4, 46080]'], {}), '([4, 46080])\n', (2151, 2163), False, 'import paddle\n'), ((2172, 2198), 'paddle.uniform', 'paddle.uniform', (['[4, 46080]'], {}), '([4, 46080])\n', (2186, 2198), False, 'import paddle\n'), ((1676, 1694), 'torch.as_tensor', 'torch.as_tensor', (['x'], {}), 
'(x)\n', (1691, 1694), False, 'import torch\n'), ((1213, 1236), 'torch.as_tensor', 'torch.as_tensor', (['window'], {}), '(window)\n', (1228, 1236), False, 'import torch\n'), ((1780, 1803), 'torch.as_tensor', 'torch.as_tensor', (['window'], {}), '(window)\n', (1795, 1803), False, 'import torch\n')] |
#!/usr/bin/env python
"""
Copyright 2020, University Corporation for Atmospheric Research
See LICENSE.txt for details
"""
import numpy as np
from pyreshaper import iobackend
from . import config
def generate_data(backend='netCDF4'):
    """
    Generate dataset for testing purposes

    Writes one NetCDF file per entry in config.slices, plus a final
    'metafile.nc' that additionally carries the extra time-invariant
    metadata variables (config.xtimvars). Each file gets the same spatial
    grid and its own contiguous slice of time steps.

    @ In, backend, str, optional, pyreshaper I/O backend name
    @ Out, None
    """
    iobackend.set_backend(backend)
    # Test Data Generation
    for i in range(len(config.slices) + 1):
        # Open the file for writing (the extra final iteration writes the metafile)
        fname = config.slices[i] if i < len(config.slices) else 'metafile.nc'
        fobj = iobackend.NCFile(fname, mode='w')
        # Write global attributes to file
        for name, value in config.fattrs.items():
            fobj.setncattr(name, value)
        # Create the dimensions in the file ('time' is unlimited)
        fobj.create_dimension('lat', config.nlat)
        fobj.create_dimension('lon', config.nlon)
        fobj.create_dimension('time', None)
        fobj.create_dimension('strlen', config.nchar)
        # Create the coordinate variables & add attributes
        lat = fobj.create_variable('lat', 'f', ('lat',))
        lon = fobj.create_variable('lon', 'f', ('lon',))
        time = fobj.create_variable('time', 'f', ('time',))
        # Set the coordinate variable attributes
        lat.setncattr('long_name', 'latitude')
        lat.setncattr('units', 'degrees_north')
        lon.setncattr('long_name', 'longitude')
        lon.setncattr('units', 'degrees_east')
        time.setncattr('long_name', 'time')
        time.setncattr('units', 'days since 01-01-0001')
        time.setncattr('calendar', 'noleap')
        # Set the values of the coordinate variables; slice i covers time
        # steps [i*ntime, (i+1)*ntime)
        lat[:] = np.linspace(-90, 90, config.nlat, dtype=np.float32)
        lon[:] = np.linspace(-180, 180, config.nlon, endpoint=False, dtype=np.float32)
        time[:] = np.arange(i * config.ntime, (i + 1) * config.ntime, dtype=np.float32)
        # Create the scalar variables
        for n in range(len(config.scalars)):
            vname = config.scalars[n]
            v = fobj.create_variable(vname, 'd', tuple())
            v.setncattr('long_name', 'scalar{0}'.format(n))
            v.setncattr('units', '[{0}]'.format(vname))
            v.assign_value(np.float64(n * 10))
        # Create the time-invariant metadata variables (the metafile also
        # receives the extra variables listed in config.xtimvars)
        all_timvars = config.timvars + ([] if i < len(config.slices) else config.xtimvars)
        for n in range(len(all_timvars)):
            vname = all_timvars[n]
            v = fobj.create_variable(vname, 'd', ('lat', 'lon'))
            v.setncattr('long_name', 'time-invariant metadata {0}'.format(n))
            v.setncattr('units', '[{0}]'.format(vname))
            v[:] = np.ones((config.nlat, config.nlon), dtype=np.float64) * n
        # Create the time-variant character variables
        for n in range(len(config.chvars)):
            vname = config.chvars[n]
            v = fobj.create_variable(vname, 'c', ('time', 'strlen'))
            v.setncattr('long_name', 'character array {0}'.format(n))
            vdata = [str((n + 1) * m) * (m + 1) for m in range(config.ntime)]
            # view as single characters so the data fits the (time, strlen) shape
            v[:] = (
                np.array(vdata, dtype='S{}'.format(config.nchar))
                .view('S1')
                .reshape(config.ntime, config.nchar)
            )
        # Create the time-variant metadata variables
        for n in range(len(config.tvmvars)):
            vname = config.tvmvars[n]
            v = fobj.create_variable(vname, 'd', ('time', 'lat', 'lon'))
            v.setncattr('long_name', 'time-variant metadata {0}'.format(n))
            v.setncattr('units', '[{0}]'.format(vname))
            v[:] = np.ones((config.ntime, config.nlat, config.nlon), dtype=np.float64) * n
        # Create the time-series variables with a random missing-data mask
        for n in range(len(config.tsvars)):
            vname = config.tsvars[n]
            v = fobj.create_variable(vname, 'd', ('time', 'lat', 'lon'), fill_value=1e36)
            v.setncattr('long_name', 'time-series variable {0}'.format(n))
            v.setncattr('units', '[{0}]'.format(vname))
            v.setncattr('missing_value', 1e36)
            vdata = np.ones((config.ntime, config.nlat, config.nlon), dtype=np.float64) * n
            vmask = np.random.choice(
                [True, False], config.ntime * config.nlat * config.nlon
            ).reshape(config.ntime, config.nlat, config.nlon)
            v[:] = np.ma.MaskedArray(vdata, mask=vmask)
        # BUGFIX: close the file so buffered data is flushed to disk; the
        # original left every NCFile open for the life of the process.
        fobj.close()
if __name__ == '__main__':
    # Allow running this module directly to (re)generate the test dataset
    generate_data(backend='netCDF4')
| [
"pyreshaper.iobackend.NCFile",
"numpy.ma.MaskedArray",
"numpy.ones",
"numpy.arange",
"numpy.linspace",
"numpy.random.choice",
"numpy.float64",
"pyreshaper.iobackend.set_backend"
] | [((300, 330), 'pyreshaper.iobackend.set_backend', 'iobackend.set_backend', (['backend'], {}), '(backend)\n', (321, 330), False, 'from pyreshaper import iobackend\n'), ((533, 566), 'pyreshaper.iobackend.NCFile', 'iobackend.NCFile', (['fname'], {'mode': '"""w"""'}), "(fname, mode='w')\n", (549, 566), False, 'from pyreshaper import iobackend\n'), ((1629, 1680), 'numpy.linspace', 'np.linspace', (['(-90)', '(90)', 'config.nlat'], {'dtype': 'np.float32'}), '(-90, 90, config.nlat, dtype=np.float32)\n', (1640, 1680), True, 'import numpy as np\n'), ((1698, 1767), 'numpy.linspace', 'np.linspace', (['(-180)', '(180)', 'config.nlon'], {'endpoint': '(False)', 'dtype': 'np.float32'}), '(-180, 180, config.nlon, endpoint=False, dtype=np.float32)\n', (1709, 1767), True, 'import numpy as np\n'), ((1786, 1855), 'numpy.arange', 'np.arange', (['(i * config.ntime)', '((i + 1) * config.ntime)'], {'dtype': 'np.float32'}), '(i * config.ntime, (i + 1) * config.ntime, dtype=np.float32)\n', (1795, 1855), True, 'import numpy as np\n'), ((4343, 4379), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['vdata'], {'mask': 'vmask'}), '(vdata, mask=vmask)\n', (4360, 4379), True, 'import numpy as np\n'), ((2179, 2197), 'numpy.float64', 'np.float64', (['(n * 10)'], {}), '(n * 10)\n', (2189, 2197), True, 'import numpy as np\n'), ((2641, 2694), 'numpy.ones', 'np.ones', (['(config.nlat, config.nlon)'], {'dtype': 'np.float64'}), '((config.nlat, config.nlon), dtype=np.float64)\n', (2648, 2694), True, 'import numpy as np\n'), ((3595, 3662), 'numpy.ones', 'np.ones', (['(config.ntime, config.nlat, config.nlon)'], {'dtype': 'np.float64'}), '((config.ntime, config.nlat, config.nlon), dtype=np.float64)\n', (3602, 3662), True, 'import numpy as np\n'), ((4080, 4147), 'numpy.ones', 'np.ones', (['(config.ntime, config.nlat, config.nlon)'], {'dtype': 'np.float64'}), '((config.ntime, config.nlat, config.nlon), dtype=np.float64)\n', (4087, 4147), True, 'import numpy as np\n'), ((4172, 4245), 'numpy.random.choice', 
'np.random.choice', (['[True, False]', '(config.ntime * config.nlat * config.nlon)'], {}), '([True, False], config.ntime * config.nlat * config.nlon)\n', (4188, 4245), True, 'import numpy as np\n')] |
import numpy as np
import torchvision as thv
import torch
import cv2
#np.random.seed(20)
#torch.manual_seed(20)
def check_data_balance(X, Y):
    """Count how many samples of each class label occur in Y.

    X is accepted for API symmetry with the other helpers but is not used.
    Returns a float array whose entry c is the number of samples with label c
    (labels are assumed to be 0..n_classes-1).
    """
    counts = np.zeros(len(np.unique(Y)))
    for label in Y:
        counts[label] += 1
    return counts
def resample_data(X, Y, n_samples = 30000):
    """Greedily pick at most n_samples items from (X, Y), capping each class
    at roughly n_samples / n_classes so the result is class-balanced.

    Samples are taken in their original order; returns (X', Y') as arrays.
    """
    n_classes = len(np.unique(Y))
    per_class_cap = n_samples / n_classes
    taken_per_class = np.zeros(n_classes)
    total_taken = 0
    kept_X, kept_Y = [], []
    for sample, label in zip(X, Y):
        # keep the sample only while both the class cap and the overall
        # sample budget allow it
        if taken_per_class[label] <= per_class_cap and total_taken < n_samples:
            kept_X.append(sample)
            kept_Y.append(label)
            taken_per_class[label] += 1
            total_taken += 1
    return np.asarray(kept_X), np.asarray(kept_Y)
def get_balanced_mnist784(fullset, n_samples, data_normed, batch_size = 32, shuffle = True):
    """Build a class-balanced, flattened (784-dim) dataset and loader from an
    MNIST-style torchvision dataset.

    data_normed selects the pixel range: 1 -> [0, 1], -1 -> [-1, 1],
    anything else leaves the raw [0, 255] values (as floats).
    Note: ~50k/60k is the upper limit for a balanced train set, ~8k/10k for val.
    """
    balanced_X, balanced_Y = resample_data(fullset.data.numpy(), fullset.targets.numpy(), n_samples)
    # flatten each 28x28 image to a 784 vector and force float dtype
    balanced_X = balanced_X.reshape((balanced_X.shape[0], -1)) / 1.0
    if data_normed == 1:
        balanced_X = balanced_X / 255.0
    elif data_normed == -1:
        balanced_X = 2 * balanced_X / 255.0 - 1.0
    # wrap in a tensor dataset and dataloader
    dataset = torch.utils.data.TensorDataset(torch.tensor(balanced_X), torch.tensor(balanced_Y))
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=3)
    return dataset, loader
def get_balanced_mnist_28x28(fullset, n_samples, batch_size = 32, shuffle = True):
    """Build a class-balanced dataset/loader from an MNIST-style torchvision
    dataset, keeping the 2-D 28x28 image layout (plus a channel axis)."""
    # resample to a class-balanced subset
    X, Y = resample_data(fullset.data.numpy(), fullset.targets.numpy(), n_samples) # ~50k / 60k is upper limit for balanced train dataset, ~8k/10k for val
    # above is n_samples x 28 x 28 and n_samples
    # we add a channel dimension in between -> (N, 1, 28, 28) for the LeNet5 model
    X = X[:, None, :, :]
    print("l: ", X.shape, Y.shape)
    print("l2: ",torch.tensor(X).shape, torch.tensor(Y).shape)
    # create tensor dataset, dataloader
    set = torch.utils.data.TensorDataset(torch.tensor(X), torch.tensor(Y))
    loader = torch.utils.data.DataLoader(set, batch_size= batch_size, shuffle= shuffle, num_workers=3)
    return set, loader | [
"torch.utils.data.DataLoader",
"numpy.asarray",
"numpy.zeros",
"torch.tensor",
"numpy.unique"
] | [((188, 207), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (196, 207), True, 'import numpy as np\n'), ((410, 429), 'numpy.zeros', 'np.zeros', (['n_classes'], {}), '(n_classes)\n', (418, 429), True, 'import numpy as np\n'), ((675, 692), 'numpy.asarray', 'np.asarray', (['new_X'], {}), '(new_X)\n', (685, 692), True, 'import numpy as np\n'), ((705, 722), 'numpy.asarray', 'np.asarray', (['new_Y'], {}), '(new_Y)\n', (715, 722), True, 'import numpy as np\n'), ((1387, 1478), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(3)'}), '(set, batch_size=batch_size, shuffle=shuffle,\n num_workers=3)\n', (1414, 1478), False, 'import torch\n'), ((2108, 2199), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['set'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': '(3)'}), '(set, batch_size=batch_size, shuffle=shuffle,\n num_workers=3)\n', (2135, 2199), False, 'import torch\n'), ((161, 173), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (170, 173), True, 'import numpy as np\n'), ((383, 395), 'numpy.unique', 'np.unique', (['Y'], {}), '(Y)\n', (392, 395), True, 'import numpy as np\n'), ((1340, 1355), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (1352, 1355), False, 'import torch\n'), ((1357, 1372), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (1369, 1372), False, 'import torch\n'), ((2061, 2076), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (2073, 2076), False, 'import torch\n'), ((2078, 2093), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (2090, 2093), False, 'import torch\n'), ((1934, 1949), 'torch.tensor', 'torch.tensor', (['X'], {}), '(X)\n', (1946, 1949), False, 'import torch\n'), ((1957, 1972), 'torch.tensor', 'torch.tensor', (['Y'], {}), '(Y)\n', (1969, 1972), False, 'import torch\n')] |
import numpy as np
# Function to get the idle time Xi on machine B for all jobs
def get_idle_time(data, optimal_seq):
    """Compute the machine-B idle time X_i for each job in processing order.

    data is a 2 x n array: row 0 holds the machine-A times A_i, row 1 the
    machine-B times B_i. optimal_seq lists 1-based job numbers in the order
    they are processed. Returns the list [X_1, ..., X_n].
    """
    # Reorder the Ai's & Bi's according to the given job sequence
    a = [data[0][i-1] for i in optimal_seq]
    b = [data[1][i-1] for i in optimal_seq]
    # X_i = max(sum(a[:i+1]) - sum(b[:i]) - sum(x[:i]), 0). Keep running
    # totals instead of re-summing the prefixes each iteration, which turns
    # the original O(n^2) loop into O(n) with identical results.
    x = []
    a_total = 0    # running sum(a[:i+1])
    bx_total = 0   # running sum(b[:i]) + sum(x[:i])
    for i in range(data.shape[1]):
        a_total += a[i]
        xi = max(a_total - bx_total, 0.0)
        x.append(xi)
        bx_total += b[i] + xi
    return x
# Function to obtain the optimal sequence for a n/2 scheduling problem using Johnson's algorithm
def johnsons_algo(x):
    """Return the optimal 1-based job sequence for an n-job / 2-machine flow
    shop using Johnson's rule.

    Processing times are scanned smallest first; a job whose smallest time is
    on machine A (row 0) is scheduled as early as possible, one whose smallest
    time is on machine B (row 1) as late as possible.
    """
    n_jobs = x.shape[1]
    # 0 is never a valid job number, so zeros mark unfilled slots
    schedule = np.zeros(n_jobs, dtype=int)
    head, tail = 0, n_jobs - 1
    # Flat indices of all processing times, smallest value first
    order = np.argsort(x, axis=None)
    pos = 0
    # Keep placing jobs until every slot between head and tail is filled
    while head <= tail:
        # Recover (machine row, job column) from the flat index
        machine, job_col = divmod(int(order[pos]), n_jobs)
        job = job_col + 1
        # A job's larger processing time shows up later in `order`; only
        # place each job the first time one of its two times is reached.
        if job not in schedule:
            if machine == 0:
                # Smallest time on machine A: schedule as early as possible
                schedule[head] = job
                head += 1
            else:
                # Smallest time on machine B: schedule as late as possible
                schedule[tail] = job
                tail -= 1
        pos += 1
    return schedule
def shop_floor_scheduling(schedule_file):
    """Load a 2-machine schedule from a CSV file, solve it with Johnson's
    rule, and print the sequence, machine-B idle times and total times."""
    processing_times = np.genfromtxt(schedule_file, delimiter=',')
    sequence = johnsons_algo(processing_times)
    idle_times = get_idle_time(processing_times, sequence)
    print("Optimal Sequence of Jobs:\t", *sequence)
    print("Idle Time Xi on Machine B for jobs:")
    for job_idx, idle in enumerate(idle_times):
        print(f"\tX{job_idx+1} = {idle}")
    # total elapsed time = machine-B busy time plus its idle time
    print("Total idle time on Machine B:\t", sum(idle_times))
    print("Total processing time:\t\t", sum(idle_times) + sum(processing_times[1][:]))
def main():
    # Entry point: solve the example scheduling problem shipped as schedule.csv
    shop_floor_scheduling("schedule.csv")
if __name__ == "__main__":
main() | [
"numpy.zeros",
"numpy.genfromtxt",
"numpy.unravel_index"
] | [((687, 718), 'numpy.zeros', 'np.zeros', (['x.shape[1]'], {'dtype': 'int'}), '(x.shape[1], dtype=int)\n', (695, 718), True, 'import numpy as np\n'), ((2111, 2154), 'numpy.genfromtxt', 'np.genfromtxt', (['schedule_file'], {'delimiter': '""","""'}), "(schedule_file, delimiter=',')\n", (2124, 2154), True, 'import numpy as np\n'), ((1091, 1135), 'numpy.unravel_index', 'np.unravel_index', (['sorted_indices[i]', 'x.shape'], {}), '(sorted_indices[i], x.shape)\n', (1107, 1135), True, 'import numpy as np\n')] |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Apr. 13, 2021
@author: cogljj, wangc
base class for tensorflow and keras regression used for deep neural network
i.e. Multi-layer perceptron regression, CNN, LSTM
"""
#External Modules------------------------------------------------------------------------------------
import copy
import numpy as np
import random as rn
import matplotlib
import platform
from scipy import stats
import os
import utils.importerUtils
tf = utils.importerUtils.importModuleLazyRenamed("tf", globals(), "tensorflow")
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .KerasBase import KerasBase
#Internal Modules End--------------------------------------------------------------------------------
class KerasRegression(KerasBase):
"""
Multi-layer perceptron classifier constructed using Keras API in TensorFlow
"""
info = {'problemtype':'regression', 'normalize':True}
  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    # Reuse the full KerasBase input specification; only the user-manual
    # description text is overridden for this ROM type.
    specs = super().getInputSpecification()
    specs.description = r"""The \xmlNode{KerasRegression}
    """
    return specs
  def __init__(self):
    """
      A constructor that will appropriately initialize a keras deep neural network object
      @ In, None
      @ Out, None
    """
    super().__init__()
    # Tag prefixed to all messages printed/raised by this ROM
    self.printTag = 'KerasRegression'
  def _getFirstHiddenLayer(self, layerInstant, layerSize, layerDict):
    """
      Creates the first hidden layer
      @ In, layerInstant, class, layer type from tensorflow.python.keras.layers
      @ In, layerSize, int, nodes in layer
      @ In, layerDict, dict, layer details
      @ Out, layer, tensorflow.python.keras.layers, new layer
    """
    # input_shape=[None, nFeatures]: the time axis is left unspecified so
    # sequences of any length are accepted. Assumes self.featv holds the
    # training feature array with features on the last axis -- TODO confirm.
    return layerInstant(layerSize,input_shape=[None,self.featv.shape[-1]], **layerDict)
  def _getLastLayer(self, layerInstant, layerDict):
    """
      Creates the last layer
      @ In, layerInstant, class, layer type from tensorflow.python.keras.layers
      @ In, layerDict, dict, layer details
      @ Out, layer, tensorflow.python.keras.layers, new layer
    """
    # One output node per target, wrapped in TimeDistributed so the layer is
    # applied independently at every time step and the output keeps the
    # input's time axis.
    return tf.keras.layers.TimeDistributed(layerInstant(len(self.targv),**layerDict))
def _getTrainingTargetValues(self, names, values):
"""
Gets the target values to train with, which differs depending
on if this is a regression or classifier.
@ In, names, list of names
@ In, values, list of values
@ Out, targetValues, numpy.ndarray of shape (numSamples, numTimesteps, numFeatures)
"""
# Features must be 3d i.e. [numSamples, numTimeSteps, numFeatures]
for target in self.target:
if target not in names:
self.raiseAnError(IOError,'The target '+target+' is not in the training set')
firstTarget = values[names.index(self.target[0])]
targetValues = np.zeros((len(firstTarget), len(firstTarget[0]),
len(self.target)))
for i, target in enumerate(self.target):
self._localNormalizeData(values, names, target)
targetValues[:, :, i] = self._scaleToNormal(values[names.index(target)], target)
return targetValues
def __confidenceLocal__(self,featureVals):
"""
This should return an estimation of the quality of the prediction.
@ In, featureVals,numpy.array, 2-D or 3-D numpy array, [n_samples,n_features]
or shape=[numSamples, numTimeSteps, numFeatures]
@ Out, confidence, float, the confidence
"""
self.raiseAnError(NotImplementedError,'KerasRegression : __confidenceLocal__ method must be implemented!')
def evaluate(self,edict):
"""
Method to perform the evaluation of a point or a set of points through the previous trained supervisedLearning algorithm
NB.the supervisedLearning object is committed to convert the dictionary that is passed (in), into the local format
the interface with the kernels requires.
@ In, edict, dict, evaluation dictionary
@ Out, evaluate, dict, {target: evaluated points}
"""
if type(edict) != dict:
self.raiseAnError(IOError,'method "evaluate". The evaluate request/s need/s to be provided through a dictionary. Type of the in-object is ' + str(type(edict)))
names, values = list(edict.keys()), list(edict.values())
for index in range(len(values)):
resp = self.checkArrayConsistency(values[index], self.isDynamic())
if not resp[0]:
self.raiseAnError(IOError,'In evaluate request for feature '+names[index]+':'+resp[1])
# construct the evaluation matrix
featureValues = []
featureValuesShape = None
for feat in self.features:
if feat in names:
fval = values[names.index(feat)]
resp = self.checkArrayConsistency(fval, self.isDynamic())
if not resp[0]:
self.raiseAnError(IOError,'In training set for feature '+feat+':'+resp[1])
fval = np.asarray(fval)
if featureValuesShape is None:
featureValuesShape = fval.shape
if featureValuesShape != fval.shape:
self.raiseAnError(IOError,'In training set, the number of values provided for feature '+feat+' are not consistent to other features!')
self._localNormalizeData(values,names,feat)
fval = self._scaleToNormal(fval, feat)
featureValues.append(fval)
else:
self.raiseAnError(IOError,'The feature ',feat,' is not in the training set')
featureValues = np.stack(featureValues, axis=-1)
result = self.__evaluateLocal__(featureValues)
pivotParameter = self.pivotID
if type(edict[pivotParameter]) == type([]):
#XXX this should not be needed since sampler should just provide the numpy array.
#Currently the CustomSampler provides all the pivot parameter values instead of the current one.
self.raiseAWarning("Adjusting pivotParameter because incorrect type provided")
result[pivotParameter] = edict[pivotParameter][0]
else:
result[pivotParameter] = edict[pivotParameter]
return result
def __evaluateLocal__(self,featureVals):
"""
Perform regression on samples in featureVals.
classification labels will be returned based on num_classes
@ In, featureVals, numpy.array, 2-D for static case and 3D for time-dependent case, values of features
@ Out, prediction, dict, predicted values
"""
featureVals = self._preprocessInputs(featureVals)
prediction = {}
outcome = self._ROM.predict(featureVals)
for i, target in enumerate(self.target):
prediction[target] = self._invertScaleToNormal(outcome[0, :, i], target)
return prediction
| [
"numpy.stack",
"numpy.asarray"
] | [((6420, 6452), 'numpy.stack', 'np.stack', (['featureValues'], {'axis': '(-1)'}), '(featureValues, axis=-1)\n', (6428, 6452), True, 'import numpy as np\n'), ((5881, 5897), 'numpy.asarray', 'np.asarray', (['fval'], {}), '(fval)\n', (5891, 5897), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Functions to analyze the relevance of features selected with smaller
networks when classifying larger networks.
"""
from joblib import Parallel, delayed
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import seaborn as sns
from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from cost_based_selection import cost_based_methods
def common_features_plot(dict_small, dict_large, dict_fmt = None):
    """ Plot the graph of evolution of common features selected with different reference tables.

    This function returns and plots the number of common features selected when
    using larger and smaller networks, depending on the number of features
    selected. The best case scenario is represented by the black curve, i.e.
    when for each number of selected features, the same features are selected
    when using small or large networks. This function also returns the areas
    under the curves of the evolution of the number of common selected
    features, relatively to the best curve, for all intervals [1,p] where
    p = 1, ..., number of features.

    Args:
        dict_small (dict):
            a dictionary containing as keys the name of the methods, and as
            values the corresponding rankings (list) obtained when using smaller
            networks.
        dict_large (dict):
            a dictionary containing as keys the name of the methods, and as
            values the corresponding rankings (list) obtained when using larger
            networks.
        dict_fmt (dict):
            a dictionary containing as keys the name of the methods, and as
            values a string in the format strings as employed by
            matplotlib.pyplot.plot: namely '[marker][line][color]'.
            None (the default) behaves like an empty dictionary.
    Returns:
        dict_common (dict):
            a dictionary containing as keys the name of the methods, and as
            values the number of common features selected in dict_small and
            dict_large. All possible subset sizes p are considered.
        dict_areas (dict):
            a dictionary containing as keys the name of the methods, and as
            values the relative areas between the evolution of
            the common selected features curve and the best case scenario.
            All possible intervals [1,p] are considered.
    """
    # Avoid a mutable default argument.
    dict_fmt = dict() if dict_fmt is None else dict_fmt

    # Check that the two dictionaries have exactly the same keys (both
    # directions; a key present only in dict_small would otherwise fail
    # later with an uninformative KeyError).
    if set(dict_large) != set(dict_small):
        raise Exception("The keys in both dictionaries must be the same.")

    # Recover the number of features from any ranking (all have the same length).
    nCov = len(next(iter(dict_small.values())))

    # Number of common features and relative areas, per method and subset size.
    dict_common = {key: [None]*nCov for key in dict_small}
    dict_areas = {key: [None]*nCov for key in dict_small}

    # For each possible subset size, compute the number of common features selected.
    for i in range(nCov):
        for key in dict_small:
            dict_common[key][i] = len(_private_common(dict_small[key][:i+1], dict_large[key][:i+1]))
            # Relative area on [1, i+1]: cumulative common count over the
            # best-case cumulative count 1 + 2 + ... + (i+1).
            dict_areas[key][i] = np.sum(dict_common[key][:i+1]) / np.sum(list(range(1, i+2, 1)))

    subset_grid = list(range(1, nCov+1))

    # Plot the curves of evolution.
    fig, ax = plt.subplots(figsize = (13, 8))
    ax.step(subset_grid, list(range(1, len(subset_grid)+1, 1)), 'k-', label="Best", where='post')
    # Use the provided format strings only when one is supplied for every method.
    use_fmt = len(dict_fmt) != 0 and len(dict_fmt) == len(dict_small)
    for key in dict_small:
        if use_fmt:
            ax.step(subset_grid, dict_common[key], dict_fmt[key], label = key, where = 'post')
        else:
            ax.step(subset_grid, dict_common[key], label = key, where='post')
    ax.legend(prop={'size': 12})
    ax.set_xlabel("Size of feature subset", fontsize=12)
    ax.set_ylabel("Number of common features", fontsize=12)
    plt.subplots_adjust(top=0.95, right=0.95)
    #fig.show()
    return dict_common, dict_areas
def _private_common(lst1, lst2):
""" Function to determine the common elements in two lists. """
return list(set(lst1) & set(lst2))
def difference_accuracy_small_large(dict_small, dict_large, X_val, y_val, subset_size_vec,
                                    classifier_func, args_classifier = None, num_fold = 3):
    """ Compute the decrease of classification accuracy when using small networks instead of large ones.

    This function computes the difference of accuracy to classify networks with
    a large number of nodes, when using networks with that many nodes to select
    the features, or a smaller number of nodes; in other terms the difference
    Acc(large) - Acc(small) is computed. This is done for a sklearn classifier,
    and cross-validation is used. The individual accuracies are also returned.

    Args:
        dict_small (dict):
            a dictionary containing as keys the name of the methods, and as
            values the corresponding rankings (list) obtained when using smaller
            networks.
        dict_large (dict):
            a dictionary containing as keys the name of the methods, and as
            values the corresponding rankings (list) obtained when using larger
            networks.
        X_val (numpy.ndarray):
            the numerical features to use as validation data, where
            each row represents an individual, and each column a feature.
            These contain all the features, which are not selected yet.
        y_val (numpy.ndarray):
            a list of integers representing the validation data labels.
        subset_size_vec (list):
            a list of different feature subset sizes on which the difference of
            accuracy will be computed.
        classifier_func (sklearn classifier):
            a sklearn classifier, compatible with the function
            sklearn.model_selection.cross_val_score. For examples:
            sklearn.neighbors.KNeighborsClassifier or sklearn.svm.SVC.
        args_classifier (dict):
            a dictionary containing as keys the arguments of the classifier_func
            function, and as values the argument values. None (the default)
            behaves like an empty dictionary.
        num_fold (int):
            the number of folds to use for the cross-validation.
    Returns:
        dict_accuracy_decrease (dict):
            per method, for each subset size, the decrease of classification
            accuracy when selecting features with small instead of large
            networks: Acc(large) - Acc(small).
        dict_accuracy_large_using_large (dict):
            per method and subset size, the average cross-validated accuracy
            when classifying large networks with features selected on large networks.
        dict_std_large_using_large (dict):
            per method and subset size, the corresponding standard deviation
            across folds.
        dict_accuracy_large_using_small (dict):
            per method and subset size, the average cross-validated accuracy
            when classifying large networks with features selected on small networks.
        dict_std_large_using_small (dict):
            per method and subset size, the corresponding standard deviation
            across folds.
    """
    # Avoid a mutable default argument.
    args_classifier = dict() if args_classifier is None else args_classifier

    cross_val = StratifiedKFold(n_splits = num_fold)
    num_subsets = len(subset_size_vec)

    # Check that the two dictionaries have exactly the same keys.
    if set(dict_large) != set(dict_small):
        raise Exception("The keys in both dictionaries must be the same.")

    # Initialize the outputs.
    dict_accuracy_decrease = {key: [None]*num_subsets for key in dict_small}
    dict_accuracy_large_using_large = {key: [None]*num_subsets for key in dict_small}
    dict_std_large_using_large = {key: [None]*num_subsets for key in dict_small}
    dict_accuracy_large_using_small = {key: [None]*num_subsets for key in dict_small}
    dict_std_large_using_small = {key: [None]*num_subsets for key in dict_small}

    # For each subset size (enumerate replaces the previous manual counter).
    for idx, subset_size in enumerate(subset_size_vec):
        for key in dict_small:
            # Deduce the features to keep using small or large networks.
            top_small = dict_small[key][:subset_size]
            top_large = dict_large[key][:subset_size]
            # Reduce X_val accordingly.
            X_val_using_small = X_val[:, top_small]
            X_val_using_large = X_val[:, top_large]
            # Train and run the classifier to determine the CV accuracy.
            classifier = classifier_func(**args_classifier)
            scores_using_small = cross_val_score(classifier,
                                                X_val_using_small,
                                                y_val, cv = cross_val)
            scores_using_large = cross_val_score(classifier,
                                                X_val_using_large,
                                                y_val, cv = cross_val)
            # Fill the outputs.
            dict_accuracy_large_using_small[key][idx] = scores_using_small.mean()
            dict_std_large_using_small[key][idx] = scores_using_small.std()
            dict_accuracy_large_using_large[key][idx] = scores_using_large.mean()
            dict_std_large_using_large[key][idx] = scores_using_large.std()
            dict_accuracy_decrease[key][idx] = dict_accuracy_large_using_large[key][idx] - dict_accuracy_large_using_small[key][idx]

    return dict_accuracy_decrease, dict_accuracy_large_using_large, dict_std_large_using_large, dict_accuracy_large_using_small, dict_std_large_using_small
def _rank_features_all_methods(X_train, y_train, is_disc, args_mRMR, args_JMI, args_JMIM,
                               args_reliefF_classic, args_reliefF_rf,
                               args_rf_impurity, args_rf_permutation):
    """ Rank the features of one training table with every selection method (no cost penalization).

    Returns a dictionary mapping each method name to its full feature ranking.
    """
    ranking_mRMR, *other = cost_based_methods.mRMR(X = X_train, y = y_train,
                                                   is_disc = is_disc, **args_mRMR)
    ranking_JMI, *other = cost_based_methods.JMI(X = X_train, y = y_train,
                                                is_disc = is_disc, **args_JMI)
    ranking_JMIM, *other = cost_based_methods.JMIM(X = X_train, y = y_train,
                                                   is_disc = is_disc, **args_JMIM)
    ranking_reliefF_c, *other = cost_based_methods.reliefF(X = X_train, y = y_train,
                                                           proximity = "distance",
                                                           **args_reliefF_classic)
    # Bug fix: the random-forest-proximity reliefF now receives its own
    # arguments (args_reliefF_rf); previously args_reliefF_classic was
    # mistakenly reused and args_reliefF_rf was silently ignored.
    ranking_reliefF_rf, *other = cost_based_methods.reliefF(X = X_train, y = y_train,
                                                            proximity = "rf prox",
                                                            **args_reliefF_rf)
    ranking_impurity, *other = cost_based_methods.pen_rf_importance(X = X_train, y = y_train,
                                                                    imp_type = "impurity",
                                                                    **args_rf_impurity)
    ranking_permut, *other = cost_based_methods.pen_rf_importance(X = X_train, y = y_train,
                                                                  imp_type = "permutation",
                                                                  **args_rf_permutation)
    return {"mRMR": ranking_mRMR, "JMI": ranking_JMI, "JMIM": ranking_JMIM,
            "reliefF classic": ranking_reliefF_c, "reliefF RF prox.": ranking_reliefF_rf,
            "RF MDI": ranking_impurity, "RF MDA": ranking_permut}

def common_features_difference_accuracy(dfSummaries_small, dfSummaries_large,
                                        dfModIndex_small, dfModIndex_large,
                                        is_disc, subset_size_vec, val_size = 0.5,
                                        random_seed = 123, num_fold = 3,
                                        args_mRMR = None,
                                        args_JMI = None, args_JMIM = None,
                                        args_reliefF_classic = None,
                                        args_reliefF_rf = None,
                                        args_rf_impurity = None,
                                        args_rf_permutation = None,
                                        args_svm = None,
                                        args_knn = None):
    """ Display the analyses on common features and decrease of accuracy induced by the use of smaller networks.

    This is a wrapper to run the functions common_features_plot
    and difference_accuracy_small_large jointly and easily, e.g. for replicate
    analyses. For every possible subset size of features, it returns the
    common number of features selected by the different selection methods when
    using small and large networks, the areas under the curves of the
    evolution of these numbers, and the decrease of classification accuracy
    Acc(large) - Acc(small) when classifying large networks with features
    selected on small rather than large networks.

    The reference tables are divided in train and validation sets with a
    proportion of validation data defined by val_size; accuracy is determined
    by num_fold-fold cross-validation on the validation part. All the filter
    selection methods implemented in cost_based_methods.py are used, with each
    penalization parameter left at 0. Two classifiers are used, an SVM and a
    k-nearest-neighbors classifier, configured via args_svm and args_knn.

    Args:
        dfSummaries_small (pandas.core.frame.DataFrame):
            summary statistic values (columns) per simulated data (rows),
            computed using small networks.
        dfSummaries_large (pandas.core.frame.DataFrame):
            summary statistic values per simulated data, computed using large
            networks.
        dfModIndex_small (pandas.core.frame.DataFrame):
            model indexes associated to the table from small networks.
        dfModIndex_large (pandas.core.frame.DataFrame):
            model indexes associated to the table from large networks.
        is_disc (list):
            Booleans, True where the feature is discrete, False if continuous.
        subset_size_vec (list):
            sizes of feature subsets for which the decrease of accuracy is
            computed.
        val_size (float):
            proportion of data used for validation.
        random_seed (int):
            random seed for the train/validation partitioning.
        num_fold (int):
            number of folds for the cross-validation.
        args_mRMR, args_JMI, args_JMIM, args_reliefF_classic, args_reliefF_rf,
        args_rf_impurity, args_rf_permutation (dict):
            optional keyword arguments for the corresponding
            cost_based_methods functions. None (the default) means "use the
            method defaults".
        args_svm (dict):
            keyword arguments for sklearn.svm.SVC. None means defaults.
        args_knn (dict):
            keyword arguments for sklearn.neighbors.KNeighborsClassifier.
            None means {'n_neighbors': 10, 'n_jobs': 1}.
    Returns:
        dict_common (dict):
            per method, the number of common features selected with small and
            large networks, for all subset sizes.
        dict_areas (dict):
            per method, the relative areas between the common-features curve
            and the best case scenario curve, for all intervals [1,p].
        decrease_acc_SVM (dict):
            per method and subset size, the decrease of accuracy
            Acc(large) - Acc(small) for the SVM classifier.
        decrease_acc_knn (dict):
            per method and subset size, the decrease of accuracy
            Acc(large) - Acc(small) for the k-nearest-neighbors classifier.
    """
    # Replace the former mutable default arguments with None sentinels.
    args_mRMR = dict() if args_mRMR is None else args_mRMR
    args_JMI = dict() if args_JMI is None else args_JMI
    args_JMIM = dict() if args_JMIM is None else args_JMIM
    args_reliefF_classic = dict() if args_reliefF_classic is None else args_reliefF_classic
    args_reliefF_rf = dict() if args_reliefF_rf is None else args_reliefF_rf
    args_rf_impurity = dict() if args_rf_impurity is None else args_rf_impurity
    args_rf_permutation = dict() if args_rf_permutation is None else args_rf_permutation
    args_svm = dict() if args_svm is None else args_svm
    args_knn = {'n_neighbors': 10, 'n_jobs': 1} if args_knn is None else args_knn

    ### Select features with small networks
    X_small = np.array(dfSummaries_small)
    y_small = dfModIndex_small.modIndex.tolist()
    # Split the reference table in training and validation set.
    (X_train_small, X_val_small, y_train_small, y_val_small) = train_test_split(X_small, y_small, test_size=val_size, random_state=random_seed, stratify=y_small)
    # Full ranking of the features per method, without penalization.
    dict_small_rankings = _rank_features_all_methods(X_train_small, y_train_small, is_disc,
                                                     args_mRMR, args_JMI, args_JMIM,
                                                     args_reliefF_classic, args_reliefF_rf,
                                                     args_rf_impurity, args_rf_permutation)

    ### Select features with large networks
    X_large = np.array(dfSummaries_large)
    y_large = dfModIndex_large.modIndex.tolist()
    (X_train_large, X_val_large, y_train_large, y_val_large) = train_test_split(X_large, y_large, test_size=val_size, random_state=random_seed, stratify=y_large)
    dict_large_rankings = _rank_features_all_methods(X_train_large, y_train_large, is_disc,
                                                     args_mRMR, args_JMI, args_JMIM,
                                                     args_reliefF_classic, args_reliefF_rf,
                                                     args_rf_impurity, args_rf_permutation)

    ### Compare the rankings obtained with small and large networks.
    dict_fmt = {"mRMR": 'b--', "JMI": 'b-.', "JMIM": 'b:', "reliefF classic": 'r--',
                "reliefF RF prox.": 'r:', "RF MDI": 'g--', "RF MDA": 'g:'}
    dict_common, dict_areas = common_features_plot(dict_small_rankings,
                                                   dict_large_rankings,
                                                   dict_fmt)

    ### Classification accuracy to predict the network classes of large
    ### networks, using the features selected with small or large networks.
    decrease_acc_SVM, *other = difference_accuracy_small_large(dict_small = dict_small_rankings,
                                                               dict_large = dict_large_rankings,
                                                               X_val = X_val_large,
                                                               y_val = y_val_large,
                                                               subset_size_vec = subset_size_vec,
                                                               classifier_func = SVC,
                                                               args_classifier = args_svm,
                                                               num_fold = num_fold)
    decrease_acc_knn, *other = difference_accuracy_small_large(dict_small = dict_small_rankings,
                                                               dict_large = dict_large_rankings,
                                                               X_val = X_val_large,
                                                               y_val = y_val_large,
                                                               subset_size_vec = subset_size_vec,
                                                               classifier_func = KNeighborsClassifier,
                                                               args_classifier = args_knn,
                                                               num_fold = num_fold)

    return dict_common, dict_areas, decrease_acc_SVM, decrease_acc_knn
def replication_common_features_difference_accuracy(dfSummaries_small, dfSummaries_large,
                                                    dfModIndex_small, dfModIndex_large,
                                                    is_disc, subset_size_vec,
                                                    val_size = 0.5, num_fold = 3,
                                                    args_mRMR = None,
                                                    args_JMI = None,
                                                    args_JMIM = None,
                                                    args_reliefF_classic = None,
                                                    args_reliefF_rf = None,
                                                    args_rf_impurity = None,
                                                    args_rf_permutation = None,
                                                    args_svm = None,
                                                    args_knn = None,
                                                    num_rep = 50, num_cores = 1):
    """ Launch with replication the function common_features_difference_accuracy.

    This function launches num_rep times the function
    common_features_difference_accuracy, possibly in parallel.
    The results must then be analyzed with the function analyze_replication_res.

    Args:
        The arguments are almost identical to the ones in the function
        common_features_difference_accuracy (see its documentation), except
        for random_seed which is set to the replication index so that each
        run partitions the reference table differently. The args_* dictionaries
        default to None, meaning "use the downstream defaults" (this replaces
        the former mutable default arguments).
        num_rep (int):
            the number of replications to perform. Each run is performed
            on a different training/validation partitioning of the reference
            table. 50 by default.
        num_cores (int):
            the number of CPU cores to perform parallel computing.
    Returns:
        replication_res (list):
            the resulting list containing the results over multiple runs.
            This output must be given to the function analyze_replication_res.
    """
    # Replace the former mutable default arguments with None sentinels.
    args_mRMR = dict() if args_mRMR is None else args_mRMR
    args_JMI = dict() if args_JMI is None else args_JMI
    args_JMIM = dict() if args_JMIM is None else args_JMIM
    args_reliefF_classic = dict() if args_reliefF_classic is None else args_reliefF_classic
    args_reliefF_rf = dict() if args_reliefF_rf is None else args_reliefF_rf
    args_rf_impurity = dict() if args_rf_impurity is None else args_rf_impurity
    args_rf_permutation = dict() if args_rf_permutation is None else args_rf_permutation
    args_svm = dict() if args_svm is None else args_svm
    args_knn = {'n_neighbors': 10, 'n_jobs': 1} if args_knn is None else args_knn

    # One task per replication; the seed doubles as the replication index.
    tasks = (delayed(common_features_difference_accuracy)(
                 dfSummaries_small = dfSummaries_small,
                 dfSummaries_large = dfSummaries_large,
                 dfModIndex_small = dfModIndex_small,
                 dfModIndex_large = dfModIndex_large,
                 is_disc = is_disc,
                 subset_size_vec = subset_size_vec,
                 val_size = val_size,
                 random_seed = seed,
                 num_fold = num_fold,
                 args_mRMR = args_mRMR,
                 args_JMI = args_JMI,
                 args_JMIM = args_JMIM,
                 args_reliefF_classic = args_reliefF_classic,
                 args_reliefF_rf = args_reliefF_rf,
                 args_rf_impurity = args_rf_impurity,
                 args_rf_permutation = args_rf_permutation,
                 args_svm = args_svm,
                 args_knn = args_knn) for seed in range(1, num_rep + 1))
    replication_res = Parallel(n_jobs = num_cores)(tasks)
    return replication_res
def _plot_decrease_boxplot(df_res, showfliers, save, file_name):
    """ Draw one decrease-of-accuracy boxplot and optionally save it to file_name. """
    fig, ax = plt.subplots(figsize=(12, 8))
    bb = sns.boxplot(ax=ax, x="Size of feature subset", y="Precision", hue="Methods",
                     data=df_res, palette="Set1", showfliers = showfliers)
    #ax.set(ylim=(-0.06, 0.06))
    bb.set_xlabel("Size of feature subset", fontsize=12)
    bb.set_ylabel("Precision", fontsize=12)
    plt.subplots_adjust(top=0.95, right=0.95)
    if save:
        plt.savefig(file_name, pad_inches=0)

def analyze_replication_res(replication_res, subset_size_vec,
                            showfliers = True, save = True, plot_reliefF = True):
    """ Analyze replicated results returned by common_features_difference_accuracy.

    This function analyzes the results returned when launching the function
    common_features_difference_accuracy multiple times (e.g. through
    replication_common_features_difference_accuracy). It returns, over the
    replicated analyses, the mean and standard deviation of the common number
    of features selected with the two network sizes, the mean and standard
    deviation of the associated relative areas, and plots, for the
    k-nearest-neighbors and SVM classifiers, the decrease-of-accuracy boxplots.

    Args:
        replication_res (list):
            the list of per-replicate results, one entry per run of
            common_features_difference_accuracy.
        subset_size_vec (list):
            the sizes of feature subsets for which the decrease of accuracy
            was computed.
        showfliers (bool):
            whether outliers are included in the boxplots. True by default.
        save (bool):
            whether the resulting graphs are saved in the current file
            location. True by default.
        plot_reliefF (bool):
            whether the reliefF methods are included in the boxplots.
            True by default.
    Returns:
        df_avg_common (pandas.core.frame.DataFrame):
            per method (rows), the average number of common features selected
            with small and large networks, per subset size (columns).
        df_std_common (pandas.core.frame.DataFrame):
            per method, the standard deviation of the number of common
            features, per subset size.
        df_avg_areas (pandas.core.frame.DataFrame):
            per method, the average relative areas between the
            common-features curve and the best case scenario, per interval [1,p].
        df_std_areas (pandas.core.frame.DataFrame):
            per method, the standard deviation of those relative areas.
    """
    num_rep = len(replication_res)
    num_features = len(replication_res[0][0]["mRMR"])
    keys_used = replication_res[0][0].keys()

    ### Mean/std of the common-feature counts and relative areas over replicates.
    # Independent inner lists are required here: the former [[]]*num_features
    # made every slot alias one shared list (it only worked by accident
    # because slots were rebound, never appended to).
    dict_common_tmp_rep = {key: [[] for _ in range(num_features)] for key in keys_used}
    dict_areas_tmp_rep = {key: [[] for _ in range(num_features)] for key in keys_used}
    dict_avg_common = {key: np.zeros(num_features) for key in keys_used}
    dict_std_common = {key: np.zeros(num_features) for key in keys_used}
    dict_avg_areas = {key: np.zeros(num_features) for key in keys_used}
    dict_std_areas = {key: np.zeros(num_features) for key in keys_used}

    # Collect the individual values for each replicate (append avoids the
    # former quadratic list re-creation via lst = lst + [x]).
    for key in keys_used:
        for feat in range(num_features):
            for rep in range(num_rep):
                dict_common_tmp_rep[key][feat].append(replication_res[rep][0][key][feat])
                dict_areas_tmp_rep[key][feat].append(replication_res[rep][1][key][feat])

    # Average and standard deviation over replicated runs.
    for key in keys_used:
        for feat in range(num_features):
            dict_avg_common[key][feat] = np.mean(dict_common_tmp_rep[key][feat])
            dict_std_common[key][feat] = np.std(dict_common_tmp_rep[key][feat])
            dict_avg_areas[key][feat] = np.mean(dict_areas_tmp_rep[key][feat])
            dict_std_areas[key][feat] = np.std(dict_areas_tmp_rep[key][feat])

    ### Decrease of accuracy for the K-NN and SVM classifiers
    ### (replicate result indexes: 2 -> SVM, 3 -> KNN), stored long-form
    ### for the boxplots.
    dict_precision_subset_method_SVM = {'Precision': [], 'Size of feature subset': [], 'Methods': []}
    dict_precision_subset_method_KNN = {'Precision': [], 'Size of feature subset': [], 'Methods': []}
    for key in keys_used:
        for k, subset in enumerate(subset_size_vec):
            for rep in range(num_rep):
                dict_precision_subset_method_SVM['Precision'].append(replication_res[rep][2][key][k])
                dict_precision_subset_method_SVM['Size of feature subset'].append(subset)
                dict_precision_subset_method_SVM['Methods'].append(key)
                dict_precision_subset_method_KNN['Precision'].append(replication_res[rep][3][key][k])
                dict_precision_subset_method_KNN['Size of feature subset'].append(subset)
                dict_precision_subset_method_KNN['Methods'].append(key)

    df_res_SVM = pd.DataFrame(dict_precision_subset_method_SVM)
    df_res_KNN = pd.DataFrame(dict_precision_subset_method_KNN)
    if not plot_reliefF:
        # Drop the reliefF variants from both frames before plotting.
        df_res_KNN = df_res_KNN[(df_res_KNN.Methods != "reliefF classic") & (df_res_KNN.Methods != "reliefF RF prox.")]
        df_res_SVM = df_res_SVM[(df_res_SVM.Methods != "reliefF classic") & (df_res_SVM.Methods != "reliefF RF prox.")]

    # KNN first, then SVM, matching the original plotting order.
    knn_name = 'Boxplot_diff_withReliefF_KNN.pdf' if plot_reliefF else 'Boxplot_diff_withoutReliefF_KNN.pdf'
    svm_name = 'Boxplot_diff_withReliefF_SVM.pdf' if plot_reliefF else 'Boxplot_diff_withoutReliefF_SVM.pdf'
    _plot_decrease_boxplot(df_res_KNN, showfliers, save, knn_name)
    _plot_decrease_boxplot(df_res_SVM, showfliers, save, svm_name)

    # TO DO: Add graphical arguments
    feature_cols = list(range(1, num_features + 1, 1))
    df_avg_common = pd.DataFrame.from_dict(dict_avg_common, orient = 'index', columns = feature_cols)
    df_std_common = pd.DataFrame.from_dict(dict_std_common, orient = 'index', columns = feature_cols)
    df_avg_areas = pd.DataFrame.from_dict(dict_avg_areas, orient = 'index', columns = feature_cols)
    df_std_areas = pd.DataFrame.from_dict(dict_std_areas, orient = 'index', columns = feature_cols)

    return df_avg_common, df_std_common, df_avg_areas, df_std_areas
| [
"numpy.sum",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"numpy.mean",
"cost_based_selection.cost_based_methods.JMI",
"pandas.DataFrame",
"numpy.std",
"cost_based_selection.cost_based_methods.reliefF",
"cost_based_selection.cost_based_methods.mRMR",
"cost... | [((3653, 3682), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(13, 8)'}), '(figsize=(13, 8))\n', (3665, 3682), True, 'import matplotlib.pyplot as plt\n'), ((4230, 4271), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'right': '(0.95)'}), '(top=0.95, right=0.95)\n', (4249, 4271), True, 'import matplotlib.pyplot as plt\n'), ((8614, 8648), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'num_fold'}), '(n_splits=num_fold)\n', (8629, 8648), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split\n'), ((18995, 19022), 'numpy.array', 'np.array', (['dfSummaries_small'], {}), '(dfSummaries_small)\n', (19003, 19022), True, 'import numpy as np\n'), ((19232, 19335), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_small', 'y_small'], {'test_size': 'val_size', 'random_state': 'random_seed', 'stratify': 'y_small'}), '(X_small, y_small, test_size=val_size, random_state=\n random_seed, stratify=y_small)\n', (19248, 19335), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split\n'), ((19455, 19546), 'cost_based_selection.cost_based_methods.mRMR', 'cost_based_methods.mRMR', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'is_disc': 'is_disc'}), '(X=X_train_small, y=y_train_small, is_disc=is_disc,\n **args_mRMR)\n', (19478, 19546), False, 'from cost_based_selection import cost_based_methods\n'), ((19645, 19734), 'cost_based_selection.cost_based_methods.JMI', 'cost_based_methods.JMI', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'is_disc': 'is_disc'}), '(X=X_train_small, y=y_train_small, is_disc=is_disc,\n **args_JMI)\n', (19667, 19734), False, 'from cost_based_selection import cost_based_methods\n'), ((19832, 19923), 'cost_based_selection.cost_based_methods.JMIM', 'cost_based_methods.JMIM', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'is_disc': 'is_disc'}), '(X=X_train_small, 
y=y_train_small, is_disc=is_disc,\n **args_JMIM)\n', (19855, 19923), False, 'from cost_based_selection import cost_based_methods\n'), ((20025, 20136), 'cost_based_selection.cost_based_methods.reliefF', 'cost_based_methods.reliefF', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'proximity': '"""distance"""'}), "(X=X_train_small, y=y_train_small, proximity=\n 'distance', **args_reliefF_classic)\n", (20051, 20136), False, 'from cost_based_selection import cost_based_methods\n'), ((20315, 20425), 'cost_based_selection.cost_based_methods.reliefF', 'cost_based_methods.reliefF', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'proximity': '"""rf prox"""'}), "(X=X_train_small, y=y_train_small, proximity=\n 'rf prox', **args_reliefF_classic)\n", (20341, 20425), False, 'from cost_based_selection import cost_based_methods\n'), ((20600, 20715), 'cost_based_selection.cost_based_methods.pen_rf_importance', 'cost_based_methods.pen_rf_importance', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'imp_type': '"""impurity"""'}), "(X=X_train_small, y=y_train_small,\n imp_type='impurity', **args_rf_impurity)\n", (20636, 20715), False, 'from cost_based_selection import cost_based_methods\n'), ((20906, 21027), 'cost_based_selection.cost_based_methods.pen_rf_importance', 'cost_based_methods.pen_rf_importance', ([], {'X': 'X_train_small', 'y': 'y_train_small', 'imp_type': '"""permutation"""'}), "(X=X_train_small, y=y_train_small,\n imp_type='permutation', **args_rf_permutation)\n", (20942, 21027), False, 'from cost_based_selection import cost_based_methods\n'), ((21246, 21273), 'numpy.array', 'np.array', (['dfSummaries_large'], {}), '(dfSummaries_large)\n', (21254, 21273), True, 'import numpy as np\n'), ((21458, 21561), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_large', 'y_large'], {'test_size': 'val_size', 'random_state': 'random_seed', 'stratify': 'y_large'}), '(X_large, y_large, test_size=val_size, random_state=\n random_seed, stratify=y_large)\n', 
(21474, 21561), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split\n'), ((21681, 21772), 'cost_based_selection.cost_based_methods.mRMR', 'cost_based_methods.mRMR', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'is_disc': 'is_disc'}), '(X=X_train_large, y=y_train_large, is_disc=is_disc,\n **args_mRMR)\n', (21704, 21772), False, 'from cost_based_selection import cost_based_methods\n'), ((21871, 21960), 'cost_based_selection.cost_based_methods.JMI', 'cost_based_methods.JMI', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'is_disc': 'is_disc'}), '(X=X_train_large, y=y_train_large, is_disc=is_disc,\n **args_JMI)\n', (21893, 21960), False, 'from cost_based_selection import cost_based_methods\n'), ((22058, 22149), 'cost_based_selection.cost_based_methods.JMIM', 'cost_based_methods.JMIM', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'is_disc': 'is_disc'}), '(X=X_train_large, y=y_train_large, is_disc=is_disc,\n **args_JMIM)\n', (22081, 22149), False, 'from cost_based_selection import cost_based_methods\n'), ((22251, 22362), 'cost_based_selection.cost_based_methods.reliefF', 'cost_based_methods.reliefF', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'proximity': '"""distance"""'}), "(X=X_train_large, y=y_train_large, proximity=\n 'distance', **args_reliefF_classic)\n", (22277, 22362), False, 'from cost_based_selection import cost_based_methods\n'), ((22541, 22651), 'cost_based_selection.cost_based_methods.reliefF', 'cost_based_methods.reliefF', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'proximity': '"""rf prox"""'}), "(X=X_train_large, y=y_train_large, proximity=\n 'rf prox', **args_reliefF_classic)\n", (22567, 22651), False, 'from cost_based_selection import cost_based_methods\n'), ((22826, 22941), 'cost_based_selection.cost_based_methods.pen_rf_importance', 'cost_based_methods.pen_rf_importance', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'imp_type': '"""impurity"""'}), "(X=X_train_large, 
y=y_train_large,\n imp_type='impurity', **args_rf_impurity)\n", (22862, 22941), False, 'from cost_based_selection import cost_based_methods\n'), ((23132, 23253), 'cost_based_selection.cost_based_methods.pen_rf_importance', 'cost_based_methods.pen_rf_importance', ([], {'X': 'X_train_large', 'y': 'y_train_large', 'imp_type': '"""permutation"""'}), "(X=X_train_large, y=y_train_large,\n imp_type='permutation', **args_rf_permutation)\n", (23168, 23253), False, 'from cost_based_selection import cost_based_methods\n'), ((35970, 36016), 'pandas.DataFrame', 'pd.DataFrame', (['dict_precision_subset_method_SVM'], {}), '(dict_precision_subset_method_SVM)\n', (35982, 36016), True, 'import pandas as pd\n'), ((36034, 36080), 'pandas.DataFrame', 'pd.DataFrame', (['dict_precision_subset_method_KNN'], {}), '(dict_precision_subset_method_KNN)\n', (36046, 36080), True, 'import pandas as pd\n'), ((36391, 36420), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (36403, 36420), True, 'import matplotlib.pyplot as plt\n'), ((36938, 36979), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'right': '(0.95)'}), '(top=0.95, right=0.95)\n', (36957, 36979), True, 'import matplotlib.pyplot as plt\n'), ((37222, 37251), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 8)'}), '(figsize=(12, 8))\n', (37234, 37251), True, 'import matplotlib.pyplot as plt\n'), ((37773, 37814), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'right': '(0.95)'}), '(top=0.95, right=0.95)\n', (37792, 37814), True, 'import matplotlib.pyplot as plt\n'), ((28667, 28693), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (28675, 28693), False, 'from joblib import Parallel, delayed\n'), ((33599, 33621), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (33607, 33621), True, 'import numpy as np\n'), ((33653, 33675), 'numpy.zeros', 
'np.zeros', (['num_features'], {}), '(num_features)\n', (33661, 33675), True, 'import numpy as np\n'), ((33706, 33728), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (33714, 33728), True, 'import numpy as np\n'), ((33759, 33781), 'numpy.zeros', 'np.zeros', (['num_features'], {}), '(num_features)\n', (33767, 33781), True, 'import numpy as np\n'), ((36454, 36590), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'x': '"""Size of feature subset"""', 'y': '"""Precision"""', 'hue': '"""Methods"""', 'data': 'df_res_KNN', 'palette': '"""Set1"""', 'showfliers': 'showfliers'}), "(ax=ax, x='Size of feature subset', y='Precision', hue='Methods',\n data=df_res_KNN, palette='Set1', showfliers=showfliers)\n", (36465, 36590), True, 'import seaborn as sns\n'), ((36637, 36777), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'x': '"""Size of feature subset"""', 'y': '"""Precision"""', 'hue': '"""Methods"""', 'data': 'df_res_KNN_sub', 'palette': '"""Set1"""', 'showfliers': 'showfliers'}), "(ax=ax, x='Size of feature subset', y='Precision', hue='Methods',\n data=df_res_KNN_sub, palette='Set1', showfliers=showfliers)\n", (36648, 36777), True, 'import seaborn as sns\n'), ((37018, 37079), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Boxplot_diff_withReliefF_KNN.pdf"""'], {'pad_inches': '(0)'}), "('Boxplot_diff_withReliefF_KNN.pdf', pad_inches=0)\n", (37029, 37079), True, 'import matplotlib.pyplot as plt\n'), ((37285, 37421), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'x': '"""Size of feature subset"""', 'y': '"""Precision"""', 'hue': '"""Methods"""', 'data': 'df_res_SVM', 'palette': '"""Set1"""', 'showfliers': 'showfliers'}), "(ax=ax, x='Size of feature subset', y='Precision', hue='Methods',\n data=df_res_SVM, palette='Set1', showfliers=showfliers)\n", (37296, 37421), True, 'import seaborn as sns\n'), ((37468, 37608), 'seaborn.boxplot', 'sns.boxplot', ([], {'ax': 'ax', 'x': '"""Size of feature subset"""', 'y': '"""Precision"""', 'hue': 
'"""Methods"""', 'data': 'df_res_SVM_sub', 'palette': '"""Set1"""', 'showfliers': 'showfliers'}), "(ax=ax, x='Size of feature subset', y='Precision', hue='Methods',\n data=df_res_SVM_sub, palette='Set1', showfliers=showfliers)\n", (37479, 37608), True, 'import seaborn as sns\n'), ((37853, 37914), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Boxplot_diff_withReliefF_SVM.pdf"""'], {'pad_inches': '(0)'}), "('Boxplot_diff_withReliefF_SVM.pdf', pad_inches=0)\n", (37864, 37914), True, 'import matplotlib.pyplot as plt\n'), ((10001, 10068), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['classifier', 'X_val_using_small', 'y_val'], {'cv': 'cross_val'}), '(classifier, X_val_using_small, y_val, cv=cross_val)\n', (10016, 10068), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split\n'), ((10203, 10270), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['classifier', 'X_val_using_large', 'y_val'], {'cv': 'cross_val'}), '(classifier, X_val_using_large, y_val, cv=cross_val)\n', (10218, 10270), False, 'from sklearn.model_selection import StratifiedKFold, cross_val_score, train_test_split\n'), ((34365, 34404), 'numpy.mean', 'np.mean', (['dict_common_tmp_rep[key][feat]'], {}), '(dict_common_tmp_rep[key][feat])\n', (34372, 34404), True, 'import numpy as np\n'), ((34446, 34484), 'numpy.std', 'np.std', (['dict_common_tmp_rep[key][feat]'], {}), '(dict_common_tmp_rep[key][feat])\n', (34452, 34484), True, 'import numpy as np\n'), ((34525, 34563), 'numpy.mean', 'np.mean', (['dict_areas_tmp_rep[key][feat]'], {}), '(dict_areas_tmp_rep[key][feat])\n', (34532, 34563), True, 'import numpy as np\n'), ((34604, 34641), 'numpy.std', 'np.std', (['dict_areas_tmp_rep[key][feat]'], {}), '(dict_areas_tmp_rep[key][feat])\n', (34610, 34641), True, 'import numpy as np\n'), ((37124, 37188), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Boxplot_diff_withoutReliefF_KNN.pdf"""'], {'pad_inches': '(0)'}), 
"('Boxplot_diff_withoutReliefF_KNN.pdf', pad_inches=0)\n", (37135, 37188), True, 'import matplotlib.pyplot as plt\n'), ((37959, 38023), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""Boxplot_diff_withoutReliefF_SVM.pdf"""'], {'pad_inches': '(0)'}), "('Boxplot_diff_withoutReliefF_SVM.pdf', pad_inches=0)\n", (37970, 38023), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3529), 'numpy.sum', 'np.sum', (['dict_common[key][:i + 1]'], {}), '(dict_common[key][:i + 1])\n', (3503, 3529), True, 'import numpy as np\n'), ((28696, 28740), 'joblib.delayed', 'delayed', (['common_features_difference_accuracy'], {}), '(common_features_difference_accuracy)\n', (28703, 28740), False, 'from joblib import Parallel, delayed\n')] |
"""Test derivation of `et`."""
import iris
import numpy as np
import pytest
from cf_units import Unit
import esmvalcore.preprocessor._derive.et as et
@pytest.fixture
def cubes():
    """Return the input cube list required by the ``et`` derivation."""
    latent_heat = iris.cube.Cube(
        [[1.0, 2.0], [0.0, -2.0]],
        standard_name='surface_upward_latent_heat_flux')
    temperature = iris.cube.Cube([1.0], standard_name='air_temperature')
    return iris.cube.CubeList([latent_heat, temperature])
def test_et_calculation(cubes):
    """Check data and units of the derived ``et`` cube."""
    out_cube = et.DerivedVariable().calculate(cubes)
    expected = np.array([[0.03505071, 0.07010142], [0.0, -0.07010142]])
    np.testing.assert_allclose(out_cube.data, expected)
    assert out_cube.units == Unit('mm day-1')
| [
"iris.cube.CubeList",
"esmvalcore.preprocessor._derive.et.DerivedVariable",
"numpy.array",
"iris.cube.Cube",
"cf_units.Unit"
] | [((198, 293), 'iris.cube.Cube', 'iris.cube.Cube', (['[[1.0, 2.0], [0.0, -2.0]]'], {'standard_name': '"""surface_upward_latent_heat_flux"""'}), "([[1.0, 2.0], [0.0, -2.0]], standard_name=\n 'surface_upward_latent_heat_flux')\n", (212, 293), False, 'import iris\n'), ((334, 388), 'iris.cube.Cube', 'iris.cube.Cube', (['[1.0]'], {'standard_name': '"""air_temperature"""'}), "([1.0], standard_name='air_temperature')\n", (348, 388), False, 'import iris\n'), ((400, 440), 'iris.cube.CubeList', 'iris.cube.CubeList', (['[hfls_cube, ta_cube]'], {}), '([hfls_cube, ta_cube])\n', (418, 440), False, 'import iris\n'), ((493, 513), 'esmvalcore.preprocessor._derive.et.DerivedVariable', 'et.DerivedVariable', ([], {}), '()\n', (511, 513), True, 'import esmvalcore.preprocessor._derive.et as et\n'), ((613, 669), 'numpy.array', 'np.array', (['[[0.03505071, 0.07010142], [0.0, -0.07010142]]'], {}), '([[0.03505071, 0.07010142], [0.0, -0.07010142]])\n', (621, 669), True, 'import numpy as np\n'), ((760, 776), 'cf_units.Unit', 'Unit', (['"""mm day-1"""'], {}), "('mm day-1')\n", (764, 776), False, 'from cf_units import Unit\n')] |
import tkinter as tk
import tkinter.font as tkfont
from tkinter import ttk
from tkinter import filedialog
from PIL import Image, ImageTk # need to import extra module "pip install pillow"
import numpy as np
import os, csv, json, threading
from enum import IntEnum
import pypuclib
from pypuclib import CameraFactory, Camera, XferData, PUC_DATA_MODE, Resolution, Decoder
class FILE_TYPE(IntEnum):
    """Output format selector for recorded camera transfer data."""
    CSV = 0      # sequence-number log as comma separated values
    BINARY = 1   # raw compressed frames as a stream of numpy arrays
class BinaryReader():
    """Reads frames back from a ``.npy`` recording described by a ``.json``
    sidecar file written by FileCreator.
    """

    def __init__(self, name):
        """Open recording ``name`` (path to the .json metadata file)."""
        # Load the recording metadata (framerate, geometry, quantization).
        with open(name, mode='rt', encoding='utf-8') as meta:
            self.dict = json.load(meta)
        self.file = open(name.replace(".json", ".npy"), "rb")
        # Loading one array tells us how many bytes a single frame occupies;
        # the total file size then gives the frame count.
        np.load(self.file)
        self.framesize = self.file.tell()
        self.file.seek(0, os.SEEK_END)
        self.filesize = self.file.tell()
        self.framecount = int(self.filesize / self.framesize)
        self.file.seek(0)
        # The decoder reconstructs images from the quantized transfer data.
        self.decoder = Decoder(self.dict["quantization"])
        self.width = self.dict["width"]
        self.height = self.dict["height"]
        self.opened = True

    def read(self, frameNo, raw = False):
        """Return frame ``frameNo``: decoded image by default, or the raw
        (still compressed) array when ``raw`` is True."""
        self.file.seek(frameNo * self.framesize)
        packed = np.load(self.file)
        if raw:
            return packed
        return self.decoder.decode(packed, Resolution(self.width, self.height))

    def readseqNo(self,frameNo):
        """Return the camera sequence number embedded in frame ``frameNo``."""
        self.file.seek(frameNo * self.framesize)
        packed = np.load(self.file)
        return self.decoder.extractSequenceNo(packed, self.width, self.height)
class FileCreator():
    """Persists transferred camera frames: either a CSV log of sequence
    numbers (with drop detection) or a raw binary stream of numpy arrays.
    """

    def __init__(self, name, filetype):
        """Open ``name`` + extension for writing, depending on FILE_TYPE.

        An unknown ``filetype`` leaves the creator closed (best-effort,
        matching the original behaviour) instead of raising.
        """
        # Fix: always define the flag first, so write()/close() never hit
        # an AttributeError when an unknown filetype was passed.
        self.opened = False
        if filetype == FILE_TYPE.CSV:
            self.file = open(name + ".csv", 'w')
            self.writer = csv.writer(self.file, lineterminator='\n')
            self.writer.writerow(["SequenceNo", "diff"])
        elif filetype == FILE_TYPE.BINARY:
            self.file = open(name + ".npy", 'wb')
        else:
            return
        self.oldSeq = 0  # last sequence number seen; duplicates are skipped
        self.opened = True
        self.filetype = filetype

    def write(self, xferData):
        """Append one transferred frame; a no-op when the file is not open."""
        if self.opened:
            if self.filetype == FILE_TYPE.CSV:
                self.write_csv(xferData.sequenceNo())
            elif self.filetype == FILE_TYPE.BINARY:
                self.write_binary(xferData.sequenceNo(),
                                  xferData.data())

    def write_csv(self, seq):
        """Log ``seq`` and its gap to the previous one; '*' flags a dropped
        frame (gap larger than 1)."""
        if self.oldSeq != seq:
            self.writer.writerow(
                [str(seq),
                 str(seq - self.oldSeq),
                 "*" if (seq - self.oldSeq) > 1 else ""])
            self.oldSeq = seq

    def write_binary(self, seq, nparray):
        """Append the raw frame array, skipping repeated sequence numbers."""
        if self.oldSeq != seq:
            np.save(self.file, nparray)
            self.oldSeq = seq

    @staticmethod
    def create_json(name, cam):
        """Write ``name``.json holding the camera settings BinaryReader needs.

        Declared static explicitly: it was implicitly static before (defined
        without ``self`` and always called as ``FileCreator.create_json``).
        """
        data = dict()
        data["framerate"] = cam.framerate()
        data["shutter"] = cam.shutter()
        data["width"] = cam.resolution().width
        data["height"] = cam.resolution().height
        data["quantization"] = cam.decoder().quantization()
        with open(name + ".json", mode='wt', encoding='utf-8') as file:
            json.dump(data, file, ensure_ascii=False, indent=2)

    def close(self):
        """Close the output file; safe to call more than once."""
        if self.opened:
            self.file.close()
            self.opened = False
class SbTextFrame(tk.Frame):
    """A Text widget with horizontal and vertical scrollbars (grid layout)."""

    def __init__(self,master):
        super().__init__(master)
        text_widget = tk.Text(self, wrap='none', undo=True)
        hbar = tk.Scrollbar(self, orient='horizontal', command=text_widget.xview)
        vbar = tk.Scrollbar(self, orient='vertical', command=text_widget.yview)
        text_widget.config(xscrollcommand=hbar.set, yscrollcommand=vbar.set)
        # Text fills the cell; scrollbars hug the bottom and right edges.
        text_widget.grid(column=0, row=0, sticky='nsew')
        hbar.grid(column=0, row=1, sticky='ew')
        vbar.grid(column=1, row=0, sticky='ns')
        # Let the text cell absorb all extra space on resize.
        self.columnconfigure(0, weight=1)
        self.rowconfigure(0, weight=1)
        # Expose the widgets so callers can reach them directly.
        self.text = text_widget
        self.x_sb = hbar
        self.y_sb = vbar
def add_tab(fname):
    """Open ``fname`` (if it exists) in a new scrollable text tab.

    Appends the created frame to the module-level ``tframes`` list and the
    path to ``fnames`` (even when the file does not exist, as before).
    """
    global tframes, fnames, notebook
    tframe = SbTextFrame(notebook)
    tframes.append(tframe)
    if os.path.isfile(fname):
        # Fix: use a context manager so the handle is closed even if the
        # read raises; insert the whole content once instead of per line.
        with open(fname, 'r') as f:
            tframe.text.insert('end', f.read())
    fnames.append(fname)
    notebook.add(tframe, text=os.path.basename(fname))
class Application(tk.Frame):
    """Live-camera tab: shows the preview stream, exposes camera settings
    (framerate / shutter / resolution / acquisition mode) and records
    transfer data to file via FileCreator.
    """

    def __init__(self, master = None):
        """Connect to the camera, set up the tk variables and build the UI."""
        super().__init__(master)
        master.title("gui_sample")
        master.geometry("800x600")
        self.pack(expand=1, fill=tk.BOTH, anchor=tk.NW)
        # Connect to the camera through the pypuclib factory.
        self.cam = CameraFactory().create()
        self.fcreator = None  # FileCreator while recording, else None
        self.decoder = self.cam.decoder()
        # Selectable framerates [fps]; the same list also feeds the shutter combobox.
        self.framerateValues = [1, 10, 50, 100, 125, 250, 500, 950, 1000,
                                1500, 2000, 2500, 3000, 3200, 4000, 5000,
                                8000, 10000, 20000, 25000, 30000]
        self.framerateStr = tk.IntVar()  # NOTE: an IntVar despite the name
        self.resolutionStr = tk.StringVar()
        self.shutterStr = tk.StringVar()
        self.acqutionVal = tk.IntVar()    # 0 = single, 1 = continuous
        self.savefileVal = tk.IntVar()    # FILE_TYPE value used when recording
        self.uistopVal = tk.BooleanVar()  # True while the live preview is paused
        self.isRec = False
        # Guards isRec/fcreator, shared with the transfer callback thread.
        self.locker = threading.Lock()
        self.font = tkfont.Font(self,family="Arial",size=10,weight="bold")
        self.createWidget()
        self.delay = 15  # preview refresh interval [ms]
        self.updateID = 0  # id of the pending after() callback
        self.update()

    def createWidget(self):
        """Build the option panel (right side) and the preview canvas."""
        #---------------------------------------------------
        # options frame
        #---------------------------------------------------
        frameWidth=300
        self.optionFrame = ttk.LabelFrame(self,
                                        text="options",
                                        width=frameWidth,
                                        relief=tk.RAISED)
        self.optionFrame.propagate(False)
        self.optionFrame.pack(side=tk.RIGHT, fill=tk.Y, padx=5, pady=5)
        #---------------------------------------------------
        # framerate
        #---------------------------------------------------
        self.frameratePanel = ttk.Frame(self.optionFrame,
                                        width=frameWidth,
                                        height=30,
                                        relief=tk.FLAT)
        self.frameratePanel.propagate(False)
        self.frameratePanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.framerateLabel = ttk.Label(self.frameratePanel,text="Framerate[fps]", width=20)
        self.framerateLabel.pack(side=tk.LEFT, padx=5)
        self.framerateList = ttk.Combobox(self.frameratePanel,
                                          values=self.framerateValues,
                                          textvariable=self.framerateStr)
        self.framerateList.pack(side=tk.LEFT, padx=5)
        self.framerateList.bind("<<ComboboxSelected>>", self.updateFramerate)
        #---------------------------------------------------
        # shutter
        #---------------------------------------------------
        self.shutterPanel = ttk.Frame(self.optionFrame,
                                      width=frameWidth,
                                      height=30,
                                      relief=tk.FLAT)
        self.shutterPanel.propagate(False)
        self.shutterPanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.shutterLabel = ttk.Label(self.shutterPanel,text="Shutter speed[sec]", width=20)
        self.shutterLabel.pack(side=tk.LEFT, padx=5)
        self.shutterList = ttk.Combobox(self.shutterPanel,
                                        textvariable=self.shutterStr)
        self.shutterList.pack(side=tk.LEFT, padx=5)
        self.shutterList.bind("<<ComboboxSelected>>", self.updateShutter)
        #---------------------------------------------------
        # resolution
        #---------------------------------------------------
        self.resolutionPanel = ttk.Frame(self.optionFrame,
                                         width=frameWidth,
                                         height=30,
                                         relief=tk.FLAT)
        self.resolutionPanel.propagate(False)
        self.resolutionPanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.resolutionLabel = ttk.Label(self.resolutionPanel,text="Resolution[pixel]", width=20)
        self.resolutionLabel.pack(side=tk.LEFT, padx=5)
        self.resolutionList = ttk.Combobox(self.resolutionPanel,
                                          textvariable=self.resolutionStr)
        self.resolutionList.pack(side=tk.LEFT, padx=5)
        self.resolutionList.bind("<<ComboboxSelected>>", self.updateResolution)
        #---------------------------------------------------
        # Acquisition mode
        #---------------------------------------------------
        self.acquisitionPanel = ttk.Frame(self.optionFrame,
                                          width=frameWidth,
                                          height=30,
                                          relief=tk.FLAT)
        self.acquisitionPanel.propagate(False)
        self.acquisitionPanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.acqusitionLabel = ttk.Label(self.acquisitionPanel,text="Acquisition mode", width=18)
        self.acqusitionLabel.pack(side=tk.LEFT, padx=5)
        self.acqusition1 = tk.Radiobutton(self.acquisitionPanel,
                                          text="single",
                                          value=0,
                                          variable=self.acqutionVal,
                                          command=self.updateAcquisition)
        self.acqutsiion2 = tk.Radiobutton(self.acquisitionPanel,
                                          text="continuous",
                                          value=1,
                                          variable=self.acqutionVal,
                                          command=self.updateAcquisition)
        self.acqusition1.pack(side=tk.LEFT, padx = 5)
        self.acqutsiion2.pack(side=tk.LEFT, padx = 5)
        #---------------------------------------------------
        # savefiles
        #---------------------------------------------------
        self.savefilesPanel = ttk.Frame(self.optionFrame,
                                        width=frameWidth,
                                        height=30,
                                        relief=tk.FLAT)
        self.savefilesPanel.propagate(False)
        self.savefilesPanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.savefilesLabel = ttk.Label(self.savefilesPanel,text="Save file", width=18)
        self.savefilesLabel.pack(side=tk.LEFT, padx=5)
        self.savefile_csv = tk.Radiobutton(self.savefilesPanel,
                                           text="csv",
                                           value=FILE_TYPE.CSV.value,
                                           variable=self.savefileVal)
        self.savefile_bin = tk.Radiobutton(self.savefilesPanel,
                                           text="binary",
                                           value=FILE_TYPE.BINARY.value,
                                           variable=self.savefileVal,)
        self.savefile_csv.pack(side=tk.LEFT, padx = 5)
        self.savefile_bin.pack(side=tk.LEFT, padx = 5)
        #---------------------------------------------------
        # record button
        #---------------------------------------------------
        self.recPanel = ttk.Frame(self.optionFrame,
                                  width=frameWidth,
                                  height=30,
                                  relief=tk.FLAT)
        self.recPanel.propagate(False)
        self.recPanel.pack(side=tk.BOTTOM, fill=tk.Y, padx=5, pady=5)
        self.recButton = ttk.Button(self.recPanel,
                                    text = "REC",
                                    command=self.rec,
                                    width=15)
        self.recButton.pack(side=tk.RIGHT,anchor="center", expand=True)
        self.uistopCheck = ttk.Checkbutton(self.recPanel,
                                           text="Stop Live",
                                           variable=self.uistopVal,
                                           command=self.uistop)
        self.uistopCheck.pack(side=tk.RIGHT,anchor="center", expand=True)
        #---------------------------------------------------
        # canvas
        #---------------------------------------------------
        self.canvas = tk.Canvas(self, width=1296, height=1080)
        self.canvas.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True, padx=5, pady=5)
        #---------------------------------------------------
        # initialize ui
        #---------------------------------------------------
        self.framerateList.set(self.cam.framerate())
        self.updateResolutionList()
        self.updateShutterList()
        self.updateAcquisition()

    def update(self):
        """Grab one frame, draw it, and re-schedule itself after ``delay`` ms.

        NOTE(review): this shadows tkinter's Widget.update(); any internal
        tk call to update() will grab a frame too — confirm intended.
        """
        data = self.cam.grab()
        self.updatecanvas(data)
        self.updateID = self.after(self.delay, self.update)

    def updatecanvas(self, data):
        """Decode ``data``, scale it to fit the canvas and draw it with a
        sequence-number overlay."""
        cw = self.canvas.winfo_width()
        ch = self.canvas.winfo_height()
        w = data.resolution().width
        h = data.resolution().height
        # Fit inside the canvas while preserving the aspect ratio.
        scale = 1
        if cw > 1 and ch > 1:
            scale = cw/w if cw/w < ch/h else ch/h
        array = self.decoder.decode(data)
        i = Image.fromarray(array).resize((int(w*scale), int(h*scale)))
        # Keep a reference on self, otherwise tk garbage-collects the image.
        self.img = ImageTk.PhotoImage(image=i)
        self.canvas.delete("all")
        pos = [(cw-i.width)/2,(ch-i.height)/2]
        self.canvas.create_image(pos[0], pos[1], anchor="nw", image=self.img)
        self.canvas.create_text(pos[0]+5, pos[1]+5, anchor="nw",
                                text="SequeceNo:" + str(data.sequenceNo()),
                                font=self.font, fill="limeGreen")

    def updateFramerate(self, e):
        """Combobox callback: apply the framerate (shutter set to match) and
        refresh the dependent resolution/shutter lists."""
        rate = self.framerateStr.get()
        self.cam.setFramerateShutter(rate, rate)
        self.updateResolutionList()
        self.updateShutterList()

    def updateShutter(self, e):
        """Combobox callback: parse '1/<n>' and apply the shutter speed."""
        resStr = self.shutterStr.get().split("1/")
        self.cam.setShutter(int(resStr[1]))

    def updateResolution(self, e):
        """Combobox callback: parse '<w>x<h>' and apply the resolution."""
        resStr = self.resolutionStr.get().split("x")
        self.cam.setResolution(int(resStr[0]), int(resStr[1]))

    def updateResolutionList(self):
        """Rebuild the resolution combobox from the camera's current limits."""
        resMax = self.cam.resolutionMax()
        resLimit = self.cam.resolutionLimit()
        hStep = resLimit.limitH.step
        hMin = resLimit.limitH.min
        hMax = resMax.height
        wStep = resLimit.limitW.step
        wMin = resLimit.limitW.min
        wMax = resMax.width
        # A zero step means a single fixed value; avoid range(step=0).
        resW = range(wMin, wMax+1, wStep if wStep != 0 else 1)
        resH = range(hMin, hMax+1, hStep if hStep != 0 else 1)
        resValues = []
        for h in resH:
            for w in resW:
                resValues.append(str(w)+"x"+str(h))
        self.resolutionList.config(values=resValues)
        res = self.cam.resolution()
        self.resolutionList.set(str(res.width)+"x"+str(res.height))

    def updateShutterList(self):
        """Rebuild the shutter combobox: only speeds >= the current framerate
        are physically possible."""
        fps = self.cam.framerate()
        shutValues = []
        for s in self.framerateValues:
            if s >= fps:
                shutValues.append("1/" + str(s))
        self.shutterList.config(values = shutValues)
        shutter = self.cam.shutter()
        self.shutterList.set("1/" + str(shutter))

    def updateAcquisition(self):
        """Start/stop continuous transfer to match the radio button state
        (1 = continuous -> beginXfer, 0 = single -> endXfer)."""
        acq = self.acqutionVal.get()
        if acq and not self.cam.isXferring():
            self.cam.beginXfer(self.cppCallback)
        if not acq and self.cam.isXferring():
            self.cam.endXfer()

    def cppCallback(self, xferData):
        """Transfer callback (runs on the camera's thread): persist the frame
        while recording. The lock serializes access with rec()."""
        self.locker.acquire()
        if self.isRec:
            self.fcreator.write(xferData)
        self.locker.release()

    def rec(self):
        """REC button: toggle recording; on stop, close the data file and
        write the settings json next to it."""
        self.locker.acquire()
        self.isRec = not self.isRec
        if self.isRec:
            self.recButton.state(["pressed"])
            self.fcreator = FileCreator("test", self.savefileVal.get())
        else:
            self.recButton.state(["!pressed"])
            self.fcreator.close()
            FileCreator.create_json("test", self.cam)
        self.locker.release()

    def uistop(self):
        """'Stop Live' checkbox: pause or resume the preview timer."""
        if self.uistopVal.get():
            self.after_cancel(self.updateID)
        else:
            self.updateID = self.after(self.delay, self.update)

    def terminate(self):
        """Shut down: stop the preview timer, close the camera and any open
        recording file."""
        self.after_cancel(self.updateID)
        self.cam.close()
        if self.fcreator is not None:
            self.fcreator.close()
class FileApplication(tk.Frame):
    """Playback tab: browse a recorded .json/.npy file frame by frame."""

    def __init__(self, master = None):
        """Build the playback UI (no camera access needed)."""
        super().__init__(master)
        master.geometry("800x600")
        self.pack(expand=1, fill=tk.BOTH, anchor=tk.NW)
        self.font = tkfont.Font(self,family="Arial",size=10,weight="bold")
        self.createWidget()

    def createWidget(self):
        """Build the option frame (frame counter + OPEN button) and canvas."""
        frameWidth=300
        self.optionFrame = ttk.LabelFrame(self,
                                        text="file data",
                                        width=frameWidth,
                                        relief=tk.RAISED)
        self.optionFrame.propagate(False)
        self.optionFrame.pack(side=tk.RIGHT, fill=tk.Y, padx=5, pady=5)
        # Frame-count readout, updated after a file is opened.
        self.framecountPanel = ttk.Frame(self.optionFrame,
                                         width=frameWidth,
                                         height=30,
                                         relief=tk.FLAT)
        self.framecountPanel.propagate(False)
        self.framecountPanel.pack(side=tk.TOP, fill=tk.Y, padx=5, pady=5)
        self.framecount_text = tk.StringVar()
        self.framecount_text.set("file framecount = %d" % 0)
        self.fileframelabel = tk.Label(self.framecountPanel, textvariable = self.framecount_text)
        self.fileframelabel.pack(side=tk.LEFT, padx=5)
        # OPEN button; the frame spinbox is added lazily by openfile().
        self.filePanel = ttk.Frame(self.optionFrame,
                                   width=frameWidth,
                                   height=30,
                                   relief=tk.FLAT)
        self.filePanel.propagate(False)
        self.filePanel.pack(side=tk.BOTTOM, fill=tk.Y, padx=5, pady=5)
        self.fileButton = ttk.Button(self.filePanel,
                                     text = "OPEN",
                                     command=self.openfile,
                                     width=15)
        self.fileButton.pack(side=tk.RIGHT,anchor="center", expand=True)
        # Image display area.
        self.canvas = tk.Canvas(self, width=1296, height=1080)
        self.canvas.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True, padx=5, pady=5)

    def createimagedata(self, data, seqNo):
        """Draw the decoded frame scaled to the canvas with an overlay.

        ``seqNo`` is the zero-based frame index inside the file; the overlay
        shows it offset by the file's initial sequence number.
        """
        cw = self.canvas.winfo_width()
        ch = self.canvas.winfo_height()
        w = self.reader.width
        h = self.reader.height
        # Fit inside the canvas while preserving the aspect ratio.
        scale = 1
        if cw > 1 and ch > 1:
            scale = cw/w if cw/w < ch/h else ch/h
        i = Image.fromarray(data).resize((int(w*scale), int(h*scale)))
        # Keep a reference on self, otherwise tk garbage-collects the image.
        self.img = ImageTk.PhotoImage(image=i)
        self.canvas.delete("all")
        pos = [(cw-i.width)/2,(ch-i.height)/2]
        self.canvas.create_image(pos[0], pos[1], anchor="nw", image=self.img)
        self.canvas.create_text(pos[0]+5, pos[1]+5, anchor="nw",
                                text="SequeceNo:" + str(seqNo + self.iniFileSeqNo),
                                font=self.font, fill="limeGreen")

    def openfile(self):
        """Ask for a recording (.json), open it and display its first frame."""
        initial_dir = os.path.abspath(os.path.dirname(__file__))
        json_type = [("データファイル","*.json")]
        fname = filedialog.askopenfilename(filetypes=json_type, initialdir=initial_dir)
        if not fname:
            return  # dialog cancelled — keep the current state
        self.reader = BinaryReader(fname)
        data = self.reader.read(0)
        self.iniFileSeqNo = self.reader.readseqNo(0)
        # Fix: pass the frame index (0), not the sequence number itself —
        # createimagedata adds iniFileSeqNo, so passing it here displayed
        # twice the real initial sequence number.
        self.createimagedata(data, 0)
        # Fix: create the frame selector only once; reopening a file used to
        # stack duplicate Label/Spinbox widgets in the panel.
        if not hasattr(self, 'filespinBox'):
            self.filespinboxLabel = ttk.Label(self.filePanel, text="Frame:", width=8)
            self.filespinboxLabel.pack(side=tk.LEFT, padx=5)
            self.filespinBox = ttk.Spinbox(self.filePanel,
                                          textvariable=0,
                                          from_=0,
                                          to=self.reader.framecount,
                                          command=self.updatecanvas,
                                          increment=1,
                                          )
            self.filespinBox.pack(side=tk.LEFT, padx=5)
        else:
            self.filespinBox.config(to=self.reader.framecount)
            self.filespinBox.set(0)
        self.framecount_text.set("file framecount = %d" % self.reader.framecount)

    def updatecanvas(self):
        """Spinbox callback: show the currently selected frame."""
        frameNo = int(self.filespinBox.get())
        data = self.reader.read(frameNo)
        self.createimagedata(data, frameNo)
def main():
    """Build the tabbed UI (camera tab + file-viewer tab) and run the Tk loop."""
    global root,notebook,tframes,fnames
    root = tk.Tk()
    root.title('tabbed editor')
    root.geometry('800x600')
    # A single notebook hosts both application frames as tabs.
    notebook = ttk.Notebook(root)
    notebook.pack(fill='both',expand=1)
    app = Application(master = root)
    notebook.add(app, text="cam")
    fileapp = FileApplication(master = root)
    notebook.add(fileapp, text="file")
    # mainloop() blocks until the window closes; terminate() then releases
    # the camera-side resources held by the "cam" tab.
    app.mainloop()
    app.terminate()
if __name__ == '__main__':
    main()
| [
"tkinter.StringVar",
"tkinter.Text",
"numpy.load",
"tkinter.ttk.Label",
"tkinter.ttk.Spinbox",
"pypuclib.Resolution",
"tkinter.font.Font",
"os.path.isfile",
"tkinter.BooleanVar",
"tkinter.Label",
"tkinter.ttk.LabelFrame",
"os.path.dirname",
"tkinter.filedialog.askopenfilename",
"tkinter.tt... | [((4170, 4191), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (4184, 4191), False, 'import os, csv, json, threading\n'), ((4371, 4394), 'os.path.basename', 'os.path.basename', (['fname'], {}), '(fname)\n', (4387, 4394), False, 'import os, csv, json, threading\n'), ((21349, 21356), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (21354, 21356), True, 'import tkinter as tk\n'), ((21434, 21452), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['root'], {}), '(root)\n', (21446, 21452), False, 'from tkinter import ttk\n'), ((733, 751), 'numpy.load', 'np.load', (['self.file'], {}), '(self.file)\n', (740, 751), True, 'import numpy as np\n'), ((1015, 1049), 'pypuclib.Decoder', 'Decoder', (["self.dict['quantization']"], {}), "(self.dict['quantization'])\n", (1022, 1049), False, 'from pypuclib import CameraFactory, Camera, XferData, PUC_DATA_MODE, Resolution, Decoder\n'), ((1268, 1286), 'numpy.load', 'np.load', (['self.file'], {}), '(self.file)\n', (1275, 1286), True, 'import numpy as np\n'), ((1524, 1542), 'numpy.load', 'np.load', (['self.file'], {}), '(self.file)\n', (1531, 1542), True, 'import numpy as np\n'), ((3459, 3496), 'tkinter.Text', 'tk.Text', (['self'], {'wrap': '"""none"""', 'undo': '(True)'}), "(self, wrap='none', undo=True)\n", (3466, 3496), True, 'import tkinter as tk\n'), ((3510, 3549), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self'], {'orient': '"""horizontal"""'}), "(self, orient='horizontal')\n", (3522, 3549), True, 'import tkinter as tk\n'), ((3564, 3601), 'tkinter.Scrollbar', 'tk.Scrollbar', (['self'], {'orient': '"""vertical"""'}), "(self, orient='vertical')\n", (3576, 3601), True, 'import tkinter as tk\n'), ((5024, 5035), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (5033, 5035), True, 'import tkinter as tk\n'), ((5065, 5079), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (5077, 5079), True, 'import tkinter as tk\n'), ((5106, 5120), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (5118, 5120), True, 'import 
tkinter as tk\n'), ((5148, 5159), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (5157, 5159), True, 'import tkinter as tk\n'), ((5187, 5198), 'tkinter.IntVar', 'tk.IntVar', ([], {}), '()\n', (5196, 5198), True, 'import tkinter as tk\n'), ((5224, 5239), 'tkinter.BooleanVar', 'tk.BooleanVar', ([], {}), '()\n', (5237, 5239), True, 'import tkinter as tk\n'), ((5289, 5305), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (5303, 5305), False, 'import os, csv, json, threading\n'), ((5326, 5383), 'tkinter.font.Font', 'tkfont.Font', (['self'], {'family': '"""Arial"""', 'size': '(10)', 'weight': '"""bold"""'}), "(self, family='Arial', size=10, weight='bold')\n", (5337, 5383), True, 'import tkinter.font as tkfont\n'), ((5708, 5780), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self'], {'text': '"""options"""', 'width': 'frameWidth', 'relief': 'tk.RAISED'}), "(self, text='options', width=frameWidth, relief=tk.RAISED)\n", (5722, 5780), False, 'from tkinter import ttk\n'), ((6196, 6268), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (6205, 6268), False, 'from tkinter import ttk\n'), ((6538, 6601), 'tkinter.ttk.Label', 'ttk.Label', (['self.frameratePanel'], {'text': '"""Framerate[fps]"""', 'width': '(20)'}), "(self.frameratePanel, text='Framerate[fps]', width=20)\n", (6547, 6601), False, 'from tkinter import ttk\n'), ((6686, 6785), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.frameratePanel'], {'values': 'self.framerateValues', 'textvariable': 'self.framerateStr'}), '(self.frameratePanel, values=self.framerateValues, textvariable\n =self.framerateStr)\n', (6698, 6785), False, 'from tkinter import ttk\n'), ((7168, 7240), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (7177, 7240), 
False, 'from tkinter import ttk\n'), ((7498, 7563), 'tkinter.ttk.Label', 'ttk.Label', (['self.shutterPanel'], {'text': '"""Shutter speed[sec]"""', 'width': '(20)'}), "(self.shutterPanel, text='Shutter speed[sec]', width=20)\n", (7507, 7563), False, 'from tkinter import ttk\n'), ((7644, 7705), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.shutterPanel'], {'textvariable': 'self.shutterStr'}), '(self.shutterPanel, textvariable=self.shutterStr)\n', (7656, 7705), False, 'from tkinter import ttk\n'), ((8049, 8121), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (8058, 8121), False, 'from tkinter import ttk\n'), ((8397, 8464), 'tkinter.ttk.Label', 'ttk.Label', (['self.resolutionPanel'], {'text': '"""Resolution[pixel]"""', 'width': '(20)'}), "(self.resolutionPanel, text='Resolution[pixel]', width=20)\n", (8406, 8464), False, 'from tkinter import ttk\n'), ((8551, 8618), 'tkinter.ttk.Combobox', 'ttk.Combobox', (['self.resolutionPanel'], {'textvariable': 'self.resolutionStr'}), '(self.resolutionPanel, textvariable=self.resolutionStr)\n', (8563, 8618), False, 'from tkinter import ttk\n'), ((8981, 9053), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (8990, 9053), False, 'from tkinter import ttk\n'), ((9334, 9401), 'tkinter.ttk.Label', 'ttk.Label', (['self.acquisitionPanel'], {'text': '"""Acquisition mode"""', 'width': '(18)'}), "(self.acquisitionPanel, text='Acquisition mode', width=18)\n", (9343, 9401), False, 'from tkinter import ttk\n'), ((9485, 9610), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.acquisitionPanel'], {'text': '"""single"""', 'value': '(0)', 'variable': 'self.acqutionVal', 'command': 'self.updateAcquisition'}), "(self.acquisitionPanel, text='single', value=0, 
variable=self\n .acqutionVal, command=self.updateAcquisition)\n", (9499, 9610), True, 'import tkinter as tk\n'), ((9801, 9930), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.acquisitionPanel'], {'text': '"""continuous"""', 'value': '(1)', 'variable': 'self.acqutionVal', 'command': 'self.updateAcquisition'}), "(self.acquisitionPanel, text='continuous', value=1, variable=\n self.acqutionVal, command=self.updateAcquisition)\n", (9815, 9930), True, 'import tkinter as tk\n'), ((10375, 10447), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (10384, 10447), False, 'from tkinter import ttk\n'), ((10717, 10775), 'tkinter.ttk.Label', 'ttk.Label', (['self.savefilesPanel'], {'text': '"""Save file"""', 'width': '(18)'}), "(self.savefilesPanel, text='Save file', width=18)\n", (10726, 10775), False, 'from tkinter import ttk\n'), ((10859, 10964), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.savefilesPanel'], {'text': '"""csv"""', 'value': 'FILE_TYPE.CSV.value', 'variable': 'self.savefileVal'}), "(self.savefilesPanel, text='csv', value=FILE_TYPE.CSV.value,\n variable=self.savefileVal)\n", (10873, 10964), True, 'import tkinter as tk\n'), ((11118, 11230), 'tkinter.Radiobutton', 'tk.Radiobutton', (['self.savefilesPanel'], {'text': '"""binary"""', 'value': 'FILE_TYPE.BINARY.value', 'variable': 'self.savefileVal'}), "(self.savefilesPanel, text='binary', value=FILE_TYPE.BINARY.\n value, variable=self.savefileVal)\n", (11132, 11230), True, 'import tkinter as tk\n'), ((11637, 11709), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (11646, 11709), False, 'from tkinter import ttk\n'), ((11946, 12011), 'tkinter.ttk.Button', 'ttk.Button', (['self.recPanel'], {'text': '"""REC"""', 'command': 'self.rec', 
'width': '(15)'}), "(self.recPanel, text='REC', command=self.rec, width=15)\n", (11956, 12011), False, 'from tkinter import ttk\n'), ((12223, 12321), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.recPanel'], {'text': '"""Stop Live"""', 'variable': 'self.uistopVal', 'command': 'self.uistop'}), "(self.recPanel, text='Stop Live', variable=self.uistopVal,\n command=self.uistop)\n", (12238, 12321), False, 'from tkinter import ttk\n'), ((12683, 12723), 'tkinter.Canvas', 'tk.Canvas', (['self'], {'width': '(1296)', 'height': '(1080)'}), '(self, width=1296, height=1080)\n', (12692, 12723), True, 'import tkinter as tk\n'), ((13674, 13701), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'i'}), '(image=i)\n', (13692, 13701), False, 'from PIL import Image, ImageTk\n'), ((17002, 17059), 'tkinter.font.Font', 'tkfont.Font', (['self'], {'family': '"""Arial"""', 'size': '(10)', 'weight': '"""bold"""'}), "(self, family='Arial', size=10, weight='bold')\n", (17013, 17059), True, 'import tkinter.font as tkfont\n'), ((17311, 17385), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self'], {'text': '"""file data"""', 'width': 'frameWidth', 'relief': 'tk.RAISED'}), "(self, text='file data', width=frameWidth, relief=tk.RAISED)\n", (17325, 17385), False, 'from tkinter import ttk\n'), ((17802, 17874), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (17811, 17874), False, 'from tkinter import ttk\n'), ((18148, 18162), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (18160, 18162), True, 'import tkinter as tk\n'), ((18254, 18319), 'tkinter.Label', 'tk.Label', (['self.framecountPanel'], {'textvariable': 'self.framecount_text'}), '(self.framecountPanel, textvariable=self.framecount_text)\n', (18262, 18319), True, 'import tkinter as tk\n'), ((18403, 18475), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.optionFrame'], {'width': 
'frameWidth', 'height': '(30)', 'relief': 'tk.FLAT'}), '(self.optionFrame, width=frameWidth, height=30, relief=tk.FLAT)\n', (18412, 18475), False, 'from tkinter import ttk\n'), ((18718, 18790), 'tkinter.ttk.Button', 'ttk.Button', (['self.filePanel'], {'text': '"""OPEN"""', 'command': 'self.openfile', 'width': '(15)'}), "(self.filePanel, text='OPEN', command=self.openfile, width=15)\n", (18728, 18790), False, 'from tkinter import ttk\n'), ((19140, 19180), 'tkinter.Canvas', 'tk.Canvas', (['self'], {'width': '(1296)', 'height': '(1080)'}), '(self, width=1296, height=1080)\n', (19149, 19180), True, 'import tkinter as tk\n'), ((19662, 19689), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', ([], {'image': 'i'}), '(image=i)\n', (19680, 19689), False, 'from PIL import Image, ImageTk\n'), ((20209, 20267), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {'filetypes': 'type', 'initialdir': 'dir'}), '(filetypes=type, initialdir=dir)\n', (20235, 20267), False, 'from tkinter import filedialog\n'), ((20495, 20544), 'tkinter.ttk.Label', 'ttk.Label', (['self.filePanel'], {'text': '"""Frame:"""', 'width': '(8)'}), "(self.filePanel, text='Frame:', width=8)\n", (20504, 20544), False, 'from tkinter import ttk\n'), ((20630, 20754), 'tkinter.ttk.Spinbox', 'ttk.Spinbox', (['self.filePanel'], {'textvariable': '(0)', 'from_': '(0)', 'to': 'self.reader.framecount', 'command': 'self.updatecanvas', 'increment': '(1)'}), '(self.filePanel, textvariable=0, from_=0, to=self.reader.\n framecount, command=self.updatecanvas, increment=1)\n', (20641, 20754), False, 'from tkinter import ttk\n'), ((616, 631), 'json.load', 'json.load', (['file'], {}), '(file)\n', (625, 631), False, 'import os, csv, json, threading\n'), ((1796, 1838), 'csv.writer', 'csv.writer', (['self.file'], {'lineterminator': '"""\n"""'}), "(self.file, lineterminator='\\n')\n", (1806, 1838), False, 'import os, csv, json, threading\n'), ((2786, 2813), 'numpy.save', 'np.save', (['self.file', 'nparray'], {}), 
'(self.file, nparray)\n', (2793, 2813), True, 'import numpy as np\n'), ((3221, 3272), 'json.dump', 'json.dump', (['data', 'file'], {'ensure_ascii': '(False)', 'indent': '(2)'}), '(data, file, ensure_ascii=False, indent=2)\n', (3230, 3272), False, 'import os, csv, json, threading\n'), ((20128, 20153), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (20143, 20153), False, 'import os, csv, json, threading\n'), ((1388, 1423), 'pypuclib.Resolution', 'Resolution', (['self.width', 'self.height'], {}), '(self.width, self.height)\n', (1398, 1423), False, 'from pypuclib import CameraFactory, Camera, XferData, PUC_DATA_MODE, Resolution, Decoder\n'), ((4682, 4697), 'pypuclib.CameraFactory', 'CameraFactory', ([], {}), '()\n', (4695, 4697), False, 'from pypuclib import CameraFactory, Camera, XferData, PUC_DATA_MODE, Resolution, Decoder\n'), ((13595, 13617), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (13610, 13617), False, 'from PIL import Image, ImageTk\n'), ((19583, 19605), 'PIL.Image.fromarray', 'Image.fromarray', (['array'], {}), '(array)\n', (19598, 19605), False, 'from PIL import Image, ImageTk\n')] |
import torch
import numpy as np
from training.training import Trainer
from common.replay_buffer import PrioritizedReplayBuffer
# Use GPU, if available
USE_CUDA = torch.cuda.is_available()


def Variable(x):
    """Return *x* moved to the GPU when CUDA is available, else *x* unchanged."""
    if USE_CUDA:
        return x.cuda()
    return x
class PriorDQN(Trainer):
    """DQN trainer variant that draws minibatches from a prioritized replay
    buffer and corrects for the sampling bias with importance weights."""

    def __init__(self, parameters):
        super(PriorDQN, self).__init__(parameters)
        self.replay_buffer = PrioritizedReplayBuffer(
            self.buffersize, parameters["alpha"])
        self.beta_start = parameters["beta_start"]
        self.beta_frames = parameters["beta_frames"]

    def push_to_buffer(self, state, action, reward, next_state, done):
        """Store one transition in the prioritized replay buffer."""
        self.replay_buffer.push(state, action, reward, next_state, done)

    def beta_by_frame(self, frame_idx):
        """Linearly anneal the importance-sampling exponent from beta_start
        toward 1.0, capped at 1.0."""
        annealed = self.beta_start + frame_idx * (1.0 - self.beta_start) / self.beta_frames
        return min(1.0, annealed)

    def compute_td_loss(self, batch_size, frame_idx):
        """Sample a batch, compute the weighted squared TD error, take an
        optimizer step, and refresh the sampled transitions' priorities.

        Returns the scalar loss tensor, or None while the buffer holds fewer
        than ``batch_size`` transitions.
        """
        if len(self.replay_buffer) < batch_size:
            return None
        beta = self.beta_by_frame(frame_idx)
        batch = self.replay_buffer.sample(batch_size, beta)
        state, action, reward, next_state, done, indices, weights = batch

        state = Variable(torch.FloatTensor(np.float32(state)))
        next_state = Variable(torch.FloatTensor(np.float32(next_state)))
        action = Variable(torch.LongTensor(action))
        reward = Variable(torch.FloatTensor(reward))
        done = Variable(torch.FloatTensor(done))
        weights = Variable(torch.FloatTensor(weights))

        # Q(s, a) for the actions actually taken in the batch.
        q_value = self.current_model(state).gather(
            1, action.unsqueeze(1)).squeeze(1)

        # Online net selects the greedy next action; target net evaluates it.
        best_actions = torch.max(self.current_model(next_state), 1)[1]
        next_q_value = self.target_model(next_state).gather(
            1, best_actions.unsqueeze(1)).squeeze(1)
        expected_q_value = reward + self.gamma * next_q_value * (1 - done)

        # Per-sample squared error, importance-weighted and clamped at 1.
        loss = (q_value - Variable(expected_q_value.data)).pow(2) * weights
        loss[loss.gt(1)] = 1
        prios = loss + 1e-5  # epsilon keeps every priority strictly positive
        loss = loss.mean()

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.replay_buffer.update_priorities(indices, prios.data.cpu().numpy())
        return loss
| [
"torch.LongTensor",
"numpy.float32",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.max",
"common.replay_buffer.PrioritizedReplayBuffer"
] | [((164, 189), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (187, 189), False, 'import torch\n'), ((387, 448), 'common.replay_buffer.PrioritizedReplayBuffer', 'PrioritizedReplayBuffer', (['self.buffersize', "parameters['alpha']"], {}), "(self.buffersize, parameters['alpha'])\n", (410, 448), False, 'from common.replay_buffer import PrioritizedReplayBuffer\n'), ((1349, 1373), 'torch.LongTensor', 'torch.LongTensor', (['action'], {}), '(action)\n', (1365, 1373), False, 'import torch\n'), ((1401, 1426), 'torch.FloatTensor', 'torch.FloatTensor', (['reward'], {}), '(reward)\n', (1418, 1426), False, 'import torch\n'), ((1452, 1475), 'torch.FloatTensor', 'torch.FloatTensor', (['done'], {}), '(done)\n', (1469, 1475), False, 'import torch\n'), ((1504, 1530), 'torch.FloatTensor', 'torch.FloatTensor', (['weights'], {}), '(weights)\n', (1521, 1530), False, 'import torch\n'), ((1230, 1247), 'numpy.float32', 'np.float32', (['state'], {}), '(state)\n', (1240, 1247), True, 'import numpy as np\n'), ((1298, 1320), 'numpy.float32', 'np.float32', (['next_state'], {}), '(next_state)\n', (1308, 1320), True, 'import numpy as np\n'), ((1829, 1856), 'torch.max', 'torch.max', (['next_q_values', '(1)'], {}), '(next_q_values, 1)\n', (1838, 1856), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
Linear solve / likelihood tests.
"""
import starry
import numpy as np
from scipy.linalg import cho_solve
from scipy.stats import multivariate_normal
import pytest
import itertools
@pytest.fixture(autouse=True)
def data():
    """Build a dipole reflected-light map and a noisy synthetic light curve.

    Returns (map, flux kwargs, true amplitude, true inclination, true
    coefficients, noise sigma, observed flux).
    """
    # A dipole map with known amplitude, inclination, and coefficients
    dipole_map = starry.Map(ydeg=1, reflected=True)
    amp_true = 0.75
    inc_true = 60
    y_true = np.array([1, 0.1, 0.2, 0.3])
    dipole_map.amp = amp_true
    dipole_map[1, :] = y_true[1:]
    dipole_map.inc = inc_true

    # Observation geometry: the illumination source circles 3.5x per rotation
    theta = np.linspace(0, 360, 100)
    phi = 3.5 * theta
    xs = np.cos(phi * np.pi / 180)
    ys = 0.1 * np.cos(phi * np.pi / 180)
    zs = np.sin(phi * np.pi / 180)
    kwargs = dict(theta=theta, xs=xs, ys=ys, zs=zs)

    # Synthetic flux plus a little Gaussian noise (fixed seed for repeatability)
    flux = dipole_map.flux(**kwargs).eval()
    sigma = 1e-5
    np.random.seed(1)
    flux += np.random.randn(len(theta)) * sigma

    return (dipole_map, kwargs, amp_true, inc_true, y_true, sigma, flux)
# Parameter combinations we'll test: every way of specifying the prior
# covariance (L) and the data covariance (C), plus the Woodbury flag for
# the likelihood tests.
vals = ["scalar", "vector", "matrix", "cholesky"]
woodbury = [False, True]
# NOTE: itertools.product returns one-shot iterators; each one is consumed
# by exactly one @pytest.mark.parametrize below, so this is safe.
solve_inputs = itertools.product(vals, vals)
lnlike_inputs = itertools.product(vals, vals, woodbury)
@pytest.mark.parametrize("L,C", solve_inputs)
def test_solve(L, C, data):
map, kwargs, amp_true, inc_true, y_true, sigma, flux = data
# Place a generous prior on the map coefficients
if L == "scalar":
map.set_prior(L=1)
elif L == "vector":
map.set_prior(L=np.ones(map.Ny))
elif L == "matrix":
map.set_prior(L=np.eye(map.Ny))
elif L == "cholesky":
map.set_prior(cho_L=np.eye(map.Ny))
# Provide the dataset
if C == "scalar":
map.set_data(flux, C=sigma ** 2)
elif C == "vector":
map.set_data(flux, C=np.ones(len(flux)) * sigma ** 2)
elif C == "matrix":
map.set_data(flux, C=np.eye(len(flux)) * sigma ** 2)
elif C == "cholesky":
map.set_data(flux, cho_C=np.eye(len(flux)) * sigma)
# Solve the linear problem
map.inc = inc_true
mu, cho_cov = map.solve(**kwargs)
mu = mu.eval()
cho_cov = cho_cov.eval()
# Ensure the likelihood of the true value is close to that of
# the MAP solution
cov = cho_cov.dot(cho_cov.T)
LnL0 = multivariate_normal.logpdf(mu, mean=mu, cov=cov)
LnL = multivariate_normal.logpdf(amp_true * y_true, mean=mu, cov=cov)
assert LnL0 - LnL < 5.00
# Check that we can draw from the posterior
map.draw()
@pytest.mark.parametrize("L,C,woodbury", lnlike_inputs)
def test_lnlike(L, C, woodbury, data):
"""Test the log marginal likelihood method."""
map, kwargs, amp_true, inc_true, y_true, sigma, flux = data
# Place a generous prior on the map coefficients
if L == "scalar":
map.set_prior(L=1)
elif L == "vector":
map.set_prior(L=np.ones(map.Ny))
elif L == "matrix":
map.set_prior(L=np.eye(map.Ny))
elif L == "cholesky":
map.set_prior(cho_L=np.eye(map.Ny))
# Provide the dataset
if C == "scalar":
map.set_data(flux, C=sigma ** 2)
elif C == "vector":
map.set_data(flux, C=np.ones(len(flux)) * sigma ** 2)
elif C == "matrix":
map.set_data(flux, C=np.eye(len(flux)) * sigma ** 2)
elif C == "cholesky":
map.set_data(flux, cho_C=np.eye(len(flux)) * sigma)
# Compute the marginal log likelihood for different inclinations
incs = [15, 30, 45, 60, 75, 90]
ll = np.zeros_like(incs, dtype=float)
for i, inc in enumerate(incs):
map.inc = inc
ll[i] = map.lnlike(woodbury=woodbury, **kwargs).eval()
# Verify that we get the correct inclination
assert incs[np.argmax(ll)] == 60
assert np.allclose(ll[np.argmax(ll)], 974.221605) # benchmarked
| [
"starry.Map",
"numpy.zeros_like",
"numpy.random.seed",
"numpy.eye",
"numpy.argmax",
"pytest.fixture",
"numpy.ones",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"itertools.product",
"numpy.cos",
"pytest.mark.parametrize",
"scipy.stats.multivariate_normal.logpdf"
] | [((212, 240), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (226, 240), False, 'import pytest\n'), ((1089, 1118), 'itertools.product', 'itertools.product', (['vals', 'vals'], {}), '(vals, vals)\n', (1106, 1118), False, 'import itertools\n'), ((1135, 1174), 'itertools.product', 'itertools.product', (['vals', 'vals', 'woodbury'], {}), '(vals, vals, woodbury)\n', (1152, 1174), False, 'import itertools\n'), ((1178, 1222), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""L,C"""', 'solve_inputs'], {}), "('L,C', solve_inputs)\n", (1201, 1222), False, 'import pytest\n'), ((2459, 2513), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""L,C,woodbury"""', 'lnlike_inputs'], {}), "('L,C,woodbury', lnlike_inputs)\n", (2482, 2513), False, 'import pytest\n'), ((295, 329), 'starry.Map', 'starry.Map', ([], {'ydeg': '(1)', 'reflected': '(True)'}), '(ydeg=1, reflected=True)\n', (305, 329), False, 'import starry\n'), ((381, 409), 'numpy.array', 'np.array', (['[1, 0.1, 0.2, 0.3]'], {}), '([1, 0.1, 0.2, 0.3])\n', (389, 409), True, 'import numpy as np\n'), ((560, 584), 'numpy.linspace', 'np.linspace', (['(0)', '(360)', '(100)'], {}), '(0, 360, 100)\n', (571, 584), True, 'import numpy as np\n'), ((616, 641), 'numpy.cos', 'np.cos', (['(phi * np.pi / 180)'], {}), '(phi * np.pi / 180)\n', (622, 641), True, 'import numpy as np\n'), ((692, 717), 'numpy.sin', 'np.sin', (['(phi * np.pi / 180)'], {}), '(phi * np.pi / 180)\n', (698, 717), True, 'import numpy as np\n'), ((828, 845), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (842, 845), True, 'import numpy as np\n'), ((2240, 2288), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['mu'], {'mean': 'mu', 'cov': 'cov'}), '(mu, mean=mu, cov=cov)\n', (2266, 2288), False, 'from scipy.stats import multivariate_normal\n'), ((2299, 2362), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['(amp_true * y_true)'], {'mean': 'mu', 
'cov': 'cov'}), '(amp_true * y_true, mean=mu, cov=cov)\n', (2325, 2362), False, 'from scipy.stats import multivariate_normal\n'), ((3433, 3465), 'numpy.zeros_like', 'np.zeros_like', (['incs'], {'dtype': 'float'}), '(incs, dtype=float)\n', (3446, 3465), True, 'import numpy as np\n'), ((657, 682), 'numpy.cos', 'np.cos', (['(phi * np.pi / 180)'], {}), '(phi * np.pi / 180)\n', (663, 682), True, 'import numpy as np\n'), ((3652, 3665), 'numpy.argmax', 'np.argmax', (['ll'], {}), '(ll)\n', (3661, 3665), True, 'import numpy as np\n'), ((3699, 3712), 'numpy.argmax', 'np.argmax', (['ll'], {}), '(ll)\n', (3708, 3712), True, 'import numpy as np\n'), ((1467, 1482), 'numpy.ones', 'np.ones', (['map.Ny'], {}), '(map.Ny)\n', (1474, 1482), True, 'import numpy as np\n'), ((2820, 2835), 'numpy.ones', 'np.ones', (['map.Ny'], {}), '(map.Ny)\n', (2827, 2835), True, 'import numpy as np\n'), ((1532, 1546), 'numpy.eye', 'np.eye', (['map.Ny'], {}), '(map.Ny)\n', (1538, 1546), True, 'import numpy as np\n'), ((2885, 2899), 'numpy.eye', 'np.eye', (['map.Ny'], {}), '(map.Ny)\n', (2891, 2899), True, 'import numpy as np\n'), ((1602, 1616), 'numpy.eye', 'np.eye', (['map.Ny'], {}), '(map.Ny)\n', (1608, 1616), True, 'import numpy as np\n'), ((2955, 2969), 'numpy.eye', 'np.eye', (['map.Ny'], {}), '(map.Ny)\n', (2961, 2969), True, 'import numpy as np\n')] |
import numpy as np
import copy
import logging
from .varform import VarForm
from .utils import validate_objective, contains_and_raised, state_to_ampl_counts, obj_from_statevector
logger = logging.getLogger(__name__)
class ObjectiveWrapper:
    """Objective function wrapper.

    Wraps a variational form object and an objective into a callable that can
    be passed to an optimizer. Remembers all previously evaluated points and
    values.
    """
    def __init__(self, obj, objective_parameters=None, varform_description=None, backend_description=None, problem_description=None, execute_parameters=None):
        """Constructor.
        Args:
            obj (function) : takes a list of 0,1 and returns objective function value for that vector
            varform_description (dict) : See varform.py
            problem_description (dict) : See varform.py
            backend_description (dict) : See varform.py
            execute_parameters (dict) : Parameters passed to execute function (e.g. {'shots': 8000})
            objective_parameters (dict) : Parameters for objective function.
                Accepted fields:
                    'save_vals' (bool) -- save values of the objective function
                        Note: statistic on the value of objective function (e.g. mean) is saved automatically
                    'save_resstrs' (bool) -- save all raw resstrs
                    'nprocesses' : number of processes to use for objective function evaluation (only used for statevector_simulator)
                    'precomputed_energies' (np.array): array of precomputed energies, should be the same as the diagonal of the cost Hamiltonian
        """
        # Fail fast if obj is not a valid objective for this qubit count.
        validate_objective(obj, varform_description['num_qubits'])
        if backend_description['package'] == 'qiskit' and 'statevector' in backend_description['name'] and varform_description['num_qubits'] > 10:
            logger.warning(f"obj_from_statevector used with statevector simulator is slow for large number of qubits\nUse qasm_simulator instead.")
        self.obj = obj
        self.varform_description = varform_description
        self.problem_description = problem_description
        self.execute_parameters = execute_parameters
        self.objective_parameters = objective_parameters
        self.backend_description = backend_description
        # 'mpsbackend' variational forms live in a separate package; import it
        # lazily so the dependency is only required when actually requested.
        if 'package' in self.varform_description and self.varform_description['package'] == 'mpsbackend':
            import mpsbackend.variational_forms as mps_variational_forms
            varform_parameters = {k : v for k,v in varform_description.items() if k != 'name' and k != 'package'}
            self.var_form = getattr(mps_variational_forms, varform_description['name'])(**varform_parameters)
        else:
            self.var_form = VarForm(varform_description=varform_description, problem_description=problem_description)
        # Expose num_parameters on the wrapper and remove it from the wrapped
        # var_form instance — presumably so callers read it from here only.
        self.num_parameters = self.var_form.num_parameters
        del self.var_form.num_parameters
        if self.objective_parameters is None or 'precomputed_energies' not in self.objective_parameters:
            self.precomputed_energies = None
        else:
            self.precomputed_energies = self.objective_parameters['precomputed_energies']
        # Evaluation histories: per-call statistic, raw values, points, and
        # raw result strings (the latter two only when the save_* flags ask).
        self.vals_statistic = []
        self.vals = []
        self.points = []
        self.resstrs = []
        self.is_periodic = True

    def get_obj(self):
        """Return the objective function ``f(theta)`` to hand to an optimizer.

        The returned closure records every evaluated point and the resulting
        objective statistic on this wrapper instance.
        """
        def f(theta):
            self.points.append(copy.deepcopy(theta))
            resstrs = self.var_form.run(theta, backend_description=self.backend_description, execute_parameters=self.execute_parameters)
            if contains_and_raised(self.objective_parameters, 'save_resstrs'):
                self.resstrs.append(resstrs)
            if self.backend_description['package'] == 'qiskit' and 'statevector' in self.backend_description['name']:
                # Statevector path: evaluate the objective directly on the state.
                objective_value = obj_from_statevector(resstrs, self.obj, precomputed=self.precomputed_energies)
            else:
                vals = [self.obj(x[::-1]) for x in resstrs] # reverse because of qiskit notation
                if contains_and_raised(self.objective_parameters, 'save_vals'):
                    self.vals.append(vals)
                # TODO: should allow for different statistics (e.g. CVAR)
                objective_value = np.mean(vals)
            logger.info(f"called at step {len(self.vals_statistic)}, objective: {objective_value} at point {theta}")
            self.vals_statistic.append(objective_value)
            return objective_value
        return f
| [
"copy.deepcopy",
"numpy.mean",
"logging.getLogger"
] | [((188, 215), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (205, 215), False, 'import logging\n'), ((3623, 3643), 'copy.deepcopy', 'copy.deepcopy', (['theta'], {}), '(theta)\n', (3636, 3643), False, 'import copy\n'), ((4486, 4499), 'numpy.mean', 'np.mean', (['vals'], {}), '(vals)\n', (4493, 4499), True, 'import numpy as np\n')] |
# pylint: disable=C0103
"""
defines:
- model = delete_bad_shells(model, max_theta=175., max_skew=70., max_aspect_ratio=100.,
max_taper_ratio=4.0)
- eids_to_delete = get_bad_shells(model, xyz_cid0, nid_map, max_theta=175., max_skew=70.,
max_aspect_ratio=100., max_taper_ratio=4.0)
"""
from typing import List
import numpy as np
from cpylog import SimpleLogger
from pyNastran.bdf.bdf import BDF
SIDE_MAP = {}
# CHEXA face map: face id -> the four corner positions (1-based) that appear
# to define that face of the hex connectivity.
SIDE_MAP['CHEXA'] = {
    1 : [4, 3, 2, 1],
    2 : [1, 2, 6, 5],
    3 : [2, 3, 7, 6],
    4 : [3, 4, 8, 7],
    5 : [4, 1, 5, 8],
    6 : [5, 6, 7, 8],
}
# Frequently used angle constants (radians)
PIOVER2 = np.pi / 2.
PIOVER3 = np.pi / 3.
def delete_bad_shells(model: BDF,
                      min_theta: float=0.1, max_theta: float=175.,
                      max_skew: float=70., max_aspect_ratio: float=100.,
                      max_taper_ratio: float=4.0,
                      max_warping: float=90.) -> BDF:
    """
    Removes bad CQUAD4/CTRIA3 elements in place.

    Parameters
    ----------
    model : BDF
        this should be equivalenced
    min_theta : float; default=0.1
        the minimum allowed interior angle (degrees)
    max_theta : float; default=175.
        the maximum interior angle (degrees)
    max_skew : float; default=70.
        the maximum skew angle (degrees)
    max_aspect_ratio : float; default=100.
        the max aspect ratio
    max_taper_ratio : float; default=4.0
        the taper ratio; applies to CQUAD4s only
    max_warping : float; default=90.0
        the maximum warp angle (degrees)

    Returns
    -------
    model : BDF
        the same model object, with failing elements deleted
    """
    xyz_cid0 = model.get_xyz_in_coord(cid=0, fdtype='float32')
    nid_map = get_node_map(model)
    eids_to_delete = get_bad_shells(model, xyz_cid0, nid_map,
                                   min_theta=min_theta, max_theta=max_theta,
                                   max_skew=max_skew,
                                   max_aspect_ratio=max_aspect_ratio,
                                   max_taper_ratio=max_taper_ratio,
                                   max_warping=max_warping)
    # Delete the failing elements in place
    for eid in eids_to_delete:
        del model.elements[eid]
    model.log.info('deleted %s bad CTRIA3/CQUAD4s' % len(eids_to_delete))
    model.validate()
    return model
def get_node_map(model):
    """Build a mapper from node id to its index in the sorted node ordering."""
    return {nid: index for index, nid in enumerate(sorted(model.nodes.keys()))}
def get_bad_shells(model: BDF, xyz_cid0, nid_map,
                   min_theta: float=0.1, max_theta: float=175., max_skew: float=70.,
                   max_aspect_ratio: float=100., max_taper_ratio: float=4.0,
                   max_warping: float=60.0) -> List[int]:
    """
    Get the bad shell elements.

    Parameters
    ----------
    model : BDF()
        the model object
    xyz_cid0 : (N, 3) float ndarray
        the xyz coordinates in cid=0
    nid_map : dict[nid] : index
        nid : int
            the node id
        index : int
            the index of the node id in xyz_cid0
    min_theta : float; default=0.1
        the minimum allowed interior angle (degrees)
    max_theta : float; default=175.
        the maximum interior angle (degrees)
    max_skew : float; default=70.
        the maximum skew angle (degrees)
    max_aspect_ratio : float; default=100.
        the max aspect ratio
    max_taper_ratio : float; default=4.0
        the taper ratio; applies to CQUAD4s only
    max_warping : float; default=60.0
        the maximum warp angle (degrees)

    Returns
    -------
    eids_failed : List[int]
        element ids that fail the criteria;
        shells with an edge length=0.0 are automatically added
    """
    log = model.log
    min_theta_quad = min_theta
    min_theta_tri = min_theta
    # All angle limits are converted to radians for the per-element checks
    min_theta_quad = np.radians(min_theta_quad)
    min_theta_tri = np.radians(min_theta_tri)
    max_theta = np.radians(max_theta)
    max_skew = np.radians(max_skew)
    max_warping = np.radians(max_warping)
    eids_failed = []
    # Sorted iteration gives a deterministic order of failure messages/ids
    for eid, element in sorted(model.elements.items()):
        if element.type == 'CQUAD4':
            node_ids = element.node_ids
            #pid = element.Pid()
            #for nid in node_ids:
                #if nid is not None:
                    #nid_to_pid_map[nid].append(pid)
            #self.eid_to_nid_map[eid] = node_ids
            n1, n2, n3, n4 = [nid_map[nid] for nid in node_ids]
            p1 = xyz_cid0[n1, :]
            p2 = xyz_cid0[n2, :]
            p3 = xyz_cid0[n3, :]
            p4 = xyz_cid0[n4, :]
            if _is_bad_quad(eid, p1, p2, p3, p4, log,
                            max_aspect_ratio, min_theta_quad, max_theta, max_skew,
                            max_taper_ratio, max_warping):
                eids_failed.append(eid)
        elif element.type == 'CTRIA3':
            node_ids = element.node_ids
            #pid = element.Pid()
            #self.eid_to_nid_map[eid] = node_ids
            #for nid in node_ids:
                #if nid is not None:
                    #nid_to_pid_map[nid].append(pid)
            n1, n2, n3 = [nid_map[nid] for nid in node_ids]
            p1 = xyz_cid0[n1, :]
            p2 = xyz_cid0[n2, :]
            p3 = xyz_cid0[n3, :]
            if _is_bad_tri(eid, p1, p2, p3, log,
                           max_aspect_ratio, min_theta_tri, max_theta, max_skew):
                eids_failed.append(eid)
    return eids_failed
def _is_bad_quad(eid: int, p1, p2, p3, p4,
log: SimpleLogger,
max_aspect_ratio: float,
min_theta_quad: float,
max_theta: float,
max_skew: float,
max_taper_ratio: float,
max_warping: float) -> bool:
"""identifies if a CQUAD4 has poor quality"""
is_bad_quad = True
v21 = p2 - p1
v32 = p3 - p2
v43 = p4 - p3
v14 = p1 - p4
#aspect_ratio = max(p12, p23, p34, p14) / max(p12, p23, p34, p14)
lengths = np.linalg.norm([v21, v32, v43, v14], axis=1)
#assert len(lengths) == 3, lengths
length_min = lengths.min()
if length_min == 0.0:
log.debug(f'eid={eid} failed length_min check; length_min={length_min}')
return is_bad_quad
# -------------------------------------------------------------------------------
aspect_ratio = lengths.max() / length_min
if aspect_ratio > max_aspect_ratio:
log.debug(f'eid={eid} failed aspect_ratio check; AR={aspect_ratio:.2f} > {max_aspect_ratio}')
return is_bad_quad
# -------------------------------------------------------------------------------
p12 = (p1 + p2) / 2.
p23 = (p2 + p3) / 2.
p34 = (p3 + p4) / 2.
p14 = (p4 + p1) / 2.
normal = np.cross(p3 - p1, p4 - p2) # v42 x v31
# e3
# 4-------3
# | |
# |e4 | e2
# 1-------2
# e1
e13 = p34 - p12
e42 = p23 - p14
norm_e13 = np.linalg.norm(e13)
norm_e42 = np.linalg.norm(e42)
cos_skew1 = (e13 @ e42) / (norm_e13 * norm_e42)
cos_skew2 = (e13 @ -e42) / (norm_e13 * norm_e42)
skew = np.pi / 2. - np.abs(np.arccos(np.clip([cos_skew1, cos_skew2], -1., 1.))).min()
if skew > max_skew:
log.debug('eid=%s failed max_skew check; skew=%.2f' % (eid, np.degrees(skew)))
return is_bad_quad
# -------------------------------------------------------------------------------
area1 = 0.5 * np.linalg.norm(np.cross(-v14, v21)) # v41 x v21
area2 = 0.5 * np.linalg.norm(np.cross(-v21, v32)) # v12 x v32
area3 = 0.5 * np.linalg.norm(np.cross(v43, v32)) # v43 x v32
area4 = 0.5 * np.linalg.norm(np.cross(v14, -v43)) # v14 x v34
aavg = (area1 + area2 + area3 + area4) / 4.
taper_ratioi = (abs(area1 - aavg) + abs(area2 - aavg) +
abs(area3 - aavg) + abs(area4 - aavg)) / aavg
if taper_ratioi > max_taper_ratio:
log.debug('eid=%s failed taper_ratio check; taper=%.2f' % (eid, taper_ratioi))
return is_bad_quad
# -------------------------------------------------------------------------------
#if 0:
#areai = 0.5 * np.linalg.norm(normal)
## still kind of in development
##
## the ratio of the ideal area to the actual area
## this is an hourglass check
#areas = [
#np.linalg.norm(np.cross(-v14, v21)), # v41 x v21
#np.linalg.norm(np.cross(v32, -v21)), # v32 x v12
#np.linalg.norm(np.cross(v43, -v32)), # v43 x v23
#np.linalg.norm(np.cross(v14, v43)), # v14 x v43
#]
#area_ratioi1 = areai / min(areas)
#area_ratioi2 = max(areas) / areai
#area_ratioi = max(area_ratioi1, area_ratioi2)
# -------------------------------------------------------------------------------
# ixj = k
# dot the local normal with the normal vector
# then take the norm of that to determine the angle relative to the normal
# then take the sign of that to see if we're pointing roughly towards the normal
# np.sign(np.linalg.norm(np.dot(
# a x b = ab sin(theta)
# a x b / ab = sin(theta)
# sin(theta) < 0. -> normal is flipped
n2 = np.sign(np.cross(v21, v32) @ normal)
n3 = np.sign(np.cross(v32, v43) @ normal)
n4 = np.sign(np.cross(v43, v14) @ normal)
n1 = np.sign(np.cross(v14, v21) @ normal)
n = np.array([n1, n2, n3, n4])
theta_additional = np.where(n < 0, 2*np.pi, 0.)
norm_v21 = np.linalg.norm(v21)
norm_v32 = np.linalg.norm(v32)
norm_v43 = np.linalg.norm(v43)
norm_v14 = np.linalg.norm(v14)
cos_theta1 = (v21 @ -v14) / (norm_v21 * norm_v14)
cos_theta2 = (v32 @ -v21) / (norm_v32 * norm_v21)
cos_theta3 = (v43 @ -v32) / (norm_v43 * norm_v32)
cos_theta4 = (v14 @ -v43) / (norm_v14 * norm_v43)
interior_angle = np.arccos(np.clip(
[cos_theta1, cos_theta2, cos_theta3, cos_theta4], -1., 1.))
theta = n * interior_angle + theta_additional
theta_mini = theta.min()
theta_maxi = theta.max()
if theta_mini < min_theta_quad:
log.debug('eid=%s failed min_theta check; theta=%.2f' % (
eid, np.degrees(theta_mini)))
return is_bad_quad
if theta_maxi > max_theta:
log.debug('eid=%s failed max_theta check; theta=%.2f' % (
eid, np.degrees(theta_maxi)))
return is_bad_quad
# -------------------------------------------------------------------------------
# warping
v31 = p3 - p1
v42 = p4 - p2
v41 = -v14
n123 = np.cross(v21, v31)
n134 = np.cross(v31, v41)
#v1 o v2 = v1 * v2 cos(theta)
cos_warp1 = (n123 @ n134) / (np.linalg.norm(n123) * np.linalg.norm(n134))
# split the quad in the order direction and take the maximum of the two splits
# 4---3
# | \ |
# | \|
# 1---2
n124 = np.cross(v21, v41)
n234 = np.cross(v32, v42)
cos_warp2 = (n124 @ n234) / (np.linalg.norm(n124) * np.linalg.norm(n234))
max_warpi = np.abs(np.arccos(
np.clip([cos_warp1, cos_warp2], -1., 1.))).max()
if max_warpi > max_warping:
log.debug('eid=%s failed max_warping check; theta=%.2f' % (
eid, np.degrees(max_warpi)))
#print('eid=%s theta_min=%-5.2f theta_max=%-5.2f '
#'skew=%-5.2f AR=%-5.2f taper_ratioi=%.2f' % (
#eid,
#np.degrees(theta_mini), np.degrees(theta_maxi),
#np.degrees(skew), aspect_ratio, taper_ratioi))
is_bad_quad = False
return is_bad_quad
def _is_bad_tri(eid: int, p1, p2, p3, log: SimpleLogger,
max_aspect_ratio: float,
min_theta_tri: float,
max_theta: float,
max_skew: float) -> bool:
"""identifies if a CTRIA3 has poor quality"""
is_bad_tri = True
v21 = p2 - p1
v32 = p3 - p2
v13 = p1 - p3
lengths = np.linalg.norm([v21, v32, v13], axis=1)
length_min = lengths.min()
if length_min == 0.0:
log.debug('eid=%s failed length_min check; length_min=%s' % (
eid, length_min))
return is_bad_tri
#assert len(lengths) == 3, lengths
aspect_ratio = lengths.max() / length_min
if aspect_ratio > max_aspect_ratio:
log.debug('eid=%s failed aspect_ratio check; AR=%s' % (eid, aspect_ratio))
return is_bad_tri
cos_theta1 = (v21 @ -v13) / (np.linalg.norm(v21) * np.linalg.norm(v13))
cos_theta2 = (v32 @ -v21) / (np.linalg.norm(v32) * np.linalg.norm(v21))
cos_theta3 = (v13 @ -v32) / (np.linalg.norm(v13) * np.linalg.norm(v32))
theta = np.arccos(np.clip(
[cos_theta1, cos_theta2, cos_theta3], -1., 1.))
theta_mini = theta.min()
theta_maxi = theta.max()
if theta_mini < min_theta_tri:
log.debug('eid=%s failed min_theta check; theta=%s' % (
eid, np.degrees(theta_mini)))
return is_bad_tri
if theta_maxi > max_theta:
log.debug('eid=%s failed max_theta check; theta=%s' % (
eid, np.degrees(theta_maxi)))
return is_bad_tri
# 3
# / \
# e3/ \ e2
# / /\
# / / \
# 1---/----2
# e1
e1 = (p1 + p2) / 2.
e2 = (p2 + p3) / 2.
e3 = (p3 + p1) / 2.
e21 = e2 - e1
e31 = e3 - e1
e32 = e3 - e2
norm_e21 = np.linalg.norm(e21)
norm_e31 = np.linalg.norm(e31)
norm_e32 = np.linalg.norm(e32)
e3_p2 = e3 - p2
e2_p1 = e2 - p1
e1_p3 = e1 - p3
norm_e3_p2 = np.linalg.norm(e3_p2)
norm_e2_p1 = np.linalg.norm(e2_p1)
norm_e1_p3 = np.linalg.norm(e1_p3)
cos_skew1 = (e2_p1 @ e31) / (norm_e2_p1 * norm_e31)
cos_skew2 = (e2_p1 @ -e31) / (norm_e2_p1 * norm_e31)
cos_skew3 = (e3_p2 @ e21) / (norm_e3_p2 * norm_e21)
cos_skew4 = (e3_p2 @ -e21) / (norm_e3_p2 * norm_e21)
cos_skew5 = (e1_p3 @ e32) / (norm_e1_p3 * norm_e32)
cos_skew6 = (e1_p3 @ -e32) / (norm_e1_p3 * norm_e32)
skew = np.pi / 2. - np.abs(np.arccos(
np.clip([cos_skew1, cos_skew2, cos_skew3,
cos_skew4, cos_skew5, cos_skew6], -1., 1.)
)).min()
if skew > max_skew:
log.debug('eid=%s failed max_skew check; skew=%s' % (eid, np.degrees(skew)))
return is_bad_tri
is_bad_tri = False
# warping doesn't happen to CTRIA3s
#print('eid=%s theta_min=%-5.2f theta_max=%-5.2f skew=%-5.2f AR=%-5.2f' % (
#eid,
#np.degrees(theta_mini), np.degrees(theta_maxi),
#np.degrees(skew), aspect_ratio))
return is_bad_tri
def element_quality(model, nids=None, xyz_cid0=None, nid_map=None):
    """
    Gets various measures of element quality

    Parameters
    ----------
    model : BDF()
        a cross-referenced model
    nids : (nnodes, ) int ndarray; default=None
        the nodes of the model in sorted order
        includes GRID, SPOINT, & EPOINTs
    xyz_cid0 : (nnodes, 3) float ndarray; default=None
        the associated global xyz locations
    nid_map : Dict[nid]->index; default=None
        a mapper dictionary

    Returns
    -------
    quality : Dict[name] : (nelements, ) float ndarray
        Various quality metrics
        names : min_interior_angle, max_interior_angle, dideal_theta,
                max_skew_angle, max_aspect_ratio,
                area_ratio, taper_ratio, min_edge_length
        values : The result is ``np.nan`` if element type does not define
                 the parameter.  For example, CELAS1 doesn't have an
                 aspect ratio.

    Notes
    -----
    - pulled from nastran_io.py
    """
    if nids is None or xyz_cid0 is None:
        # build the sorted node ids and their global xyz locations from the model
        out = model.get_displacement_index_xyz_cp_cd(
            fdtype='float64', idtype='int32', sort_ids=True)
        unused_icd_transform, icp_transform, xyz_cp, nid_cp_cd = out
        nids = nid_cp_cd[:, 0]
        xyz_cid0 = model.transform_xyzcp_to_xyz_cid(
            xyz_cp, nids, icp_transform, cid=0,
            in_place=False)
    if nid_map is None:
        # nid -> row index into xyz_cid0
        nid_map = {}
        for i, nid in enumerate(nids):
            nid_map[nid] = i
    all_nids = nids
    del nids

    # solid-element face connectivity (0-based local node indices);
    # these normals point inwards
    #      4
    #    / | \
    #   /  |  \
    #  3-------2
    #   \  |  /
    #    \ | /
    #      1
    _ctetra_faces = (
        (0, 1, 2), # (1, 2, 3),
        (0, 3, 1), # (1, 4, 2),
        (0, 3, 2), # (1, 3, 4),
        (1, 3, 2), # (2, 4, 3),
    )

    # these normals point inwards
    #
    #
    #
    #
    #        /4-----3
    #       /       /
    #      /  5    /
    #    /    \   /
    #   /      \ /
    # 1---------2
    _cpyram_faces = (
        (0, 1, 2, 3), # (1, 2, 3, 4),
        (1, 4, 2), # (2, 5, 3),
        (2, 4, 3), # (3, 5, 4),
        (0, 3, 4), # (1, 4, 5),
        (0, 4, 1), # (1, 5, 2),
    )

    # these normals point inwards
    #       /6
    #     / | \
    #   /   |   \
    # 3\    |    \
    # |  \  /4-----5
    # |    \/       /
    # |   /  \     /
    # |  /    \   /
    # | /      \ /
    # 1---------2
    _cpenta_faces = (
        (0, 2, 1), # (1, 3, 2),
        (3, 4, 5), # (4, 5, 6),
        (0, 1, 4, 3), # (1, 2, 5, 4), # bottom
        (1, 2, 5, 4), # (2, 3, 6, 5), # right
        (0, 3, 5, 2), # (1, 4, 6, 3), # left
    )

    # these normals point inwards
    #      8----7
    #     /|   /|
    #    / |  / |
    #   /  5-/--6
    # 4-----3   /
    # |  /  |  /
    # | /   | /
    # 1-----2
    _chexa_faces = (
        (4, 5, 6, 7), # (5, 6, 7, 8),
        (0, 3, 2, 1), # (1, 4, 3, 2),
        (1, 2, 6, 5), # (2, 3, 7, 6),
        (2, 3, 7, 6), # (3, 4, 8, 7),
        (0, 4, 7, 3), # (1, 5, 8, 4),
        (0, 6, 5, 4), # (1, 7, 6, 5),
    )

    # quality
    # NOTE(review): the arrays below are sized with the pre-loop element
    # count; unsupported etypes decrement `nelements` but the arrays keep
    # their original length, so trailing entries stay 0.0
    nelements = len(model.elements)
    min_interior_angle = np.zeros(nelements, 'float32')
    max_interior_angle = np.zeros(nelements, 'float32')
    dideal_theta = np.zeros(nelements, 'float32')
    max_skew_angle = np.zeros(nelements, 'float32')
    max_warp_angle = np.zeros(nelements, 'float32')
    max_aspect_ratio = np.zeros(nelements, 'float32')
    #area = np.zeros(nelements, 'float32')
    area_ratio = np.zeros(nelements, 'float32')
    taper_ratio = np.zeros(nelements, 'float32')
    min_edge_length = np.zeros(nelements, 'float32')
    #normals = np.full((nelements, 3), np.nan, 'float32')
    #nids_list = []
    ieid = 0
    for unused_eid, elem in sorted(model.elements.items()):
        # progress indicator for big models
        if ieid % 5000 == 0 and ieid > 0:
            print(' map_elements = %i' % ieid)
        etype = elem.type
        nids = None
        inids = None
        # nan marks a metric that is undefined for this element type
        dideal_thetai = np.nan
        min_thetai = np.nan
        max_thetai = np.nan
        #max_thetai = np.nan
        max_skew = np.nan
        max_warp = np.nan
        aspect_ratio = np.nan
        #areai = np.nan
        area_ratioi = np.nan
        taper_ratioi = np.nan
        min_edge_lengthi = np.nan
        #normali = np.nan
        if etype in ['CTRIA3', 'CTRIAR', 'CTRAX3', 'CPLSTN3']:
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2, p3 = xyz_cid0[inids, :]
            out = tri_quality(p1, p2, p3)
            (areai, max_skew, aspect_ratio,
             min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
            #normali = np.cross(p1 - p2, p1 - p3)
        elif etype in ['CQUAD4', 'CQUADR', 'CPLSTN4', 'CQUADX4']:
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2, p3, p4 = xyz_cid0[inids, :]
            out = quad_quality(elem, p1, p2, p3, p4)
            (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
             min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
        elif etype == 'CTRIA6':
            nids = elem.nodes
            # mid-side nodes may be missing; fall back to the corner nodes
            if None in nids:
                inids = np.searchsorted(all_nids, nids[:3])
                nids = nids[:3]
                p1, p2, p3 = xyz_cid0[inids, :]
            else:
                inids = np.searchsorted(all_nids, nids)
                p1, p2, p3, p4, unused_p5, unused_p6 = xyz_cid0[inids, :]
            out = tri_quality(p1, p2, p3)
            (areai, max_skew, aspect_ratio,
             min_thetai, max_thetai, dideal_thetai, min_edge_lengthi) = out
        elif etype == 'CQUAD8':
            nids = elem.nodes
            if None in nids:
                inids = np.searchsorted(all_nids, nids[:4])
                nids = nids[:4]
                p1, p2, p3, p4 = xyz_cid0[inids, :]
            else:
                inids = np.searchsorted(all_nids, nids)
                p1, p2, p3, p4 = xyz_cid0[inids[:4], :]
            out = quad_quality(elem, p1, p2, p3, p4)
            (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
             min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
            #normali = np.cross(p1 - p3, p2 - p4)
        elif etype == 'CSHEAR':
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2, p3, p4 = xyz_cid0[inids, :]
            out = quad_quality(elem, p1, p2, p3, p4)
            (areai, taper_ratioi, area_ratioi, max_skew, aspect_ratio,
             min_thetai, max_thetai, dideal_thetai, min_edge_lengthi, max_warp) = out
        elif etype == 'CTETRA':
            nids = elem.nodes
            if None in nids:
                nids = nids[:4]
            inids = np.searchsorted(all_nids, nids)
            min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                _ctetra_faces, nids, nid_map, xyz_cid0)
        elif etype == 'CHEXA':
            nids = elem.nodes
            if None in nids:
                nids = nids[:8]
            inids = np.searchsorted(all_nids, nids)
            min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                _chexa_faces, nids, nid_map, xyz_cid0)
        elif etype == 'CPENTA':
            nids = elem.nodes
            if None in nids:
                nids = nids[:6]
            inids = np.searchsorted(all_nids, nids)
            min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                _cpenta_faces, nids, nid_map, xyz_cid0)
        elif etype == 'CPYRAM':
            # TODO: assuming 5
            nids = elem.nodes
            if None in nids:
                nids = nids[:5]
            inids = np.searchsorted(all_nids, nids)
            min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                _cpyram_faces, nids, nid_map, xyz_cid0)
        elif etype in ['CELAS2', 'CELAS4', 'CDAMP4']:
            # these can have empty nodes and have no property
            # CELAS1: 1/2 GRID/SPOINT and pid
            # CELAS2: 1/2 GRID/SPOINT, k, ge, and s
            # CELAS3: 1/2 SPOINT and pid
            # CELAS4: 1/2 SPOINT and k
            continue
            #nids = elem.nodes
            #assert nids[0] != nids[1]
            #if None in nids:
                #assert nids[0] is not None, nids
                #assert nids[1] is None, nids
                #nids = [nids[0]]
            #else:
                #nids = elem.nodes
                #assert nids[0] != nids[1]
            #inids = np.searchsorted(all_nids, nids)
        elif etype in ['CBUSH', 'CBUSH1D', 'CBUSH2D',
                       'CELAS1', 'CELAS3',
                       'CDAMP1', 'CDAMP2', 'CDAMP3', 'CDAMP5',
                       'CFAST', 'CGAP', 'CVISC']:
            # 0d elements: no meaningful quality metrics
            #nids = elem.nodes
            #assert nids[0] != nids[1]
            #assert None not in nids, 'nids=%s\n%s' % (nids, elem)
            #inids = np.searchsorted(all_nids, nids)
            continue
        elif etype in ['CBAR', 'CBEAM']:
            # 1d elements: only the length is tracked
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2 = xyz_cid0[inids, :]
            min_edge_lengthi = np.linalg.norm(p2 - p1)
        elif etype in ['CROD', 'CTUBE']:
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2 = xyz_cid0[inids, :]
            min_edge_lengthi = np.linalg.norm(p2 - p1)
            #nnodes = 2
            #dim = 1
        elif etype == 'CONROD':
            nids = elem.nodes
            inids = np.searchsorted(all_nids, nids)
            p1, p2 = xyz_cid0[inids, :]
            min_edge_lengthi = np.linalg.norm(p2 - p1)
        #------------------------------
        # rare
        #elif etype == 'CIHEX1':
            #nids = elem.nodes
            #pid = elem.pid
            #cell_type = cell_type_hexa8
            #inids = np.searchsorted(all_nids, nids)
            #min_thetai, max_thetai, dideal_thetai, min_edge_lengthi = get_min_max_theta(
                #_chexa_faces, nids, nid_map, xyz_cid0)
            #nnodes = 8
            #dim = 3
        elif etype == 'CHBDYE':
            continue
            #self.eid_map[eid] = ieid
            #eid_solid = elem.eid2
            #side = elem.side
            #element_solid = model.elements[eid_solid]
            #mapped_inids = SIDE_MAP[element_solid.type][side]
            #side_inids = [nid - 1 for nid in mapped_inids]
            #nodes = element_solid.node_ids
            ##nnodes = len(side_inids)
            #nids = [nodes[inid] for inid in side_inids]
            #inids = np.searchsorted(all_nids, nids)
            #if len(side_inids) == 4:
                #pass
            #else:
                #msg = 'element_solid:\n%s' % (str(element_solid))
                #msg += 'mapped_inids = %s\n' % mapped_inids
                #msg += 'side_inids = %s\n' % side_inids
                #msg += 'nodes = %s\n' % nodes
                ##msg += 'side_nodes = %s\n' % side_nodes
                #raise NotImplementedError(msg)
        else:
            # unsupported etype; shrink the reported count (see NOTE above)
            #raise NotImplementedError(elem)
            nelements -= 1
            continue
        #nids_list.append(nnodes)
        #nids_list.extend(inids)
        #normals[ieid] = normali
        #eids_array[ieid] = eid
        #pids_array[ieid] = pid
        #dim_array[ieid] = dim
        #cell_types_array[ieid] = cell_type
        #cell_offsets_array[ieid] = cell_offset # I assume the problem is here
        #cell_offset += nnodes + 1
        #eid_map[eid] = ieid
        min_interior_angle[ieid] = min_thetai
        max_interior_angle[ieid] = max_thetai
        dideal_theta[ieid] = dideal_thetai
        max_skew_angle[ieid] = max_skew
        max_warp_angle[ieid] = max_warp
        max_aspect_ratio[ieid] = aspect_ratio
        #area[ieid] = areai
        area_ratio[ieid] = area_ratioi
        taper_ratio[ieid] = taper_ratioi
        min_edge_length[ieid] = min_edge_lengthi
        ieid += 1
    quality = {
        'min_interior_angle' : min_interior_angle,
        'max_interior_angle' : max_interior_angle,
        'dideal_theta' : dideal_theta,
        'max_skew_angle' : max_skew_angle,
        'max_warp_angle' : max_warp_angle,
        'max_aspect_ratio' : max_aspect_ratio,
        #'area' : area,
        'area_ratio' : area_ratio,
        'taper_ratio' : taper_ratio,
        'min_edge_length' : min_edge_length,
    }
    return quality
def tri_quality(p1, p2, p3):
    """
    Gets the quality metrics for a tri.

    Returns
    -------
    out : tuple
        (area, max_skew, aspect_ratio, min_theta, max_theta,
         dideal_theta, min_edge_length); angles are in radians.
        The angle metrics and aspect ratio are nan for a degenerate
        (zero-length-edge) triangle.
    """
    # edge vectors and their lengths
    v21 = p2 - p1
    v32 = p3 - p2
    v13 = p1 - p3
    length21 = np.linalg.norm(v21)
    length32 = np.linalg.norm(v32)
    length13 = np.linalg.norm(v13)
    min_edge_length = min(length21, length32, length13)
    area = 0.5 * np.linalg.norm(np.cross(v21, v13))

    # skew: angle between each median-ish line (vertex -> opposite edge
    # midpoint) and the corresponding midside connector
    #        3
    #       / \
    #    e3/   \ e2
    #     /    /\
    #    /    /  \
    #   1---/----2
    #      e1
    e1 = (p1 + p2) / 2.
    e2 = (p2 + p3) / 2.
    e3 = (p3 + p1) / 2.
    e21 = e2 - e1
    e31 = e3 - e1
    e32 = e3 - e2
    e3_p2 = e3 - p2
    e2_p1 = e2 - p1
    e1_p3 = e1 - p3
    ne31 = np.linalg.norm(e31)
    ne21 = np.linalg.norm(e21)
    ne32 = np.linalg.norm(e32)
    ne2_p1 = np.linalg.norm(e2_p1)
    ne3_p2 = np.linalg.norm(e3_p2)
    ne1_p3 = np.linalg.norm(e1_p3)
    skew_cosines = np.clip([
        (e2_p1 @ e31) / (ne2_p1 * ne31),
        (e2_p1 @ -e31) / (ne2_p1 * ne31),
        (e3_p2 @ e21) / (ne3_p2 * ne21),
        (e3_p2 @ -e21) / (ne3_p2 * ne21),
        (e1_p3 @ e32) / (ne1_p3 * ne32),
        (e1_p3 @ -e32) / (ne1_p3 * ne32),
    ], -1., 1.)
    max_skew = np.pi / 2. - np.abs(np.arccos(skew_cosines)).min()

    lengths = np.linalg.norm([v21, v32, v13], axis=1)
    length_min = lengths.min()
    if length_min == 0.0:
        # degenerate triangle: the interior angles are undefined
        aspect_ratio = np.nan
        min_theta = np.nan
        max_theta = np.nan
        dideal_theta = np.nan
    else:
        aspect_ratio = lengths.max() / length_min
        corner_cosines = np.clip([
            (v21 @ -v13) / (length21 * length13),
            (v32 @ -v21) / (length32 * length21),
            (v13 @ -v32) / (length13 * length32),
        ], -1., 1.)
        thetas = np.arccos(corner_cosines)
        min_theta = thetas.min()
        max_theta = thetas.max()
        # largest deviation from the ideal 60 degree corner
        dideal_theta = max(max_theta - PIOVER3, PIOVER3 - min_theta)
    return area, max_skew, aspect_ratio, min_theta, max_theta, dideal_theta, min_edge_length
def quad_quality(element, p1, p2, p3, p4):
    """
    Gets the quality metrics for a quad.

    Parameters
    ----------
    element : element object
        only used for error reporting when the quad is degenerate
    p1, p2, p3, p4 : (3, ) float ndarray
        the xyz corner locations in order around the element

    Returns
    -------
    out : tuple
        (area, taper_ratio, area_ratio, max_skew, aspect_ratio,
         min_theta, max_theta, dideal_theta, min_edge_length, max_warp);
        angles are in radians
    """
    # edge vectors and their lengths
    v21 = p2 - p1
    v32 = p3 - p2
    v43 = p4 - p3
    v14 = p1 - p4
    length21 = np.linalg.norm(v21)
    length32 = np.linalg.norm(v32)
    length43 = np.linalg.norm(v43)
    length14 = np.linalg.norm(v14)
    min_edge_length = min(length21, length32, length43, length14)

    # edge midpoints and diagonals
    p12 = (p1 + p2) / 2.
    p23 = (p2 + p3) / 2.
    p34 = (p3 + p4) / 2.
    p14 = (p4 + p1) / 2.
    v31 = p3 - p1
    v42 = p4 - p2
    normal = np.cross(v31, v42)
    area = 0.5 * np.linalg.norm(normal)

    # still kind of in development
    #
    # the ratio of the ideal area to the actual area
    # this is an hourglass check
    areas = [
        np.linalg.norm(np.cross(-v14, v21)), # v41 x v21
        np.linalg.norm(np.cross(v32, -v21)), # v32 x v12
        np.linalg.norm(np.cross(v43, -v32)), # v43 x v23
        np.linalg.norm(np.cross(v14, v43)),  # v14 x v43
    ]
    #
    # for:
    #   area=1; area1=0.5 -> area_ratioi1=2.0; area_ratio=2.0
    #   area=1; area1=2.0 -> area_ratioi2=2.0; area_ratio=2.0
    min_area = min(areas)
    if min_area == 0.:
        # fully collapsed corner; the ratio is undefined
        print('nan min area; area=%g areas=%s:\n%s' % (area, areas, element))
        #nodes = element.nodes
        #print('  n_%i = %s' % (nodes[0], p1))
        #print('  n_%i = %s' % (nodes[1], p2))
        #print('  n_%i = %s' % (nodes[2], p3))
        #print('  n_%i = %s' % (nodes[3], p4))
        area_ratio = np.nan
        #raise RuntimeError('bad quad...')
    else:
        area_ratioi1 = area / min_area
        area_ratioi2 = max(areas) / area
        area_ratio = max(area_ratioi1, area_ratioi2)

    # taper: normalized deviation of the 4 corner-triangle areas from their mean
    area1 = 0.5 * areas[0] # v41 x v21
    area2 = 0.5 * np.linalg.norm(np.cross(-v21, v32)) # v12 x v32
    area3 = 0.5 * np.linalg.norm(np.cross(v43, v32)) # v43 x v32
    area4 = 0.5 * np.linalg.norm(np.cross(v14, -v43)) # v14 x v34
    aavg = (area1 + area2 + area3 + area4) / 4.
    taper_ratio = (abs(area1 - aavg) + abs(area2 - aavg) +
                   abs(area3 - aavg) + abs(area4 - aavg)) / aavg

    # skew: angle between the two midside connectors
    #     e3
    # 4-------3
    # |       |
    # |e4     | e2
    # 1-------2
    #     e1
    e13 = p34 - p12
    e42 = p23 - p14
    ne42 = np.linalg.norm(e42)
    ne13 = np.linalg.norm(e13)
    cos_skew1 = (e13 @ e42) / (ne13 * ne42)
    cos_skew2 = (e13 @ -e42) / (ne13 * ne42)
    max_skew = np.pi / 2. - np.abs(np.arccos(
        np.clip([cos_skew1, cos_skew2], -1., 1.))).min()

    #aspect_ratio = max(p12, p23, p34, p14) / max(p12, p23, p34, p14)
    lengths = np.linalg.norm([v21, v32, v43, v14], axis=1)
    aspect_ratio = lengths.max() / lengths.min()

    # interior angles at the 4 corners
    cos_theta1 = (v21 @ -v14) / (length21 * length14)
    cos_theta2 = (v32 @ -v21) / (length32 * length21)
    cos_theta3 = (v43 @ -v32) / (length43 * length32)
    cos_theta4 = (v14 @ -v43) / (length14 * length43)
    #max_thetai = np.arccos([cos_theta1, cos_theta2, cos_theta3, cos_theta4]).max()

    # dot the local normal with the normal vector
    # then take the norm of that to determine the angle relative to the normal
    # then take the sign of that to see if we're pointing roughly towards the normal
    #
    # np.sign(np.linalg.norm(np.dot(
    # a x b = ab sin(theta)
    # a x b / ab = sin(theta)
    # sin(theta) < 0. -> normal is flipped; the corner is re-entrant, so
    # add 2*pi and negate to measure the reflex angle instead
    normal2 = np.sign(np.cross(v21, v32) @ normal)
    normal3 = np.sign(np.cross(v32, v43) @ normal)
    normal4 = np.sign(np.cross(v43, v14) @ normal)
    normal1 = np.sign(np.cross(v14, v21) @ normal)
    n = np.array([normal1, normal2, normal3, normal4])
    theta_additional = np.where(n < 0, 2*np.pi, 0.)
    theta = n * np.arccos(np.clip(
        [cos_theta1, cos_theta2, cos_theta3, cos_theta4], -1., 1.)) + theta_additional
    min_theta = theta.min()
    max_theta = theta.max()
    # largest deviation from the ideal 90 degree corner
    dideal_theta = max(max_theta - PIOVER2, PIOVER2 - min_theta)
    #print('theta_max = ', theta_max)

    # warp angle
    # split the quad and find the normals of each triangl
    # find the angle between the two triangles
    #
    # 4---3
    # | / |
    # |/  |
    # 1---2
    #
    v41 = -v14
    n123 = np.cross(v21, v31)
    n134 = np.cross(v31, v41)
    #v1 o v2 = v1 * v2 cos(theta)
    cos_warp1 = (n123 @ n134) / (np.linalg.norm(n123) * np.linalg.norm(n134))
    # split the quad in the order direction and take the maximum of the two splits
    # 4---3
    # | \ |
    # |  \|
    # 1---2
    n124 = np.cross(v21, v41)
    n234 = np.cross(v32, v42)
    cos_warp2 = (n124 @ n234) / (np.linalg.norm(n124) * np.linalg.norm(n234))
    max_warp = np.abs(np.arccos(
        np.clip([cos_warp1, cos_warp2], -1., 1.))).max()
    out = (area, taper_ratio, area_ratio, max_skew, aspect_ratio,
           min_theta, max_theta, dideal_theta, min_edge_length, max_warp)
    return out
def get_min_max_theta(faces, all_node_ids, nid_map, xyz_cid0):
    """
    Get the min/max interior face angles for CTETRA, CPENTA, CHEXA, CPYRAM.

    Parameters
    ----------
    faces : tuple of tuples of int
        the local (0-based) node positions of each face; each face
        has 3 or 4 nodes
    all_node_ids : List[int]
        the element's node ids, indexed by the face entries
    nid_map : dict[nid] -> index
        maps a node id to its row in xyz_cid0
    xyz_cid0 : (nnodes, 3) float ndarray
        the global xyz coordinates

    Returns
    -------
    min_theta : float
        the minimum interior face angle (radians)
    max_theta : float
        the maximum interior face angle (radians)
    dideal_theta : float
        the largest deviation from the ideal angle
        (60 degrees for tri faces, 90 degrees for quad faces)
    min_edge_length : float
        the shortest face edge over all faces

    Raises
    ------
    NotImplementedError
        if a face doesn't have 3 or 4 nodes
    """
    # ideal interior angles (PIOVER3/PIOVER2 module constants inlined so
    # the function is self-contained)
    piover3 = np.pi / 3.
    piover2 = np.pi / 2.

    cos_thetas = []
    ideal_theta = []
    min_edge_lengths = []
    for face in faces:
        if len(face) == 3:
            n1, n2, n3 = [nid_map[all_node_ids[i]] for i in face]
            v21 = xyz_cid0[n2, :] - xyz_cid0[n1, :]
            v32 = xyz_cid0[n3, :] - xyz_cid0[n2, :]
            v13 = xyz_cid0[n1, :] - xyz_cid0[n3, :]
            length21 = np.linalg.norm(v21)
            length32 = np.linalg.norm(v32)
            length13 = np.linalg.norm(v13)
            # bug fix: the shortest edge of *every* face is collected;
            # previously only the last face's value survived
            min_edge_lengths.append(min(length21, length32, length13))
            cos_thetas.extend([
                (v21 @ -v13) / (length21 * length13),
                (v32 @ -v21) / (length32 * length21),
                (v13 @ -v32) / (length13 * length32),
            ])
            ideal_theta.extend([piover3] * 3)
        elif len(face) == 4:
            n1, n2, n3, n4 = [nid_map[all_node_ids[i]] for i in face]
            v21 = xyz_cid0[n2, :] - xyz_cid0[n1, :]
            v32 = xyz_cid0[n3, :] - xyz_cid0[n2, :]
            v43 = xyz_cid0[n4, :] - xyz_cid0[n3, :]
            v14 = xyz_cid0[n1, :] - xyz_cid0[n4, :]
            length21 = np.linalg.norm(v21)
            length32 = np.linalg.norm(v32)
            length43 = np.linalg.norm(v43)
            length14 = np.linalg.norm(v14)
            min_edge_lengths.append(min(length21, length32, length43, length14))
            cos_thetas.extend([
                (v21 @ -v14) / (length21 * length14),
                (v32 @ -v21) / (length32 * length21),
                (v43 @ -v32) / (length43 * length32),
                (v14 @ -v43) / (length14 * length43),
            ])
            ideal_theta.extend([piover2] * 4)
        else:
            raise NotImplementedError(face)

    # clip to avoid nans from tiny floating-point overshoot (|cos| > 1)
    thetas = np.arccos(np.clip(cos_thetas, -1., 1.))
    ideal_theta = np.array(ideal_theta)

    # bug fix: was max((thetas - ideal_theta).max(), (ideal_theta - thetas).min());
    # the second term is just the negative of the first, so the deviation
    # *below* the ideal angle was never measured (compare tri_quality /
    # quad_quality, which use max(max - ideal, ideal - min))
    dideal_thetai = max((thetas - ideal_theta).max(), (ideal_theta - thetas).max())

    min_thetai = thetas.min()
    max_thetai = thetas.max()
    return min_thetai, max_thetai, dideal_thetai, min(min_edge_lengths)
| [
"numpy.radians",
"numpy.degrees",
"numpy.cross",
"numpy.zeros",
"numpy.clip",
"numpy.searchsorted",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"numpy.arccos"
] | [((3754, 3780), 'numpy.radians', 'np.radians', (['min_theta_quad'], {}), '(min_theta_quad)\n', (3764, 3780), True, 'import numpy as np\n'), ((3801, 3826), 'numpy.radians', 'np.radians', (['min_theta_tri'], {}), '(min_theta_tri)\n', (3811, 3826), True, 'import numpy as np\n'), ((3843, 3864), 'numpy.radians', 'np.radians', (['max_theta'], {}), '(max_theta)\n', (3853, 3864), True, 'import numpy as np\n'), ((3880, 3900), 'numpy.radians', 'np.radians', (['max_skew'], {}), '(max_skew)\n', (3890, 3900), True, 'import numpy as np\n'), ((3919, 3942), 'numpy.radians', 'np.radians', (['max_warping'], {}), '(max_warping)\n', (3929, 3942), True, 'import numpy as np\n'), ((5923, 5967), 'numpy.linalg.norm', 'np.linalg.norm', (['[v21, v32, v43, v14]'], {'axis': '(1)'}), '([v21, v32, v43, v14], axis=1)\n', (5937, 5967), True, 'import numpy as np\n'), ((6674, 6700), 'numpy.cross', 'np.cross', (['(p3 - p1)', '(p4 - p2)'], {}), '(p3 - p1, p4 - p2)\n', (6682, 6700), True, 'import numpy as np\n'), ((6862, 6881), 'numpy.linalg.norm', 'np.linalg.norm', (['e13'], {}), '(e13)\n', (6876, 6881), True, 'import numpy as np\n'), ((6897, 6916), 'numpy.linalg.norm', 'np.linalg.norm', (['e42'], {}), '(e42)\n', (6911, 6916), True, 'import numpy as np\n'), ((9284, 9310), 'numpy.array', 'np.array', (['[n1, n2, n3, n4]'], {}), '([n1, n2, n3, n4])\n', (9292, 9310), True, 'import numpy as np\n'), ((9335, 9366), 'numpy.where', 'np.where', (['(n < 0)', '(2 * np.pi)', '(0.0)'], {}), '(n < 0, 2 * np.pi, 0.0)\n', (9343, 9366), True, 'import numpy as np\n'), ((9380, 9399), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (9394, 9399), True, 'import numpy as np\n'), ((9415, 9434), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (9429, 9434), True, 'import numpy as np\n'), ((9450, 9469), 'numpy.linalg.norm', 'np.linalg.norm', (['v43'], {}), '(v43)\n', (9464, 9469), True, 'import numpy as np\n'), ((9485, 9504), 'numpy.linalg.norm', 'np.linalg.norm', (['v14'], {}), '(v14)\n', 
(9499, 9504), True, 'import numpy as np\n'), ((10439, 10457), 'numpy.cross', 'np.cross', (['v21', 'v31'], {}), '(v21, v31)\n', (10447, 10457), True, 'import numpy as np\n'), ((10469, 10487), 'numpy.cross', 'np.cross', (['v31', 'v41'], {}), '(v31, v41)\n', (10477, 10487), True, 'import numpy as np\n'), ((10743, 10761), 'numpy.cross', 'np.cross', (['v21', 'v41'], {}), '(v21, v41)\n', (10751, 10761), True, 'import numpy as np\n'), ((10773, 10791), 'numpy.cross', 'np.cross', (['v32', 'v42'], {}), '(v32, v42)\n', (10781, 10791), True, 'import numpy as np\n'), ((11760, 11799), 'numpy.linalg.norm', 'np.linalg.norm', (['[v21, v32, v13]'], {'axis': '(1)'}), '([v21, v32, v13], axis=1)\n', (11774, 11799), True, 'import numpy as np\n'), ((13169, 13188), 'numpy.linalg.norm', 'np.linalg.norm', (['e21'], {}), '(e21)\n', (13183, 13188), True, 'import numpy as np\n'), ((13204, 13223), 'numpy.linalg.norm', 'np.linalg.norm', (['e31'], {}), '(e31)\n', (13218, 13223), True, 'import numpy as np\n'), ((13239, 13258), 'numpy.linalg.norm', 'np.linalg.norm', (['e32'], {}), '(e32)\n', (13253, 13258), True, 'import numpy as np\n'), ((13337, 13358), 'numpy.linalg.norm', 'np.linalg.norm', (['e3_p2'], {}), '(e3_p2)\n', (13351, 13358), True, 'import numpy as np\n'), ((13376, 13397), 'numpy.linalg.norm', 'np.linalg.norm', (['e2_p1'], {}), '(e2_p1)\n', (13390, 13397), True, 'import numpy as np\n'), ((13415, 13436), 'numpy.linalg.norm', 'np.linalg.norm', (['e1_p3'], {}), '(e1_p3)\n', (13429, 13436), True, 'import numpy as np\n'), ((17565, 17595), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17573, 17595), True, 'import numpy as np\n'), ((17621, 17651), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17629, 17651), True, 'import numpy as np\n'), ((17671, 17701), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17679, 17701), True, 'import numpy as np\n'), 
((17723, 17753), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17731, 17753), True, 'import numpy as np\n'), ((17775, 17805), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17783, 17805), True, 'import numpy as np\n'), ((17829, 17859), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17837, 17859), True, 'import numpy as np\n'), ((17920, 17950), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17928, 17950), True, 'import numpy as np\n'), ((17969, 17999), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (17977, 17999), True, 'import numpy as np\n'), ((18022, 18052), 'numpy.zeros', 'np.zeros', (['nelements', '"""float32"""'], {}), "(nelements, 'float32')\n", (18030, 18052), True, 'import numpy as np\n'), ((27466, 27485), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (27480, 27485), True, 'import numpy as np\n'), ((27501, 27520), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (27515, 27520), True, 'import numpy as np\n'), ((27536, 27555), 'numpy.linalg.norm', 'np.linalg.norm', (['v13'], {}), '(v13)\n', (27550, 27555), True, 'import numpy as np\n'), ((27676, 27695), 'numpy.linalg.norm', 'np.linalg.norm', (['e31'], {}), '(e31)\n', (27690, 27695), True, 'import numpy as np\n'), ((27707, 27726), 'numpy.linalg.norm', 'np.linalg.norm', (['e21'], {}), '(e21)\n', (27721, 27726), True, 'import numpy as np\n'), ((27738, 27757), 'numpy.linalg.norm', 'np.linalg.norm', (['e32'], {}), '(e32)\n', (27752, 27757), True, 'import numpy as np\n'), ((27771, 27792), 'numpy.linalg.norm', 'np.linalg.norm', (['e2_p1'], {}), '(e2_p1)\n', (27785, 27792), True, 'import numpy as np\n'), ((27806, 27827), 'numpy.linalg.norm', 'np.linalg.norm', (['e3_p2'], {}), '(e3_p2)\n', (27820, 27827), True, 'import numpy as np\n'), ((27841, 27862), 
'numpy.linalg.norm', 'np.linalg.norm', (['e1_p3'], {}), '(e1_p3)\n', (27855, 27862), True, 'import numpy as np\n'), ((28323, 28362), 'numpy.linalg.norm', 'np.linalg.norm', (['[v21, v32, v13]'], {'axis': '(1)'}), '([v21, v32, v13], axis=1)\n', (28337, 28362), True, 'import numpy as np\n'), ((29919, 29938), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (29933, 29938), True, 'import numpy as np\n'), ((29954, 29973), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (29968, 29973), True, 'import numpy as np\n'), ((29989, 30008), 'numpy.linalg.norm', 'np.linalg.norm', (['v43'], {}), '(v43)\n', (30003, 30008), True, 'import numpy as np\n'), ((30024, 30043), 'numpy.linalg.norm', 'np.linalg.norm', (['v14'], {}), '(v14)\n', (30038, 30043), True, 'import numpy as np\n'), ((30260, 30278), 'numpy.cross', 'np.cross', (['v31', 'v42'], {}), '(v31, v42)\n', (30268, 30278), True, 'import numpy as np\n'), ((31950, 31969), 'numpy.linalg.norm', 'np.linalg.norm', (['e42'], {}), '(e42)\n', (31964, 31969), True, 'import numpy as np\n'), ((31981, 32000), 'numpy.linalg.norm', 'np.linalg.norm', (['e13'], {}), '(e13)\n', (31995, 32000), True, 'import numpy as np\n'), ((32277, 32321), 'numpy.linalg.norm', 'np.linalg.norm', (['[v21, v32, v43, v14]'], {'axis': '(1)'}), '([v21, v32, v43, v14], axis=1)\n', (32291, 32321), True, 'import numpy as np\n'), ((33282, 33328), 'numpy.array', 'np.array', (['[normal1, normal2, normal3, normal4]'], {}), '([normal1, normal2, normal3, normal4])\n', (33290, 33328), True, 'import numpy as np\n'), ((33352, 33383), 'numpy.where', 'np.where', (['(n < 0)', '(2 * np.pi)', '(0.0)'], {}), '(n < 0, 2 * np.pi, 0.0)\n', (33360, 33383), True, 'import numpy as np\n'), ((33873, 33891), 'numpy.cross', 'np.cross', (['v21', 'v31'], {}), '(v21, v31)\n', (33881, 33891), True, 'import numpy as np\n'), ((33903, 33921), 'numpy.cross', 'np.cross', (['v31', 'v41'], {}), '(v31, v41)\n', (33911, 33921), True, 'import numpy as np\n'), ((34177, 34195), 
'numpy.cross', 'np.cross', (['v21', 'v41'], {}), '(v21, v41)\n', (34185, 34195), True, 'import numpy as np\n'), ((34207, 34225), 'numpy.cross', 'np.cross', (['v32', 'v42'], {}), '(v32, v42)\n', (34215, 34225), True, 'import numpy as np\n'), ((36989, 37010), 'numpy.arccos', 'np.arccos', (['cos_thetas'], {}), '(cos_thetas)\n', (36998, 37010), True, 'import numpy as np\n'), ((37029, 37050), 'numpy.array', 'np.array', (['ideal_theta'], {}), '(ideal_theta)\n', (37037, 37050), True, 'import numpy as np\n'), ((9752, 9820), 'numpy.clip', 'np.clip', (['[cos_theta1, cos_theta2, cos_theta3, cos_theta4]', '(-1.0)', '(1.0)'], {}), '([cos_theta1, cos_theta2, cos_theta3, cos_theta4], -1.0, 1.0)\n', (9759, 9820), True, 'import numpy as np\n'), ((12470, 12526), 'numpy.clip', 'np.clip', (['[cos_theta1, cos_theta2, cos_theta3]', '(-1.0)', '(1.0)'], {}), '([cos_theta1, cos_theta2, cos_theta3], -1.0, 1.0)\n', (12477, 12526), True, 'import numpy as np\n'), ((30296, 30318), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {}), '(normal)\n', (30310, 30318), True, 'import numpy as np\n'), ((7370, 7389), 'numpy.cross', 'np.cross', (['(-v14)', 'v21'], {}), '(-v14, v21)\n', (7378, 7389), True, 'import numpy as np\n'), ((7436, 7455), 'numpy.cross', 'np.cross', (['(-v21)', 'v32'], {}), '(-v21, v32)\n', (7444, 7455), True, 'import numpy as np\n'), ((7502, 7520), 'numpy.cross', 'np.cross', (['v43', 'v32'], {}), '(v43, v32)\n', (7510, 7520), True, 'import numpy as np\n'), ((7568, 7587), 'numpy.cross', 'np.cross', (['v14', '(-v43)'], {}), '(v14, -v43)\n', (7576, 7587), True, 'import numpy as np\n'), ((9109, 9127), 'numpy.cross', 'np.cross', (['v21', 'v32'], {}), '(v21, v32)\n', (9117, 9127), True, 'import numpy as np\n'), ((9155, 9173), 'numpy.cross', 'np.cross', (['v32', 'v43'], {}), '(v32, v43)\n', (9163, 9173), True, 'import numpy as np\n'), ((9201, 9219), 'numpy.cross', 'np.cross', (['v43', 'v14'], {}), '(v43, v14)\n', (9209, 9219), True, 'import numpy as np\n'), ((9247, 9265), 
'numpy.cross', 'np.cross', (['v14', 'v21'], {}), '(v14, v21)\n', (9255, 9265), True, 'import numpy as np\n'), ((10555, 10575), 'numpy.linalg.norm', 'np.linalg.norm', (['n123'], {}), '(n123)\n', (10569, 10575), True, 'import numpy as np\n'), ((10578, 10598), 'numpy.linalg.norm', 'np.linalg.norm', (['n134'], {}), '(n134)\n', (10592, 10598), True, 'import numpy as np\n'), ((10825, 10845), 'numpy.linalg.norm', 'np.linalg.norm', (['n124'], {}), '(n124)\n', (10839, 10845), True, 'import numpy as np\n'), ((10848, 10868), 'numpy.linalg.norm', 'np.linalg.norm', (['n234'], {}), '(n234)\n', (10862, 10868), True, 'import numpy as np\n'), ((12252, 12271), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (12266, 12271), True, 'import numpy as np\n'), ((12274, 12293), 'numpy.linalg.norm', 'np.linalg.norm', (['v13'], {}), '(v13)\n', (12288, 12293), True, 'import numpy as np\n'), ((12328, 12347), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (12342, 12347), True, 'import numpy as np\n'), ((12350, 12369), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (12364, 12369), True, 'import numpy as np\n'), ((12404, 12423), 'numpy.linalg.norm', 'np.linalg.norm', (['v13'], {}), '(v13)\n', (12418, 12423), True, 'import numpy as np\n'), ((12426, 12445), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (12440, 12445), True, 'import numpy as np\n'), ((18818, 18849), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (18833, 18849), True, 'import numpy as np\n'), ((27644, 27662), 'numpy.cross', 'np.cross', (['v21', 'v13'], {}), '(v21, v13)\n', (27652, 27662), True, 'import numpy as np\n'), ((29002, 29058), 'numpy.clip', 'np.clip', (['[cos_theta1, cos_theta2, cos_theta3]', '(-1.0)', '(1.0)'], {}), '([cos_theta1, cos_theta2, cos_theta3], -1.0, 1.0)\n', (29009, 29058), True, 'import numpy as np\n'), ((30484, 30503), 'numpy.cross', 'np.cross', (['(-v14)', 'v21'], {}), '(-v14, v21)\n', (30492, 
30503), True, 'import numpy as np\n'), ((30541, 30560), 'numpy.cross', 'np.cross', (['v32', '(-v21)'], {}), '(v32, -v21)\n', (30549, 30560), True, 'import numpy as np\n'), ((30598, 30617), 'numpy.cross', 'np.cross', (['v43', '(-v32)'], {}), '(v43, -v32)\n', (30606, 30617), True, 'import numpy as np\n'), ((30655, 30673), 'numpy.cross', 'np.cross', (['v14', 'v43'], {}), '(v14, v43)\n', (30663, 30673), True, 'import numpy as np\n'), ((31469, 31488), 'numpy.cross', 'np.cross', (['(-v21)', 'v32'], {}), '(-v21, v32)\n', (31477, 31488), True, 'import numpy as np\n'), ((31535, 31553), 'numpy.cross', 'np.cross', (['v43', 'v32'], {}), '(v43, v32)\n', (31543, 31553), True, 'import numpy as np\n'), ((31600, 31619), 'numpy.cross', 'np.cross', (['v14', '(-v43)'], {}), '(v14, -v43)\n', (31608, 31619), True, 'import numpy as np\n'), ((33092, 33110), 'numpy.cross', 'np.cross', (['v21', 'v32'], {}), '(v21, v32)\n', (33100, 33110), True, 'import numpy as np\n'), ((33143, 33161), 'numpy.cross', 'np.cross', (['v32', 'v43'], {}), '(v32, v43)\n', (33151, 33161), True, 'import numpy as np\n'), ((33194, 33212), 'numpy.cross', 'np.cross', (['v43', 'v14'], {}), '(v43, v14)\n', (33202, 33212), True, 'import numpy as np\n'), ((33245, 33263), 'numpy.cross', 'np.cross', (['v14', 'v21'], {}), '(v14, v21)\n', (33253, 33263), True, 'import numpy as np\n'), ((33989, 34009), 'numpy.linalg.norm', 'np.linalg.norm', (['n123'], {}), '(n123)\n', (34003, 34009), True, 'import numpy as np\n'), ((34012, 34032), 'numpy.linalg.norm', 'np.linalg.norm', (['n134'], {}), '(n134)\n', (34026, 34032), True, 'import numpy as np\n'), ((34259, 34279), 'numpy.linalg.norm', 'np.linalg.norm', (['n124'], {}), '(n124)\n', (34273, 34279), True, 'import numpy as np\n'), ((34282, 34302), 'numpy.linalg.norm', 'np.linalg.norm', (['n234'], {}), '(n234)\n', (34296, 34302), True, 'import numpy as np\n'), ((35207, 35226), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (35221, 35226), True, 'import numpy as np\n'), 
((35250, 35269), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (35264, 35269), True, 'import numpy as np\n'), ((35293, 35312), 'numpy.linalg.norm', 'np.linalg.norm', (['v13'], {}), '(v13)\n', (35307, 35312), True, 'import numpy as np\n'), ((19223, 19254), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (19238, 19254), True, 'import numpy as np\n'), ((33408, 33476), 'numpy.clip', 'np.clip', (['[cos_theta1, cos_theta2, cos_theta3, cos_theta4]', '(-1.0)', '(1.0)'], {}), '([cos_theta1, cos_theta2, cos_theta3, cos_theta4], -1.0, 1.0)\n', (33415, 33476), True, 'import numpy as np\n'), ((36298, 36317), 'numpy.linalg.norm', 'np.linalg.norm', (['v21'], {}), '(v21)\n', (36312, 36317), True, 'import numpy as np\n'), ((36341, 36360), 'numpy.linalg.norm', 'np.linalg.norm', (['v32'], {}), '(v32)\n', (36355, 36360), True, 'import numpy as np\n'), ((36384, 36403), 'numpy.linalg.norm', 'np.linalg.norm', (['v43'], {}), '(v43)\n', (36398, 36403), True, 'import numpy as np\n'), ((36427, 36446), 'numpy.linalg.norm', 'np.linalg.norm', (['v14'], {}), '(v14)\n', (36441, 36446), True, 'import numpy as np\n'), ((7204, 7220), 'numpy.degrees', 'np.degrees', (['skew'], {}), '(skew)\n', (7214, 7220), True, 'import numpy as np\n'), ((10058, 10080), 'numpy.degrees', 'np.degrees', (['theta_mini'], {}), '(theta_mini)\n', (10068, 10080), True, 'import numpy as np\n'), ((10224, 10246), 'numpy.degrees', 'np.degrees', (['theta_maxi'], {}), '(theta_maxi)\n', (10234, 10246), True, 'import numpy as np\n'), ((10913, 10955), 'numpy.clip', 'np.clip', (['[cos_warp1, cos_warp2]', '(-1.0)', '(1.0)'], {}), '([cos_warp1, cos_warp2], -1.0, 1.0)\n', (10920, 10955), True, 'import numpy as np\n'), ((11079, 11100), 'numpy.degrees', 'np.degrees', (['max_warpi'], {}), '(max_warpi)\n', (11089, 11100), True, 'import numpy as np\n'), ((12710, 12732), 'numpy.degrees', 'np.degrees', (['theta_mini'], {}), '(theta_mini)\n', (12720, 12732), True, 'import numpy as 
np\n'), ((12873, 12895), 'numpy.degrees', 'np.degrees', (['theta_maxi'], {}), '(theta_maxi)\n', (12883, 12895), True, 'import numpy as np\n'), ((14034, 14050), 'numpy.degrees', 'np.degrees', (['skew'], {}), '(skew)\n', (14044, 14050), True, 'import numpy as np\n'), ((34346, 34388), 'numpy.clip', 'np.clip', (['[cos_warp1, cos_warp2]', '(-1.0)', '(1.0)'], {}), '([cos_warp1, cos_warp2], -1.0, 1.0)\n', (34353, 34388), True, 'import numpy as np\n'), ((7063, 7105), 'numpy.clip', 'np.clip', (['[cos_skew1, cos_skew2]', '(-1.0)', '(1.0)'], {}), '([cos_skew1, cos_skew2], -1.0, 1.0)\n', (7070, 7105), True, 'import numpy as np\n'), ((13829, 13919), 'numpy.clip', 'np.clip', (['[cos_skew1, cos_skew2, cos_skew3, cos_skew4, cos_skew5, cos_skew6]', '(-1.0)', '(1.0)'], {}), '([cos_skew1, cos_skew2, cos_skew3, cos_skew4, cos_skew5, cos_skew6],\n -1.0, 1.0)\n', (13836, 13919), True, 'import numpy as np\n'), ((19629, 19664), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids[:3]'], {}), '(all_nids, nids[:3])\n', (19644, 19664), True, 'import numpy as np\n'), ((19787, 19818), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (19802, 19818), True, 'import numpy as np\n'), ((28199, 28289), 'numpy.clip', 'np.clip', (['[cos_skew1, cos_skew2, cos_skew3, cos_skew4, cos_skew5, cos_skew6]', '(-1.0)', '(1.0)'], {}), '([cos_skew1, cos_skew2, cos_skew3, cos_skew4, cos_skew5, cos_skew6],\n -1.0, 1.0)\n', (28206, 28289), True, 'import numpy as np\n'), ((32144, 32186), 'numpy.clip', 'np.clip', (['[cos_skew1, cos_skew2]', '(-1.0)', '(1.0)'], {}), '([cos_skew1, cos_skew2], -1.0, 1.0)\n', (32151, 32186), True, 'import numpy as np\n'), ((20171, 20206), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids[:4]'], {}), '(all_nids, nids[:4])\n', (20186, 20206), True, 'import numpy as np\n'), ((20333, 20364), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (20348, 20364), True, 'import numpy as np\n'), 
((20764, 20795), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (20779, 20795), True, 'import numpy as np\n'), ((21198, 21229), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (21213, 21229), True, 'import numpy as np\n'), ((21518, 21549), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (21533, 21549), True, 'import numpy as np\n'), ((21838, 21869), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (21853, 21869), True, 'import numpy as np\n'), ((22190, 22221), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (22205, 22221), True, 'import numpy as np\n'), ((23574, 23605), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (23589, 23605), True, 'import numpy as np\n'), ((23677, 23700), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (23691, 23700), True, 'import numpy as np\n'), ((23792, 23823), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (23807, 23823), True, 'import numpy as np\n'), ((23895, 23918), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (23909, 23918), True, 'import numpy as np\n'), ((24046, 24077), 'numpy.searchsorted', 'np.searchsorted', (['all_nids', 'nids'], {}), '(all_nids, nids)\n', (24061, 24077), True, 'import numpy as np\n'), ((24149, 24172), 'numpy.linalg.norm', 'np.linalg.norm', (['(p2 - p1)'], {}), '(p2 - p1)\n', (24163, 24172), True, 'import numpy as np\n')] |
#!/usr/bin/env python
'''Generates mesh files and point clouds for randomly generated rectangular blocks.'''
# python
import time
# scipy
from scipy.io import savemat
from numpy import array, mean
# self
import point_cloud
from rl_agent import RlAgent
from rl_environment import RlEnvironment
def main():
  '''Generate point clouds for randomly sampled cylinders and save them to disk.'''

  # PARAMETERS =====================================================================================
  # system
  gpuId = 0
  # cylinder sampling ranges (meters) and how many cylinders to generate
  heightRange = [0.007, 0.013]
  radiusRange = [0.030, 0.045]
  cylinderCount = 1000
  # sensor view configuration
  viewCenter = array([0,0,0])
  viewKeepout = 0.60
  viewWorkspace = [(-1.0,1.0),(-1.0,1.0),(-1.0,1.0)]
  # visualization/saving flags
  showViewer = False
  showSteps = False
  plotImages = False

  # INITIALIZATION =================================================================================
  rlEnv = RlEnvironment(showViewer, removeTable=True)
  rlAgent = RlAgent(rlEnv, gpuId)

  # RUN TEST =======================================================================================
  # For each cylinder: place it, capture its full cloud + normals, save, then remove it.
  for i in xrange(cylinderCount):
    name = "cylinder-{}".format(i)
    obj = rlEnv.PlaceCylinderAtOrigin(heightRange, radiusRange, name, True)
    cloud, normals = rlAgent.GetFullCloudAndNormals(viewCenter, viewKeepout, viewWorkspace)
    point_cloud.SaveMat(name + ".mat", cloud, normals)
    rlAgent.PlotCloud(cloud)
    if plotImages:
      point_cloud.Plot(cloud, normals, 2)
    if showSteps:
      raw_input("Placed {}.".format(name))
    rlEnv.RemoveObjectSet([obj])
# Run the generation script when this file is executed directly (not imported).
if __name__ == "__main__":
  main()
  exit()
| [
"rl_environment.RlEnvironment",
"rl_agent.RlAgent",
"numpy.array",
"point_cloud.Plot"
] | [((588, 604), 'numpy.array', 'array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (593, 604), False, 'from numpy import array, mean\n'), ((878, 921), 'rl_environment.RlEnvironment', 'RlEnvironment', (['showViewer'], {'removeTable': '(True)'}), '(showViewer, removeTable=True)\n', (891, 921), False, 'from rl_environment import RlEnvironment\n'), ((934, 955), 'rl_agent.RlAgent', 'RlAgent', (['rlEnv', 'gpuId'], {}), '(rlEnv, gpuId)\n', (941, 955), False, 'from rl_agent import RlAgent\n'), ((1417, 1452), 'point_cloud.Plot', 'point_cloud.Plot', (['cloud', 'normals', '(2)'], {}), '(cloud, normals, 2)\n', (1433, 1452), False, 'import point_cloud\n')] |
"""
Driver Script - Medical Decisions Diabetes Treatment
"""
import pandas as pd
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
import math
import time
from MedicalDecisionDiabetesModel import MedicalDecisionDiabetesModel as MDDM
from MedicalDecisionDiabetesModel import Beta
from MedicalDecisionDiabetesPolicy import MDDMPolicy
def formatFloatList(L,p):
    """Format every float in L as a string with p decimal places.

    Args:
        L: iterable of numbers to format.
        p: number of decimal places (non-negative int).

    Returns:
        list of str, one formatted string per element of L (empty list for empty L).
    """
    # Format each element directly instead of the old build-one-big-format-string-
    # then-split approach, which was needlessly indirect.
    return ["{:.{}f}".format(x, p) for x in L]
def normalizeCounter(counter):
    """Normalize counter values in place so they sum to 1.

    Args:
        counter: a collections.Counter (or dict-like) of numeric counts; mutated in place.

    Returns:
        The same counter object, with each value divided by the total.
        If the total is zero (empty counter or all-zero counts) the counter is
        returned unchanged instead of raising ZeroDivisionError.
    """
    total = sum(counter.values(), 0.0)
    if not total:
        # Guard against division by zero for empty or all-zero counters.
        return counter
    for key in counter:
        counter[key] /= total
    return counter
# unit testing
if __name__ == "__main__":
    '''
    this is an example of creating a model, choosing the decision according to the policy of choice,
    and running the model for a fixed time period (in months)
    '''
    # initial parameters
    seed = 19783167
    print_excel_file=False
    # in order: Metformin, Sensitizer, Secretagoge, Alpha-glucosidase inhibitor, Peptide analog.
    x_names = ['M', 'Sens', 'Secr', 'AGI', 'PA']
    policy_names = ['UCB', 'IE', 'PureExploitation', 'PureExploration']
    #reading parameter file and initializing variables
    file = 'MDDMparameters.xlsx'
    S0 = pd.read_excel(file, sheet_name = 'parameters1')
    additional_params = pd.read_excel(file, sheet_name = 'parameters2')
    # policies to run are given as a whitespace-separated string in the sheet
    policy_str = additional_params.loc['policy', 0]
    policy_list = policy_str.split()
    # each time step is 1 month.
    t_stop = int(additional_params.loc['N', 0]) # number of times we test the drugs
    L = int(additional_params.loc['L', 0]) # number of samples
    # grid of tunable-parameter (theta) values swept for each policy
    theta_range_1 = np.arange(additional_params.loc['theta_start', 0],\
                              additional_params.loc['theta_end', 0],\
                              additional_params.loc['increment', 0])
    # dictionaries to store the stats for different values of theta
    theta_obj = {p:[] for p in policy_names}
    theta_obj_std = {p:[] for p in policy_names}
    #data structures to output the algorithm details
    mu_star_labels = [x+"_mu_*" for x in x_names]
    mu_labels = [x+"_mu_bar" for x in x_names]
    sigma_labels = [x+"_sigma_bar" for x in x_names]
    N_labels = [x+"_N_bar" for x in x_names]
    labelsOutputPath = ["Policy","Truth_type","Theta","Sample_path"] + mu_star_labels + ["Best_Treatment", "n"] + mu_labels + sigma_labels + N_labels + ["Decision","W","CumReward","isBest"]
    output_path = []
    # data structures to accumulate best treatment count
    best_treat = {(p,theta):[] for p in policy_list for theta in theta_range_1} #one list for each (p,theta) pair - each list is along the sample paths
    best_treat_count_list = {(p,theta):[] for p in policy_list for theta in theta_range_1} #one list for each (p,theta) pair - the list is along the sample paths - each element of the list is accumulated during the experiments
    best_treat_Counter_hist = {(p,theta):[] for p in policy_list for theta in theta_range_1 }
    best_treat_Chosen_hist = {(p,theta):[] for p in policy_list for theta in theta_range_1 }
    # data structures to accumulate the decisions
    decision_Given_Best_Treat_list = {(p,theta,d):[] for p in policy_list for theta in theta_range_1 for d in x_names}
    decision_Given_Best_Treat_Counter = {(p,theta,d):[] for p in policy_list for theta in theta_range_1 for d in x_names}
    decision_ALL_list = {(p,theta):[] for p in policy_list for theta in theta_range_1 }
    decision_ALL_Counter = {(p,theta):[] for p in policy_list for theta in theta_range_1 }
    #initialing the model
    Model = MDDM(x_names, x_names, S0, additional_params)
    Model.printTruth()
    Model.printState()
    P = MDDMPolicy(Model, policy_names,seed)
    # =============================================================================
    # running the policy
    # =============================================================================
    # Outer loop: one full theta sweep per policy listed in the parameter file.
    for policy_chosen in policy_list:
        print("Starting policy {}".format(policy_chosen))
        # look up the bound method on the policy object by its name
        P_make_decision = getattr(P,policy_chosen)
        # loop over theta (theta)
        policy_start = time.time()
        for theta in theta_range_1:
            # re-seed both RNGs so every (policy, theta) pair sees identical randomness
            Model.prng = np.random.RandomState(seed)
            P.prng = np.random.RandomState(seed)
            F_hat = []
            last_state_dict = {x:[0.,0.,0] for x in x_names }
            states_avg = {}
            # loop over sample paths
            for l in range(1,L+1):
                # get a fresh copy of the model
                model_copy = copy(Model)
                # sample the truth - the truth is going to be the same for the N experiments in the budget
                model_copy.exog_info_sample_mu()
                #print("sampled_mu: ", formatFloatList(list(model_copy.mu.values()),3) )
                # determine the best treatment for the sampled truth
                best_treatment = max(model_copy.mu, key=model_copy.mu.get)
                best_treat[(policy_chosen,theta)].append(best_treatment)
                best_treat_count = 0
                decision_list = []
                # prepare record for output
                mu_output = [model_copy.mu[x] for x in x_names]
                record_sample_l = [policy_chosen, Model.truth_type,theta,l] + mu_output + [best_treatment]
                # loop over time (N, in notes)
                for n in range(t_stop):
                    # formating pre-decision state for output
                    state_mu = [getattr(model_copy.state,x)[0] for x in x_names]
                    state_sigma = [1/math.sqrt(getattr(model_copy.state,x)[1]) for x in x_names]
                    state_N = [getattr(model_copy.state,x)[2] for x in x_names]
                    # make decision based on chosen policy
                    decision = P_make_decision(model_copy, theta)
                    decision_list.append(decision)
                    # step forward in time sampling the reduction, updating the objective fucntion and the state
                    exog_info = model_copy.step(decision)
                    # 'cond and 1 or 0' is a legacy ternary: 1 if decision==best_treatment else 0
                    best_treat_count += decision==best_treatment and 1 or 0
                    # adding record for output
                    record_sample_t = [n] + state_mu + state_sigma + state_N + [decision, exog_info['reduction'],model_copy.obj,decision==best_treatment and 1 or 0]
                    output_path.append(record_sample_l + record_sample_t)
                # updating end of experiments stats
                F_hat.append(model_copy.obj)
                last_state_dict.update({x:[last_state_dict[x][0] + getattr(model_copy.state,x)[0],last_state_dict[x][1] + getattr(model_copy.state,x)[1],last_state_dict[x][2] + getattr(model_copy.state,x)[2]] for x in x_names })
                best_treat_count_list[(policy_chosen,theta)].append(best_treat_count)
                decision_Given_Best_Treat_list[(policy_chosen,theta,best_treatment)] += decision_list
                decision_ALL_list[(policy_chosen,theta)] += decision_list
            # updating end of theta stats
            F_hat_mean = np.array(F_hat).mean()
            # sample variance over the L sample paths (unbiased, divides by L-1)
            F_hat_var = np.sum(np.square(np.array(F_hat) - F_hat_mean))/(L-1)
            theta_obj[policy_chosen].append(F_hat_mean)
            theta_obj_std[policy_chosen].append(np.sqrt(F_hat_var/L))
            print("Finishing policy = {}, Truth_type {} and theta = {}. F_bar_mean = {:.3f} and F_bar_std = {:.3f}".format(policy_chosen,Model.truth_type,theta,F_hat_mean,np.sqrt(F_hat_var/L)))
            states_avg = {x:[last_state_dict[x][0]/L,last_state_dict[x][1]/L,last_state_dict[x][2]/L] for x in x_names}
            print("Averages along {} iterations and {} budget trial:".format(L,t_stop))
            for x in x_names:
                print("Treatment {}: m_bar {:.2f}, beta_bar {:.2f} and N {}".format(x,states_avg[x][0],states_avg[x][1],states_avg[x][2]))
            # frequency of each treatment being the sampled-truth best
            best_treat_Counter = Counter(best_treat[(policy_chosen,theta)])
            best_treat_Counter_hist.update({(policy_chosen,theta):best_treat_Counter})
            hist, bin_edges = np.histogram(np.array(best_treat_count_list[(policy_chosen,theta)]), t_stop)
            best_treat_Chosen_hist.update({(policy_chosen,theta):hist})
            print("Histogram best_treatment")
            print(normalizeCounter(best_treat_Counter))
            print("Histogram decisions")
            decision_ALL_Counter[(policy_chosen,theta)] = normalizeCounter(Counter(decision_ALL_list[(policy_chosen,theta)] ))
            print(decision_ALL_Counter[(policy_chosen,theta)])
            # per-best-treatment decision distribution, one column per treatment
            decision_Given_Best_Treat_dict = {x:dict(normalizeCounter(Counter(decision_Given_Best_Treat_list[(policy_chosen,theta,x)]))) for x in Model.x_names}
            decision_df = pd.DataFrame(decision_Given_Best_Treat_dict)
            print(decision_df.head())
            print("\n\n")
        # updating end of policy stats
        policy_end = time.time()
        print("Ending policy {}. Elapsed time {} secs\n\n\n".format(policy_chosen,policy_end - policy_start))
    # =============================================================================
    # Outputing to Excel
    # =============================================================================
    if print_excel_file:
        print_init_time = time.time()
        dfOutputPath = pd.DataFrame.from_records(output_path,columns=labelsOutputPath)
        # Create a Pandas Excel writer using XlsxWriter as the engine.
        writer = pd.ExcelWriter('DetailedOutput{}.xlsx'.format(Model.truth_type), engine='xlsxwriter')
        # Convert the dataframe to an XlsxWriter Excel object.
        dfOutputPath.to_excel(writer, sheet_name='output')
        # NOTE(review): ExcelWriter.save() was removed in pandas 2.x (use close()) — confirm pandas version
        writer.save()
        print_end_time = time.time()
        print("Finished printing the excel file. Elapsed time {}".format(print_end_time-print_init_time))
    # =============================================================================
    # Generating Plots
    # =============================================================================
    l = len(theta_range_1)
    inc = additional_params.loc['increment', 0]
    # side-by-side panels: mean of F_bar (left) and its std (right) vs theta
    fig1, axsubs = plt.subplots(1,2)
    fig1.suptitle('Comparison of policies for the Medical Decisions Diabetes Model: \n (N = {}, L = {}, Truth_type = {} )'.format(t_stop, L, Model.truth_type) )
    color_list = ['b','g','r','m']
    nPolicies = list(range(len(policy_list)))
    for policy_chosen,p in zip(policy_list,nPolicies):
        axsubs[0].plot(theta_range_1, theta_obj[policy_chosen], "{}o-".format(color_list[p]),label = "{}".format(policy_chosen))
        axsubs[0].set_title('Mean')
        axsubs[0].legend()
        axsubs[0].set_xlabel('theta')
        axsubs[0].set_ylabel('estimated value for (F_bar)')
        axsubs[1].plot(theta_range_1, theta_obj_std[policy_chosen], "{}+:".format(color_list[p]),label = "{}".format(policy_chosen))
        axsubs[1].set_title('Std')
        axsubs[1].legend()
        axsubs[1].set_xlabel('theta')
        #axsubs[1].set_ylabel('estimated value for (F_bar)')
    plt.show()
    fig1.savefig('Policy_Comparison_{}.jpg'.format(Model.truth_type))
    #fig = plt.figure()
    #plt.title('Comparison of policies for the Medical Decisions Diabetes Model: \n (N = {}, L = {}, Truth_type = {} )'.format(t_stop, L, Model.truth_type))
    #color_list = ['b','g','r','m']
    #nPolicies = list(range(len(policy_list)))
    #for policy_chosen,p in zip(policy_list,nPolicies):
    #    plt.plot(theta_range_1, theta_obj[policy_chosen], "{}o-".format(color_list[p]),label = "mean for {}".format(policy_chosen))
    #    if plot_std:
    #        plt.plot(theta_range_1, theta_obj_std[policy_chosen], "{}+:".format(color_list[p]),label = "std for {}".format(policy_chosen))
    #plt.legend()
    #plt.xlabel('theta')
    #plt.ylabel('estimated value (F_bar)')
    #plt.show()
    #fig.savefig('Policy_Comparison_{}.jpg'.format(Model.truth_type))
| [
"pandas.DataFrame",
"matplotlib.pyplot.show",
"MedicalDecisionDiabetesPolicy.MDDMPolicy",
"copy.copy",
"numpy.random.RandomState",
"time.time",
"pandas.read_excel",
"numpy.arange",
"numpy.array",
"pandas.DataFrame.from_records",
"collections.Counter",
"MedicalDecisionDiabetesModel.MedicalDecis... | [((1306, 1351), 'pandas.read_excel', 'pd.read_excel', (['file'], {'sheet_name': '"""parameters1"""'}), "(file, sheet_name='parameters1')\n", (1319, 1351), True, 'import pandas as pd\n'), ((1378, 1423), 'pandas.read_excel', 'pd.read_excel', (['file'], {'sheet_name': '"""parameters2"""'}), "(file, sheet_name='parameters2')\n", (1391, 1423), True, 'import pandas as pd\n'), ((1732, 1865), 'numpy.arange', 'np.arange', (["additional_params.loc['theta_start', 0]", "additional_params.loc['theta_end', 0]", "additional_params.loc['increment', 0]"], {}), "(additional_params.loc['theta_start', 0], additional_params.loc[\n 'theta_end', 0], additional_params.loc['increment', 0])\n", (1741, 1865), True, 'import numpy as np\n'), ((3712, 3757), 'MedicalDecisionDiabetesModel.MedicalDecisionDiabetesModel', 'MDDM', (['x_names', 'x_names', 'S0', 'additional_params'], {}), '(x_names, x_names, S0, additional_params)\n', (3716, 3757), True, 'from MedicalDecisionDiabetesModel import MedicalDecisionDiabetesModel as MDDM\n'), ((3818, 3855), 'MedicalDecisionDiabetesPolicy.MDDMPolicy', 'MDDMPolicy', (['Model', 'policy_names', 'seed'], {}), '(Model, policy_names, seed)\n', (3828, 3855), False, 'from MedicalDecisionDiabetesPolicy import MDDMPolicy\n'), ((10674, 10692), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (10686, 10692), True, 'import matplotlib.pyplot as plt\n'), ((11597, 11607), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11605, 11607), True, 'import matplotlib.pyplot as plt\n'), ((4281, 4292), 'time.time', 'time.time', ([], {}), '()\n', (4290, 4292), False, 'import time\n'), ((9465, 9476), 'time.time', 'time.time', ([], {}), '()\n', (9474, 9476), False, 'import time\n'), ((9824, 9835), 'time.time', 'time.time', ([], {}), '()\n', (9833, 9835), False, 'import time\n'), ((9859, 9923), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['output_path'], {'columns': 
'labelsOutputPath'}), '(output_path, columns=labelsOutputPath)\n', (9884, 9923), True, 'import pandas as pd\n'), ((10269, 10280), 'time.time', 'time.time', ([], {}), '()\n', (10278, 10280), False, 'import time\n'), ((4354, 4381), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4375, 4381), True, 'import numpy as np\n'), ((4403, 4430), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (4424, 4430), True, 'import numpy as np\n'), ((8397, 8438), 'collections.Counter', 'Counter', (['best_treat[policy_chosen, theta]'], {}), '(best_treat[policy_chosen, theta])\n', (8404, 8438), False, 'from collections import Counter\n'), ((9268, 9312), 'pandas.DataFrame', 'pd.DataFrame', (['decision_Given_Best_Treat_dict'], {}), '(decision_Given_Best_Treat_dict)\n', (9280, 9312), True, 'import pandas as pd\n'), ((4737, 4748), 'copy.copy', 'copy', (['Model'], {}), '(Model)\n', (4741, 4748), False, 'from copy import copy\n'), ((7757, 7779), 'numpy.sqrt', 'np.sqrt', (['(F_hat_var / L)'], {}), '(F_hat_var / L)\n', (7764, 7779), True, 'import numpy as np\n'), ((8583, 8636), 'numpy.array', 'np.array', (['best_treat_count_list[policy_chosen, theta]'], {}), '(best_treat_count_list[policy_chosen, theta])\n', (8591, 8636), True, 'import numpy as np\n'), ((8965, 9013), 'collections.Counter', 'Counter', (['decision_ALL_list[policy_chosen, theta]'], {}), '(decision_ALL_list[policy_chosen, theta])\n', (8972, 9013), False, 'from collections import Counter\n'), ((7551, 7566), 'numpy.array', 'np.array', (['F_hat'], {}), '(F_hat)\n', (7559, 7566), True, 'import numpy as np\n'), ((7950, 7972), 'numpy.sqrt', 'np.sqrt', (['(F_hat_var / L)'], {}), '(F_hat_var / L)\n', (7957, 7972), True, 'import numpy as np\n'), ((9151, 9215), 'collections.Counter', 'Counter', (['decision_Given_Best_Treat_list[policy_chosen, theta, x]'], {}), '(decision_Given_Best_Treat_list[policy_chosen, theta, x])\n', (9158, 9215), False, 'from collections import 
Counter\n'), ((7615, 7630), 'numpy.array', 'np.array', (['F_hat'], {}), '(F_hat)\n', (7623, 7630), True, 'import numpy as np\n')] |
# Author: <NAME> <<EMAIL>>
# License: MIT
from collections import defaultdict
import numpy as np
from wittgenstein.base_functions import truncstr
from wittgenstein.utils import rnd
class BinTransformer:
def __init__(self, n_discretize_bins=10, names_precision=2, verbosity=0):
self.n_discretize_bins = n_discretize_bins
self.names_precision = names_precision
self.verbosity = verbosity
self.bins_ = None
def __str__(self):
return str(self.bins_)
__repr__ = __str__
def __bool__(self):
return not not self.bins_
def isempty(self):
return not self.bins_ is None and not self.bins_
    def fit_or_fittransform_(self, df, ignore_feats=[]):
        """Transform df using pre-fit bins, or, if unfit, fit self and transform df"""
        # NOTE(review): mutable default `ignore_feats=[]` — harmless here since it
        # is never mutated, but worth confirming project-wide convention.
        # Binning has already been fit
        if self.bins_:
            return self.transform(df)
        # Binning disabled
        elif not self.n_discretize_bins:
            return df
        # Binning enabled, and binner needs to be fit
        else:
            self.fit(df, ignore_feats=ignore_feats)
            # NOTE(review): expects transform() to return a (df, bins) pair on this path
            df, bins = self.transform(df, ignore_feats=ignore_feats)
            # NOTE(review): assigns `self.bins` (no trailing underscore), while the
            # rest of the class uses `self.bins_` — confirm this is intentional.
            self.bins = bins
            return df
    def fit_transform(self, df, ignore_feats=[]):
        """Fit bins on df, then return df transformed with them."""
        self.fit(df, ignore_feats=ignore_feats)
        # NOTE(review): ignore_feats is not forwarded to transform() — confirm intended.
        return self.transform(df)
def transform(self, df, ignore_feats=[]):
"""Return df with seemingly continuous features binned, and the bin_transformer or None depending on whether binning occurs."""
if n_discretize_bins is None:
return df
if self.bins_ == {}:
return df
isbinned = False
continuous_feats = find_continuous_feats(df, ignore_feats=ignore_feats)
if self.n_discretize_bins:
if continuous_feats:
if self.verbosity == 1:
print(f"binning data...\n")
elif self.verbosity >= 2:
print(f"binning features {continuous_feats}...")
binned_df = df.copy()
bin_transformer = fit_bins(
binned_df, output=False, ignore_feats=ignore_feats,
)
binned_df = bin_transform(binned_df, bin_transformer)
isbinned = True
else:
n_unique_values = sum(
[len(u) for u in [df[f].unique() for f in continuous_feats]]
)
warning_str = f"There are {len(continuous_feats)} features to be treated as continuous: {continuous_feats}. \n Treating {n_unique_values} numeric values as nominal or discrete. To auto-discretize features, assign a value to parameter 'n_discretize_bins.'"
_warn(warning_str, RuntimeWarning, filename="base", funcname="transform")
if isbinned:
self.bins_ = bin_transformer
return binned_df, bin_transformer
else:
return df
def find_continuous_feats(self, df, ignore_feats=[]):
"""Return names of df features that seem to be continuous."""
if not self.n_discretize_bins:
return []
# Find numeric features
cont_feats = df.select_dtypes(np.number).columns
# Remove discrete features
cont_feats = [
f for f in cont_feats if len(df[f].unique()) > self.n_discretize_bins
]
# Remove ignore features
cont_feats = [f for f in cont_feats if f not in ignore_feats]
return cont_feats
def fit(self, df, output=False, ignore_feats=[]):
"""
Returns a dict definings fits for numerical features
A fit is an ordered list of tuples defining each bin's range (min is exclusive; max is inclusive)
Returned dict allows for fitting to training data and applying the same fit to test data
to avoid information leak.
"""
def _fit_feat(df, feat):
"""Return list of tuples defining bin ranges for a numerical feature using simple linear search"""
if len(df) == 0:
return []
n_discretize_bins = min(
self.n_discretize_bins, len(df[feat].unique())
) # In case there are fewer unique values than n_discretize_bins
bin_size = len(df) // n_discretize_bins
sorted_df = df.sort_values(by=[feat])
sorted_values = sorted_df[feat].tolist()
sizes = [] # for verbosity output
if self.verbosity >= 4:
print(
f"{feat}: fitting {len(df[feat].unique())} unique vals into {n_discretize_bins} bins"
)
bin_ranges = [] # result
bin_num = 0 # current bin number
ceil_i = -1 # current bin ceiling index
ceil_val = None # current bin upper bound
floor_i = 0 # current bin start index
floor_val = sorted_df.iloc[0][feat] # current bin floor value
prev_finish_val = None # prev bin upper bound
while bin_num < n_discretize_bins and floor_i < len(sorted_values):
# jump to tentative ceiling index
ceil_i = min(floor_i + bin_size, len(sorted_df) - 1)
ceil_val = sorted_df.iloc[ceil_i][feat]
# increment ceiling index until encounter a new value to ensure next bin size is correct
while (
ceil_i < len(sorted_df) - 1 # not last bin
and sorted_df.iloc[ceil_i][feat]
== ceil_val # keep looking for a new value
):
ceil_i += 1
# found ceiling index. update values
if self.verbosity >= 4:
sizes.append(ceil_i - floor_i)
print(
f"bin #{bin_num}, floor idx {floor_i} value: {sorted_df.iloc[floor_i][feat]}, ceiling idx {ceil_i} value: {sorted_df.iloc[ceil_i][feat]}"
)
bin_range = (floor_val, ceil_val)
bin_ranges.append(bin_range)
# update for next bin
floor_i = ceil_i + 1
floor_val = ceil_val
bin_num += 1
# Guarantee min and max values
bin_ranges[0] = (sorted_df.iloc[0][feat], bin_ranges[0][1])
bin_ranges[-1] = (bin_ranges[-1][0], sorted_df.iloc[-1][feat])
if self.verbosity >= 4:
print(
f"-bin sizes {sizes}; dataVMR={rnd(np.var(df[feat])/np.mean(df[feat]))}, binVMR={rnd(np.var(sizes)/np.mean(sizes))}"
) # , axis=None, dtype=None, out=None, ddof=0)})
return bin_ranges
# Create dict to store fit definitions for each feature
fit_dict = {}
feats_to_fit = self.find_continuous_feats(df, ignore_feats=ignore_feats)
if self.verbosity == 2:
print(f"fitting bins for features {feats_to_fit}")
if self.verbosity >= 2:
print()
# Collect fits in dict
count = 1
for feat in feats_to_fit:
fit = _fit_feat(df, feat)
fit_dict[feat] = fit
self.bins_ = fit_dict
def transform(self, df):
"""
Uses a pre-collected dictionary of fits to transform df features into bins.
Returns the fit df rather than modifying inplace.
"""
if self.bins_ is None:
return df
# Replace each feature with bin transformations
for feat, bin_fit_list in self.bins_.items():
if feat in df.columns:
df[feat] = df[feat].map(
lambda x: self._transform_value(x, bin_fit_list)
)
return df
def _transform_value(self, value, bin_fit_list):
"""Return bin string name for a given numerical value. Assumes bin_fit_list is ordered."""
min_val, min_bin = bin_fit_list[0][0], bin_fit_list[0]
max_val, max_bin = bin_fit_list[-1][1], bin_fit_list[-1]
for bin_fit in bin_fit_list:
if value <= bin_fit[1]:
start_name = (
str(round(bin_fit[0], self.names_precision))
if self.names_precision
else str(int(bin_fit[0]))
)
finish_name = (
str(round(bin_fit[1], self.names_precision))
if self.names_precision
else str(int(bin_fit[1]))
)
bin_name = "-".join([start_name, finish_name])
return bin_name
if value <= min_val:
return min_bin
elif value >= max_val:
return max_bin
else:
raise ValueError("No bin found for value", value)
def _try_rename_features(self, df, class_feat, feature_names):
"""Rename df columns according to user request."""
# Rename if same number of features
df_columns = [col for col in df.columns.tolist() if col != class_feat]
if len(df_columns) == len(feature_names):
col_replacements_dict = {
old: new for old, new in zip(df_columns, feature_names)
}
df = df.rename(columns=col_replacements_dict)
return df
# Wrong number of feature names
else:
return None
def _construct_from_ruleset(self, ruleset):
MIN_N_DISCRETIZED_BINS = 10
bt = BinTransformer()
bt.bins_ = self._bin_prediscretized_features(ruleset)
bt.n_discretize_bins = max(
(MIN_N_DISCRETIZED_BINS, max(len(bins) for bins in bt.bins_.values()))
)
bt.names_precision = self._max_dec_precision(bt.bins_)
return bt
def _bin_prediscretized_features(self, ruleset):
def is_valid_decimal(s):
try:
float(s)
except:
return False
return True
def find_floor_ceil(value):
"""id min, max separated by a dash. Return None if invalid pattern."""
split_idx = 0
for i, char in enumerate(value):
# Found a possible split and it's not the first number's minus sign
if char == "-" and i != 0:
if split_idx is not None and not split_idx:
split_idx = i
# Found a - after the split, and it's not the minus of a negative number
elif i > split_idx + 1:
return None
floor = value[:split_idx]
ceil = value[split_idx + 1 :]
if is_valid_decimal(floor) and is_valid_decimal(ceil):
return (floor, ceil)
else:
return None
# Main function: _bin_prediscretized_features
discrete = defaultdict(list)
for cond in ruleset.get_conds():
floor_ceil = find_floor_ceil(cond.val)
if floor_ceil:
discrete[cond.feature].append(floor_ceil)
for feat, ranges in discrete.items():
ranges.sort(key=lambda x: float(x[0]))
return dict(discrete)
def _max_dec_precision(self, bins_dict):
def dec_precision(value):
try:
return len(value) - value.index(".") - 1
except:
return 0
max_prec = 0
for bins in bins_dict.values():
for bin_ in bins:
for value in bin_:
cur_prec = dec_precision(value)
if cur_prec > max_prec:
max_prec = cur_prec
return max_prec
| [
"collections.defaultdict",
"numpy.mean",
"numpy.var"
] | [((10898, 10915), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10909, 10915), False, 'from collections import defaultdict\n'), ((6560, 6576), 'numpy.var', 'np.var', (['df[feat]'], {}), '(df[feat])\n', (6566, 6576), True, 'import numpy as np\n'), ((6577, 6594), 'numpy.mean', 'np.mean', (['df[feat]'], {}), '(df[feat])\n', (6584, 6594), True, 'import numpy as np\n'), ((6610, 6623), 'numpy.var', 'np.var', (['sizes'], {}), '(sizes)\n', (6616, 6623), True, 'import numpy as np\n'), ((6624, 6638), 'numpy.mean', 'np.mean', (['sizes'], {}), '(sizes)\n', (6631, 6638), True, 'import numpy as np\n')] |
# Python 3
# Model stacking
import time
import copy
import warnings
import pickle
import numpy as np
import pandas as pd
from joblib import dump, load
from collections import defaultdict
from sklearn.model_selection import ParameterGrid
"""
Package Description:
----------------------------------------------------------------------
The stacker implements calculation of predictions through a module
which contains an in-layer for initial models (to be stacked) and an
out-layer which combines predictions from the in-layer (stacker).
The in-layer passes out-of-sample (OOS) and hold-out (HO) predictions
to the out-layer, which trains and fits a second level of models.
Connections between all in and out layer models are present.
Each out-layer model returns a prediction vector. The user can
choose the best out-layer predictions to proceed (based on the
CV score) to predicting on the test set.The other option would be
to concatenate the predictions from all out-layer models into a
new data matrix to be fed into another stacking module.
Example Usage:
----------------------------------------------------------------------
import numpy as np
from sklearn.model_selection import KFold, train_test_split
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score
from Pancake.Stacker import Stacker
# Dataset
X, y = make_classification(n_samples=1000)
X_tr, X_ts, y_tr, y_ts = train_test_split(X,y,stratify=y, test_size=0.30)
# Splitter and stacker
splitter = KFold(n_splits=5)
stacker = Stacker(X_tr, y_tr, splitter, roc_auc_score, family="binary")
# Add 2 in-layer models
stacker.addModelIn(LogisticRegression())
stacker.addModelIn(SVC())
# Add 1 out-layer model
grid = {'C': np.logspace(-2, 2, 100)}
stacker.addModelOut(LogisticRegression(), grid)
# Train
predsTrain = stacker.stackTrain()
# Test set
predsTest = stacker.stackTest(X_ts)
# Summary
stacker.summary()
"""
# -- Input Node of stacking of a single model
class InNode(object):
    """One in-layer (first level) node of the stack: wraps a single model
    and produces the out-of-sample (OOS) and hold-out (HO) predictions
    consumed by the out-layer."""
    def __init__ (self, splits, modelIn, family, trainable=False, hyperParameters=None):
        """
        Input
        --------------------------------------------------------------------------------
        splits          : Dict of folds {fold id: sample indices} (HO folds)
        modelIn         : Model object (sklearn-style fit/predict interface)
        family          : 'regression' or 'binary'
        trainable       : Is the model trainable (hyper-parameter search) or just to be fitted
        hyperParameters : Hyper-parameters for the trainable model (unused unless trainable)
        """
        self.splits = splits
        self.modelIn = modelIn
        self.family = family
        self.trainable = trainable
        self.hyperParameters = hyperParameters
    # Fit and return predictions for a single model on [folds] - [HO fold]
    def fitPredOOS_HO(self, X, y):
        """
        Generate out-of-sample predictions with hold-out folds
        Input
        ---------------------------------------------
        X : Data matrix
        y : Target vector
        Output
        ---------------------------------------------
        yHatOOS : Dict {HO fold: OOS prediction vector}; entries at the HO
                  fold's own indices are left as zeros
        yHatHO  : Dict {HO fold: predictions on the HO fold}
        """
        # Initiate dictionaries for OOS and HO folds
        yHatOOS = {}
        yHatHO = {}
        # Loop over folds
        for ho, idx_ho in self.splits.items():
            # Splits for OOS fits
            internalSplits = {i:x for i,x in self.splits.items() if i != ho}
            # OOS - HO predictions (there will be zeros in HO indices)
            yOOS = np.zeros(len(y))
            # Loop over internal splits and fit
            for vld, idx_vld in internalSplits.items():
                # Train data to fit on
                idx_tr_ = [idx_ for f, idx_ in internalSplits.items() if f != vld]
                idx_tr = np.concatenate(idx_tr_)
                # Predict on the rest
                self.modelIn.fit(X[idx_tr], y[idx_tr])
                if self.family == 'regression':
                    y_prob = self.modelIn.predict(X[idx_vld])
                else:
                    # Binary classification: keep probability of the positive class
                    y_prob = self.modelIn.predict_proba(X[idx_vld])[:,1]
                yOOS[idx_vld] = y_prob
            # Save in dictionary
            yHatOOS[ho] = yOOS
            # Also predict on HO using the rest
            idx_in_ = [idx_ for f, idx_ in self.splits.items() if f != ho]
            idx_in = np.concatenate(idx_in_)
            # Fit and predict
            self.modelIn.fit(X[idx_in], y[idx_in])
            if self.family == 'regression':
                y_prob_ho = self.modelIn.predict(X[idx_ho])
            else:
                y_prob_ho = self.modelIn.predict_proba(X[idx_ho])[:,1]
            yHatHO[ho] = y_prob_ho
        return yHatOOS, yHatHO
    # Train/Fit and return predictions for a single model on [folds] - [HO fold]
    def trainPredOOS_HO(self, X, y, evalMetric):
        """
        Generate out-of-sample predictions with hold-out folds
        while training the in-layer model (grid search over
        self.hyperParameters, nested CV per HO fold)
        Input
        ---------------------------------------------
        X          : Data matrix
        y          : Target vector
        evalMetric : Performance metric to maximize during training
        Output
        ---------------------------------------------
        yHatOOS       : Dict {HO fold: OOS predictions of the best model}
        yHatHO        : Dict {HO fold: predictions on the HO fold}
        avg_cv_scores : Best mean CV score per HO fold
        """
        # Initiate dictionaries for OOS and HO folds
        yHatOOS = {}
        yHatHO = {}
        # List of hyper-parameters
        hyper_grid = list(ParameterGrid(self.hyperParameters))
        # Initiate dict for trained model hyper-params over folds
        self.trainedHyperParams = {}
        # Loop over folds
        avg_cv_scores = []
        for ho, idx_ho in self.splits.items():
            # Splits for OOS fits
            internalSplits = {i:x for i,x in self.splits.items() if i != ho}
            # Loop over hyper-parameters (Can this be parallelized?)
            scores = [] # Avg CV scores for each hyper-param
            y_oos_ = [np.zeros(len(y)) for _ in range(len(hyper_grid))] # OOS predictions for all hyper-params
            for ih, dict_hyp in enumerate(hyper_grid):
                # Set model hyper-parameters
                self.modelIn.set_params(**dict_hyp)
                # Loop over internal splits
                scores_ = []
                for vld, idx_vld in internalSplits.items():
                    # Train data
                    idx_tr_ = [idx_ for f, idx_ in internalSplits.items() if f != vld]
                    idx_tr = np.concatenate(idx_tr_)
                    # Predict on validation set
                    self.modelIn.fit(X[idx_tr], y[idx_tr])
                    if self.family == 'regression':
                        y_prob = self.modelIn.predict(X[idx_vld])
                    else:
                        y_prob = self.modelIn.predict_proba(X[idx_vld])[:,1]
                    # Store the current prediction
                    y_oos_[ih][idx_vld] = y_prob
                    # Score
                    scores_.append(evalMetric(y[idx_vld], y_prob))
                # Get mean of scores_
                scores.append(np.mean(scores_))
            # Find Best hyper-parameter and determine yOOS
            best_idx = np.argmax(scores)
            avg_cv_scores.append(np.max(scores))
            # Save best hyper-parameters
            self.trainedHyperParams[ho] = hyper_grid[best_idx]
            # Save OOS predictions in dictionary
            yHatOOS[ho] = y_oos_[best_idx]
            # Also predict on HO using the rest
            idx_in_ = [idx_ for f, idx_ in self.splits.items() if f != ho]
            idx_in = np.concatenate(idx_in_)
            # Fit and predict
            self.modelIn.set_params(**hyper_grid[best_idx])
            self.modelIn.fit(X[idx_in], y[idx_in])
            if self.family == 'regression':
                y_prob_ho = self.modelIn.predict(X[idx_ho])
            else:
                y_prob_ho = self.modelIn.predict_proba(X[idx_ho])[:,1]
            yHatHO[ho] = y_prob_ho
        return yHatOOS, yHatHO, avg_cv_scores
    def fitPredOOS(self, X, y):
        """
        Generate out-of-sample(OOS) predictions WITHOUT hold-out(HO) folds
        Input
        -----------------------------------------------------------------------
        X : Data matrix
        y : Target vector
        Output
        -----------------------------------------------------------------------
        yHatOOS           : Out-of-sample predictions
        dict_fittedModels : Dictionary of fitted models for each fold
        """
        # Initiate OOS predictions
        yHatOOS = np.zeros(len(y))
        # Initiate dict for fitted models over folds
        dict_fittedModels = {}
        # Loop over folds
        for vld, idx_vld in self.splits.items():
            # Indices of samples to fit on
            idx_tr_ = [idx_ for f, idx_ in self.splits.items() if f != vld]
            idx_tr = np.concatenate(idx_tr_)
            # Fit and predict
            if self.trainable:
                # Reuse the per-fold hyper-parameters found by trainPredOOS_HO
                self.modelIn.set_params(**self.trainedHyperParams[vld])
            self.modelIn.fit(X[idx_tr], y[idx_tr])
            if self.family == 'regression':
                y_prob = self.modelIn.predict(X[idx_vld])
            else:
                y_prob = self.modelIn.predict_proba(X[idx_vld])[:,1]
            yHatOOS[idx_vld] = y_prob
            # Save model
            model_copy = copy.deepcopy(self.modelIn) # A new copy of model, not a reference to it
            dict_fittedModels[vld] = model_copy
        return yHatOOS, dict_fittedModels
    def predOOS_test(self, X_ts, dict_fittedModels, testSplits):
        """
        Predictions on an independent test set
        Input
        -----------------------------------------------------------------------
        X_ts              : Data matrix (test set)
        dict_fittedModels : Dictionary of fitted models for each fold
        testSplits        : Dict of folds for test set. Number of folds must be
                            same with dict_fittedModels. Recommended: Use the
                            same random_state as well.
        Output
        -----------------------------------------------------------------------
        yHatOOS : Out-of-sample predictions (test set), averaged over the
                  per-fold fitted models
        """
        # Initiate OOS predictions (folds x folds combinations)
        yHatOOS = np.zeros((len(X_ts),len(dict_fittedModels)))
        # Loop over folds and just predict
        for vld, idx_vld in testSplits.items():
            # Predict with all models
            for m,mod in dict_fittedModels.items():
                if self.family == 'regression':
                    y_prob = mod.predict(X_ts[idx_vld])
                else:
                    y_prob = mod.predict_proba(X_ts[idx_vld])[:,1]
                yHatOOS[idx_vld,m] = y_prob
        # Return predictions: mean across the fold-models
        return yHatOOS.mean(axis=1)
# -- Node in out level
class OutNode(object):
    """One out-layer (second level) node: combines the in-layer's
    predictions into a final prediction, with its own grid search."""
    def __init__ (self, splits, modelOut, hyperParameters, evalMetric, family):
        """
        Input
        ---------------------------------------------------------------------
        splits          : Dict of folds {fold id: sample indices} (HO folds)
        modelOut        : Model object
        hyperParameters : Hyperparameter grid for model
        evalMetric      : Performance metric to maximize during training
        family          : 'regression' or 'binary' (must match first layer)
        """
        self.splits = splits
        self.modelOut = modelOut
        self.hyperParameters = hyperParameters
        self.evalMetric = evalMetric
        self.family = family
    def train(self, y, dict_Xoos, dict_Xho):
        """
        Train modelOut: grid-search hyper-parameters by cross-validating
        on the in-layer's OOS/HO predictions.
        Input
        --------------------------------------------------------------
        y         : Target vector
        dict_Xoos : {HO fold: OOS prediction vectors, one per in-layer model}
        dict_Xho  : {HO fold: HO prediction vectors, one per in-layer model}
        Effect/Output
        --------------------------------------------------------------
        best_hyperParams : Best set of hyper-parameters (also stored on self)
        best_cvscore     : CV score for the best parameters
        """
        # List of hyper-parameters
        hyper_grid = list(ParameterGrid(self.hyperParameters))
        # Initiate dict for saving CV results
        hyper_evals = defaultdict(list)
        # Loop over folds
        for ho, idx_ho in self.splits.items():
            # Construct the train (X_oos) and test (X_ho) data for CV
            # For multiclass: vstack --> hstack and no T
            X_oos = np.vstack(dict_Xoos[ho]).T
            X_ho = np.vstack(dict_Xho[ho]).T
            # Remove zeros in OOS from hold-out section
            X_oos = np.delete(X_oos, idx_ho, axis=0)
            y_oos = np.delete(y, idx_ho, axis=0)
            # Hold-out target
            y_ho = y[idx_ho]
            # Scores on folds (can parallelize this by joblib)
            for ih, dict_hyp in enumerate(hyper_grid):
                self.modelOut.set_params(**dict_hyp)
                self.modelOut.fit(X_oos, y_oos)
                if self.family == 'regression':
                    y_prob = self.modelOut.predict(X_ho)
                else:
                    y_prob = self.modelOut.predict_proba(X_ho)[:,1]
                hyper_evals[ih].append(self.evalMetric(y_ho, y_prob))
        # Get best hyper-parameters with best performance (mean across HO folds)
        cv_scores = [np.mean(ls_) for i, ls_ in hyper_evals.items()]
        best_idx = np.argmax(cv_scores)
        self.best_hyperParams = hyper_grid[best_idx]
        # Return best hyper-parameters and the corresponding CV score
        return self.best_hyperParams, cv_scores[best_idx]
    def fitPred(self, y, list_Xoos):
        """
        Fit model on all training folds with best hyper-parameters
        (requires a prior call to train()).
        Input
        ---------------------------------------------------------------
        y         : Target vector
        list_Xoos : OOS predictions from in-layer (one vector per model)
        Output
        ---------------------------------------------------------------
        y_final : Final prediction on the training set
        """
        Xoos = np.vstack(list_Xoos).T
        self.modelOut.set_params(**self.best_hyperParams)
        self.modelOut.fit(Xoos,y)
        if self.family == 'regression':
            y_final = self.modelOut.predict(Xoos)
        else:
            y_final = self.modelOut.predict_proba(Xoos)[:,1]
        return y_final
    def predTest(self, list_Xoos):
        """
        Predict on all test folds with the already-fitted model
        (requires a prior call to fitPred()).
        Input
        -----------------------------------------------------------------
        list_Xoos : OOS predictions from in-level models (test set)
        Output
        -----------------------------------------------------------------
        y_final_ts : Final prediction (test set)
        """
        # Just predict
        Xoos = np.vstack(list_Xoos).T
        if self.family == 'regression':
            y_final_ts = self.modelOut.predict(Xoos)
        else:
            y_final_ts = self.modelOut.predict_proba(Xoos)[:,1]
        return y_final_ts
# -- An object for generating the stacked network
class Stacker(object):
    """Full stacking pipeline: an in-layer of base models whose OOS/HO
    predictions are combined by one or more out-layer models."""
    def __init__ (self, X, y, splitter, evalMetric, family = "regression"):
        """
        The Stacker class implements the full stacked predictions pipeline
        Input
        ---------------------------------------------------------------------
        X          : Original data matrix
        y          : Target vector
        splitter   : Cross-validation generator (sklearn-style .split())
        evalMetric : Performance metric to maximize during training
        family     : 'regression' or 'binary' (must match in-layer)
        """
        self.X = X
        self.y = y
        self.splitter = splitter
        self.evalMetric = evalMetric
        self.family = family
        # Initiate lists for models in layers
        self.modelsIn = []
        self.modelsOut = []
        # Splits for training: {fold id: validation indices}
        splt_ = list(splitter.split(X,y))
        self.splits = {i:x[1] for i,x in enumerate(splt_)}
    def addModelIn(self, modelObj, trainable = False, hyperParameters = None):
        """ Adder of model to in-layer """
        newNode = InNode(self.splits, modelObj, self.family, trainable, hyperParameters)
        self.modelsIn.append(newNode)
    def addModelOut(self, modelObj, hyperParameters):
        """ Adder of model to out-layer """
        newNode = OutNode(self.splits, modelObj, hyperParameters, self.evalMetric, self.family)
        # NOTE(review): current_index is never used -- candidate for removal
        current_index = len(self.modelsOut)
        self.modelsOut.append(newNode)
    def _checkInLayer(self):
        """ Performs sanity checks on the in-layer; always returns 1 """
        # Classification problem
        if self.family != 'regression' and len(set(self.y)) > 2:
            raise NotImplementedError("Multi-class case not implemented")
        # Number of models in the in-layer
        nIn = len(self.modelsIn)
        if nIn <= 1:
            # NOTE(review): warning text typo ("more then") -- fixing it would
            # change a runtime string, so it is only flagged here
            warnings.warn("Stacking requires more then one model",RuntimeWarning)
        return 1
    def _checkOutLayer(self):
        """ Performs sanity checks on the out-layer; always returns 1 """
        # Number of models in the out-layer
        nOut = len(self.modelsOut)
        if nOut < 1:
            warnings.warn("Stacking requires at least one out-layer model", RuntimeWarning)
        # Hyper-parameters for each model
        for m in self.modelsOut:
            if len(m.hyperParameters) < 1:
                warnings.warn("Hyperparameters need to be specified for training", RuntimeWarning)
        return 1
    def _stackLevelInTrain(self):
        """ Runs in-layer and obtain OOS & HO predictions """
        # Check in-layer model
        _ = self._checkInLayer()
        # The lists contain predictions from each model
        dict_Xoos = defaultdict(list)
        dict_Xho = defaultdict(list)
        # For each model (node) in-layer, perform predictions
        for i, node in enumerate(self.modelsIn):
            # OOS and HO predictions
            if not node.trainable:
                yHatOOS, yHatHO = node.fitPredOOS_HO(self.X, self.y)
            else:
                yHatOOS, yHatHO, avg_scores = node.trainPredOOS_HO(self.X, self.y, self.evalMetric)
            for ho in self.splits:
                dict_Xoos[ho].append(yHatOOS[ho])
                dict_Xho[ho].append(yHatHO[ho])
            # Print report
            if node.trainable:
                print("In-layer model : {:d} trained, Avg CV score across HO folds: {:.4f}".format(i+1, np.mean(avg_scores)))
            else:
                print("In-layer model : {:d} only fitted".format(i+1))
        return dict_Xoos, dict_Xho
    def _stackLevelInFitPred(self):
        """ Runs in-layer and obtain OOS predictions on all folds """
        list_Xoos = []
        # List of fitted models (save in object to re-use)
        self.list_fittedModelsIn = []
        # For each model (node) in-layer, perform predictions
        for node in self.modelsIn:
            yHatOOS, dict_fittedModels = node.fitPredOOS(self.X, self.y)
            list_Xoos.append(yHatOOS)
            self.list_fittedModelsIn.append(dict_fittedModels)
        return list_Xoos
    def _stackLevelInPredTest(self, X_ts, testSplits):
        """ Runs in-layer and obtain OOS predictions on test set.
        Requires the fitted models stored by _stackLevelInFitPred(). """
        list_Xoos = []
        # Loop over models and get predictions
        for i, node in enumerate(self.modelsIn):
            yHatOOS = node.predOOS_test(X_ts, self.list_fittedModelsIn[i], testSplits)
            list_Xoos.append(yHatOOS)
        return list_Xoos
    def _trainOutLayer(self, dict_Xoos, dict_Xho):
        """ Trains out-layer to get best hyper-parameters;
        stores them in self.best_hypers and scores in self.cv_scores """
        self.best_hypers = {}
        # Check out-layer
        _ = self._checkOutLayer()
        # CV scores
        self.cv_scores =[]
        for i, node in enumerate(self.modelsOut):
            best_hyper, score = node.train(self.y, dict_Xoos, dict_Xho)
            self.best_hypers[i] = best_hyper
            self.cv_scores.append(score)
            # Print report
            print("Out-layer model : {:d} trained, CV score = {:.4f}".format(i+1, score))
    def _fitOutLayer(self, list_Xoos):
        """ Fits out-layer models on training set """
        # List of model predictions
        predsTrain = []
        for i, node in enumerate(self.modelsOut):
            y_pred = node.fitPred(self.y, list_Xoos)
            predsTrain.append(y_pred)
        return predsTrain
    def _predOutLayerTest(self, list_Xoos, testSplits):
        """ Predicts on an independent test set """
        predsTest = []
        for i, node in enumerate(self.modelsOut):
            y_prob_ts = node.predTest(list_Xoos)
            predsTest.append(y_prob_ts)
        return predsTest
    def stackTrain(self, matrixOut = False):
        """ Runner of training. Returns one prediction vector per out-layer
        model (or a single matrix if matrixOut is True). Timings are stored
        in self.stackTime and self.trainTime. """
        t0 = time.time()
        # Stack in-level models for training (OOS & HO)
        dict_Xoos, dict_Xho = self._stackLevelInTrain()
        self.stackTime = time.time() - t0
        # Train out-layer for best hyper-parameters
        t0 = time.time()
        self._trainOutLayer(dict_Xoos, dict_Xho)
        # Get OOS predictions for all training set with all second level models
        list_Xoos = self._stackLevelInFitPred()
        predsTrain = self._fitOutLayer(list_Xoos)
        # Train time
        self.trainTime = time.time() - t0
        if matrixOut:
            return self._outMatrix(predsTrain)
        else:
            return predsTrain
    def stackTest(self, X_ts, matrixOut = False):
        """ Runner of test set predictions (call after stackTrain).
        Timing is stored in self.testTime. """
        t0 = time.time()
        # Get test splits
        splt_ = list(self.splitter.split(X_ts))
        testSplits = {i:x[1] for i,x in enumerate(splt_)}
        # First level OOS predictions
        list_Xoos = self._stackLevelInPredTest(X_ts, testSplits)
        # Get OOS predictions for the test set with final model
        predsTest = self._predOutLayerTest(list_Xoos, testSplits)
        # Test time
        self.testTime = time.time() - t0
        if matrixOut:
            return self._outMatrix(predsTest)
        else:
            return predsTest
    @staticmethod
    def _outMatrix(preds):
        """ Returns predictions as a matrix (one column per model) from a list """
        newDataMatrix = []
        for pred in preds:
            newDataMatrix.append(pred.reshape(-1,1))
        newDataMatrix = np.hstack(newDataMatrix)
        return newDataMatrix
    def summary(self):
        """ Prints summary on model training (valid after stackTrain) """
        # Number of models (In and Out)
        print("Stacked Model Summary:")
        print(''.join(['-']*40))
        print("{:d} in-layer models stacked in {:.2e} sec".format(len(self.modelsIn),self.stackTime))
        # Training CV scores
        print("{:d} out-layer models trained in {:.2e} sec".format(len(self.modelsOut),self.trainTime))
        print("In-layer summary:")
        print(''.join(['-']*40))
        print("{:d} in-Layer models trained/fitted".format(len(self.modelsIn)))
        print("Out-layer summary:")
        print(''.join(['-']*40))
        for i,mod in enumerate(self.modelsOut):
            print("Out-layer model {:d}: CV score = {:.4f}".format(i+1,self.cv_scores[i]))
            print("Best hyper-parameters:", mod.best_hyperParams)
# -- Save and load models
def saveModel(stacker, savePath):
    """ Save a trained Stacker to disk (joblib serialization).

    stacker  : Trained Stacker instance
    savePath : Destination file path
    """
    dump(stacker, savePath)
def loadModel(loadPath):
    """Load a Stacker previously serialized with saveModel.

    loadPath : Path to the joblib file on disk.
    Returns the deserialized Stacker instance.
    """
    return load(loadPath)
| [
"copy.deepcopy",
"numpy.argmax",
"joblib.dump",
"warnings.warn",
"time.time",
"collections.defaultdict",
"numpy.hstack",
"numpy.max",
"numpy.mean",
"sklearn.model_selection.ParameterGrid",
"numpy.vstack",
"joblib.load",
"numpy.delete",
"numpy.concatenate"
] | [((20466, 20489), 'joblib.dump', 'dump', (['stacker', 'savePath'], {}), '(stacker, savePath)\n', (20470, 20489), False, 'from joblib import dump, load\n'), ((20559, 20573), 'joblib.load', 'load', (['loadPath'], {}), '(loadPath)\n', (20563, 20573), False, 'from joblib import dump, load\n'), ((10908, 10925), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10919, 10925), False, 'from collections import defaultdict\n'), ((11856, 11876), 'numpy.argmax', 'np.argmax', (['cv_scores'], {}), '(cv_scores)\n', (11865, 11876), True, 'import numpy as np\n'), ((15632, 15649), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15643, 15649), False, 'from collections import defaultdict\n'), ((15663, 15680), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15674, 15680), False, 'from collections import defaultdict\n'), ((18247, 18258), 'time.time', 'time.time', ([], {}), '()\n', (18256, 18258), False, 'import time\n'), ((18450, 18461), 'time.time', 'time.time', ([], {}), '()\n', (18459, 18461), False, 'import time\n'), ((18899, 18910), 'time.time', 'time.time', ([], {}), '()\n', (18908, 18910), False, 'import time\n'), ((19568, 19592), 'numpy.hstack', 'np.hstack', (['newDataMatrix'], {}), '(newDataMatrix)\n', (19577, 19592), True, 'import numpy as np\n'), ((4047, 4070), 'numpy.concatenate', 'np.concatenate', (['idx_in_'], {}), '(idx_in_)\n', (4061, 4070), True, 'import numpy as np\n'), ((5075, 5110), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['self.hyperParameters'], {}), '(self.hyperParameters)\n', (5088, 5110), False, 'from sklearn.model_selection import ParameterGrid\n'), ((6474, 6491), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (6483, 6491), True, 'import numpy as np\n'), ((6812, 6835), 'numpy.concatenate', 'np.concatenate', (['idx_in_'], {}), '(idx_in_)\n', (6826, 6835), True, 'import numpy as np\n'), ((7928, 7951), 'numpy.concatenate', 'np.concatenate', (['idx_tr_'], {}), 
'(idx_tr_)\n', (7942, 7951), True, 'import numpy as np\n'), ((8309, 8336), 'copy.deepcopy', 'copy.deepcopy', (['self.modelIn'], {}), '(self.modelIn)\n', (8322, 8336), False, 'import copy\n'), ((10814, 10849), 'sklearn.model_selection.ParameterGrid', 'ParameterGrid', (['self.hyperParameters'], {}), '(self.hyperParameters)\n', (10827, 10849), False, 'from sklearn.model_selection import ParameterGrid\n'), ((11231, 11263), 'numpy.delete', 'np.delete', (['X_oos', 'idx_ho'], {'axis': '(0)'}), '(X_oos, idx_ho, axis=0)\n', (11240, 11263), True, 'import numpy as np\n'), ((11275, 11303), 'numpy.delete', 'np.delete', (['y', 'idx_ho'], {'axis': '(0)'}), '(y, idx_ho, axis=0)\n', (11284, 11303), True, 'import numpy as np\n'), ((11795, 11807), 'numpy.mean', 'np.mean', (['ls_'], {}), '(ls_)\n', (11802, 11807), True, 'import numpy as np\n'), ((12436, 12456), 'numpy.vstack', 'np.vstack', (['list_Xoos'], {}), '(list_Xoos)\n', (12445, 12456), True, 'import numpy as np\n'), ((13085, 13105), 'numpy.vstack', 'np.vstack', (['list_Xoos'], {}), '(list_Xoos)\n', (13094, 13105), True, 'import numpy as np\n'), ((14910, 14980), 'warnings.warn', 'warnings.warn', (['"""Stacking requires more then one model"""', 'RuntimeWarning'], {}), "('Stacking requires more then one model', RuntimeWarning)\n", (14923, 14980), False, 'import warnings\n'), ((15147, 15226), 'warnings.warn', 'warnings.warn', (['"""Stacking requires at least one out-layer model"""', 'RuntimeWarning'], {}), "('Stacking requires at least one out-layer model', RuntimeWarning)\n", (15160, 15226), False, 'import warnings\n'), ((18379, 18390), 'time.time', 'time.time', ([], {}), '()\n', (18388, 18390), False, 'import time\n'), ((18701, 18712), 'time.time', 'time.time', ([], {}), '()\n', (18710, 18712), False, 'import time\n'), ((19270, 19281), 'time.time', 'time.time', ([], {}), '()\n', (19279, 19281), False, 'import time\n'), ((3609, 3632), 'numpy.concatenate', 'np.concatenate', (['idx_tr_'], {}), '(idx_tr_)\n', (3623, 3632), True, 
'import numpy as np\n'), ((6516, 6530), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (6522, 6530), True, 'import numpy as np\n'), ((11109, 11133), 'numpy.vstack', 'np.vstack', (['dict_Xoos[ho]'], {}), '(dict_Xoos[ho])\n', (11118, 11133), True, 'import numpy as np\n'), ((11146, 11169), 'numpy.vstack', 'np.vstack', (['dict_Xho[ho]'], {}), '(dict_Xho[ho])\n', (11155, 11169), True, 'import numpy as np\n'), ((15329, 15415), 'warnings.warn', 'warnings.warn', (['"""Hyperparameters need to be specified for training"""', 'RuntimeWarning'], {}), "('Hyperparameters need to be specified for training',\n RuntimeWarning)\n", (15342, 15415), False, 'import warnings\n'), ((5952, 5975), 'numpy.concatenate', 'np.concatenate', (['idx_tr_'], {}), '(idx_tr_)\n', (5966, 5975), True, 'import numpy as np\n'), ((6391, 6407), 'numpy.mean', 'np.mean', (['scores_'], {}), '(scores_)\n', (6398, 6407), True, 'import numpy as np\n'), ((16226, 16245), 'numpy.mean', 'np.mean', (['avg_scores'], {}), '(avg_scores)\n', (16233, 16245), True, 'import numpy as np\n')] |
import folium
import matplotlib
import numpy as np
from .config import config
from .analysis import (is_outlier, check_coords, filtered_heartrates,
elevation_summary, filter_median_average,
appropriate_partition, compute_distances_for_valid_trackpoints)
from .ui_text import d
from slither.core.unit_conversions import convert_m_to_km, convert_mps_to_kmph, minutes_from_start
def render_map(path):
    """Render a path as a leaflet.js map and return its HTML.

    Parameters
    ----------
    path : dict
        A path that has at least the entries 'timestamps' and 'coords'.

    Returns
    -------
    html : str
        HTML representation of the rendered map.
    """
    folium_map = make_map(path)
    root_element = folium_map.get_root()
    return root_element.render()
def make_map(path):
    """Create a folium map of the recorded track.

    Parameters
    ----------
    path : dict
        Path with entry 'coords': latitude and longitude coordinates in radians

    Returns
    -------
    m : folium.Map
        Map with path
    """
    coords = np.rad2deg(check_coords(path["coords"]))
    if len(coords) == 0:
        return folium.Map()

    # TODO find a way to colorize path according to velocities (update docstring)
    center = np.mean(coords, axis=0)
    m = folium.Map(location=center)

    # Start / finish / distance markers, all rendered the same way.
    flags = [(coords[0].tolist(), "Start", "red"),
             (coords[-1].tolist(), "Finish", "green")]
    for label, marker_index in generate_distance_markers(path).items():
        flags.append((coords[marker_index].tolist(), label, "blue"))
    for location, tooltip, color in flags:
        folium.Marker(
            location, tooltip=tooltip,
            icon=folium.Icon(color=color, icon="flag")).add_to(m)

    folium.PolyLine(coords).add_to(m)
    south_west = np.min(coords, axis=0).tolist()
    north_east = np.max(coords, axis=0).tolist()
    folium.FitBounds([south_west, north_east]).add_to(m)
    return m
def generate_distance_markers(path):
    """Generate indices of distance markers.

    Parameters
    ----------
    path : dict
        A path that has at least the entries 'timestamps' and 'coords'.

    Returns
    -------
    marker_indices : dict
        Mapping of label (e.g., '1 km') to corresponding index of the path.
    """
    cum_distances, _ = compute_distances_for_valid_trackpoints(path)
    total = cum_distances[-1]
    spacing = appropriate_partition(total)
    marks = np.arange(spacing, int(total), spacing)
    positions = np.searchsorted(cum_distances, marks)
    marker_indices = {}
    for mark, position in zip(marks, positions):
        marker_indices[d.display_distance(mark)] = position
    return marker_indices
def plot_velocity_histogram(path, ax):
    """Plot velocity histogram.

    Parameters
    ----------
    path : dict
        Path with entries 'velocities' and 'timestamps'
    ax : Matplotlib axis
        Axis on which we draw
    """
    velocities = path["velocities"]
    finite_velocities = np.isfinite(velocities)
    velocities = velocities[finite_velocities]
    # BUGFIX: np.any(np.nonzero(velocities)) tested the *indices* returned by
    # np.nonzero, not the values, so a single nonzero velocity at index 0
    # yielded False and the histogram was silently skipped. Test the values.
    if np.any(velocities != 0):
        no_outlier = np.logical_not(is_outlier(velocities))
        velocities = convert_mps_to_kmph(velocities[no_outlier])
        # weight each sample by its duration so bars reflect time shares
        delta_ts = np.gradient(path["timestamps"])[finite_velocities][no_outlier]
        ax.hist(velocities, bins=50, weights=delta_ts)
    ax.set_xlabel("Velocity [km/h]")
    ax.set_ylabel("Percentage")
    ax.set_yticks(())
def plot_elevation(path, ax, filter=True):
    """Plot elevation over distance.

    Parameters
    ----------
    path : dict
        Path with entries 'coords' and 'altitudes'
    ax : Matplotlib axis
        Axis on which we draw
    filter : bool, optional (default: True)
        Filter altitude data
    """
    # NOTE(review): 'filter' shadows the builtin, but renaming it would break
    # callers passing it by keyword, so it is kept.
    distances_in_m, valid_trackpoints = compute_distances_for_valid_trackpoints(path)
    if len(distances_in_m) > 0:
        distances_in_km = convert_m_to_km(distances_in_m)
        total_distance_in_m = np.nanmax(distances_in_m)
        altitudes = path["altitudes"][valid_trackpoints]
        # TODO exactly 0 seems to be an indicator for an error, a better method would be to detect jumps
        valid_altitudes = np.logical_and(np.isfinite(altitudes), altitudes != 0.0)
        distances_in_km = distances_in_km[valid_altitudes]
        altitudes = altitudes[valid_altitudes]
        if len(altitudes) == 0:
            return
        if filter:
            # smooth altitude noise before computing gain/loss/slope
            altitudes = filter_median_average(altitudes, config["plot"]["filter_width"])
        gain, loss, slope_in_percent = elevation_summary(altitudes, total_distance_in_m)
        ax.set_title(f"Elevation gain: {int(np.round(gain, 0))} m, "
                     f"loss: {int(np.round(loss, 0))} m, "
                     f"slope {np.round(slope_in_percent, 2)}%")
        ax.fill_between(distances_in_km, np.zeros_like(altitudes), altitudes, alpha=0.3)
        ax.plot(distances_in_km, altitudes)
        ax.set_xlim((0, convert_m_to_km(total_distance_in_m)))
        # leave 10% headroom above the highest point
        ax.set_ylim((min(altitudes), 1.1 * max(altitudes)))
        ax.set_xlabel("Distance [km]")
        ax.set_ylabel("Elevation [m]")
def plot_speed_heartrate(vel_axis, hr_axis, path):
    """Plot velocities and heartrates over time.

    Parameters
    ----------
    vel_axis : Matplotlib axis
        Axis on which we draw velocities
    hr_axis : Matplotlib axis
        Axis on which we draw heartrate data
    path : dict
        Path with entries 'timestamps', 'velocities', and 'heartrates'

    Returns
    -------
    handles : list
        Line handles to create a legend
    labels : list
        Labels for lines to create a legend
    """
    time_in_min = minutes_from_start(path["timestamps"])
    # smooth the raw series before plotting
    velocities = convert_mps_to_kmph(
        filter_median_average(path["velocities"], config["plot"]["filter_width"]))
    heartrates = filtered_heartrates(path, config["plot"]["filter_width"])
    # NOTE(review): mutates global matplotlib state for all later plots
    matplotlib.rcParams["font.size"] = 10
    matplotlib.rcParams["legend.fontsize"] = 10
    handles = []
    labels = []
    if np.isfinite(velocities).any():
        vel_line, = vel_axis.plot(time_in_min, velocities, color="#4f86f7",
                                  alpha=0.8, lw=2)
        handles.append(vel_line)
        labels.append("Velocity")
        vel_axis.set_xlim((time_in_min[0], time_in_min[-1]))
        median_velocity = np.nanmedian(velocities)
        max_velocity = np.nanmax(velocities)
        # scale the axis to the median so outliers do not flatten the curve
        vel_axis.set_ylim((0, max(2 * median_velocity, max_velocity)))
        vel_axis.set_xlabel("Time [min]")
        vel_axis.set_ylabel("Velocity [km/h]")
        vel_axis.tick_params(axis="both", which="both", length=0)
        vel_axis.grid(True)
    else:
        vel_axis.set_yticks(())
    if np.isfinite(heartrates).any():
        median_heartrate = np.nanmedian(heartrates)
        hr_line, = hr_axis.plot(time_in_min, heartrates, color="#a61f34",
                                alpha=0.8, lw=2)
        handles.append(hr_line)
        labels.append("Heart Rate")
        hr_axis.set_ylim((0, 2 * median_heartrate))
        hr_axis.set_ylabel("Heart Rate [bpm]")
        hr_axis.tick_params(axis="both", which="both", length=0)
        hr_axis.spines["top"].set_visible(False)
    else:
        hr_axis.set_yticks(())
        hr_axis.grid(False)
    return handles, labels
| [
"numpy.zeros_like",
"numpy.nanmedian",
"slither.core.unit_conversions.convert_m_to_km",
"numpy.searchsorted",
"numpy.isfinite",
"slither.core.unit_conversions.convert_mps_to_kmph",
"numpy.nonzero",
"numpy.min",
"numpy.mean",
"numpy.max",
"slither.core.unit_conversions.minutes_from_start",
"fol... | [((2818, 2856), 'numpy.searchsorted', 'np.searchsorted', (['distances', 'thresholds'], {}), '(distances, thresholds)\n', (2833, 2856), True, 'import numpy as np\n'), ((3295, 3318), 'numpy.isfinite', 'np.isfinite', (['velocities'], {}), '(velocities)\n', (3306, 3318), True, 'import numpy as np\n'), ((5972, 6010), 'slither.core.unit_conversions.minutes_from_start', 'minutes_from_start', (["path['timestamps']"], {}), "(path['timestamps'])\n", (5990, 6010), False, 'from slither.core.unit_conversions import convert_m_to_km, convert_mps_to_kmph, minutes_from_start\n'), ((1100, 1112), 'folium.Map', 'folium.Map', ([], {}), '()\n', (1110, 1112), False, 'import folium\n'), ((1140, 1163), 'numpy.mean', 'np.mean', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (1147, 1163), True, 'import numpy as np\n'), ((1451, 1478), 'folium.Map', 'folium.Map', ([], {'location': 'center'}), '(location=center)\n', (1461, 1478), False, 'import folium\n'), ((3380, 3402), 'numpy.nonzero', 'np.nonzero', (['velocities'], {}), '(velocities)\n', (3390, 3402), True, 'import numpy as np\n'), ((3486, 3529), 'slither.core.unit_conversions.convert_mps_to_kmph', 'convert_mps_to_kmph', (['velocities[no_outlier]'], {}), '(velocities[no_outlier])\n', (3505, 3529), False, 'from slither.core.unit_conversions import convert_m_to_km, convert_mps_to_kmph, minutes_from_start\n'), ((4221, 4252), 'slither.core.unit_conversions.convert_m_to_km', 'convert_m_to_km', (['distances_in_m'], {}), '(distances_in_m)\n', (4236, 4252), False, 'from slither.core.unit_conversions import convert_m_to_km, convert_mps_to_kmph, minutes_from_start\n'), ((4283, 4308), 'numpy.nanmax', 'np.nanmax', (['distances_in_m'], {}), '(distances_in_m)\n', (4292, 4308), True, 'import numpy as np\n'), ((6653, 6677), 'numpy.nanmedian', 'np.nanmedian', (['velocities'], {}), '(velocities)\n', (6665, 6677), True, 'import numpy as np\n'), ((6701, 6722), 'numpy.nanmax', 'np.nanmax', (['velocities'], {}), '(velocities)\n', (6710, 6722), 
True, 'import numpy as np\n'), ((7085, 7109), 'numpy.nanmedian', 'np.nanmedian', (['heartrates'], {}), '(heartrates)\n', (7097, 7109), True, 'import numpy as np\n'), ((4513, 4535), 'numpy.isfinite', 'np.isfinite', (['altitudes'], {}), '(altitudes)\n', (4524, 4535), True, 'import numpy as np\n'), ((5145, 5169), 'numpy.zeros_like', 'np.zeros_like', (['altitudes'], {}), '(altitudes)\n', (5158, 5169), True, 'import numpy as np\n'), ((6340, 6363), 'numpy.isfinite', 'np.isfinite', (['velocities'], {}), '(velocities)\n', (6351, 6363), True, 'import numpy as np\n'), ((7027, 7050), 'numpy.isfinite', 'np.isfinite', (['heartrates'], {}), '(heartrates)\n', (7038, 7050), True, 'import numpy as np\n'), ((2022, 2045), 'folium.PolyLine', 'folium.PolyLine', (['coords'], {}), '(coords)\n', (2037, 2045), False, 'import folium\n'), ((2077, 2099), 'numpy.min', 'np.min', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (2083, 2099), True, 'import numpy as np\n'), ((2130, 2152), 'numpy.max', 'np.max', (['coords'], {'axis': '(0)'}), '(coords, axis=0)\n', (2136, 2152), True, 'import numpy as np\n'), ((2170, 2212), 'folium.FitBounds', 'folium.FitBounds', (['[south_west, north_east]'], {}), '([south_west, north_east])\n', (2186, 2212), False, 'import folium\n'), ((3549, 3580), 'numpy.gradient', 'np.gradient', (["path['timestamps']"], {}), "(path['timestamps'])\n", (3560, 3580), True, 'import numpy as np\n'), ((5261, 5297), 'slither.core.unit_conversions.convert_m_to_km', 'convert_m_to_km', (['total_distance_in_m'], {}), '(total_distance_in_m)\n', (5276, 5297), False, 'from slither.core.unit_conversions import convert_m_to_km, convert_mps_to_kmph, minutes_from_start\n'), ((5070, 5099), 'numpy.round', 'np.round', (['slope_in_percent', '(2)'], {}), '(slope_in_percent, 2)\n', (5078, 5099), True, 'import numpy as np\n'), ((1568, 1605), 'folium.Icon', 'folium.Icon', ([], {'color': '"""red"""', 'icon': '"""flag"""'}), "(color='red', icon='flag')\n", (1579, 1605), False, 'import folium\n'), 
((1708, 1747), 'folium.Icon', 'folium.Icon', ([], {'color': '"""green"""', 'icon': '"""flag"""'}), "(color='green', icon='flag')\n", (1719, 1747), False, 'import folium\n'), ((4956, 4973), 'numpy.round', 'np.round', (['gain', '(0)'], {}), '(gain, 0)\n', (4964, 4973), True, 'import numpy as np\n'), ((5015, 5032), 'numpy.round', 'np.round', (['loss', '(0)'], {}), '(loss, 0)\n', (5023, 5032), True, 'import numpy as np\n'), ((1964, 2002), 'folium.Icon', 'folium.Icon', ([], {'color': '"""blue"""', 'icon': '"""flag"""'}), "(color='blue', icon='flag')\n", (1975, 2002), False, 'import folium\n')] |
import numpy as np
def pair_metric(rates, natural_rates):
    """Negative inner product of the rates with the (offset) natural rates.

    The tiny 1e-8 offset keeps exactly-zero natural rates from zeroing out
    their contribution entirely.
    """
    shifted_natural = natural_rates.rates + 1e-8
    return -np.sum(rates.rates * shifted_natural)
| [
"numpy.sum"
] | [((73, 124), 'numpy.sum', 'np.sum', (['(rates.rates * (natural_rates.rates + 1e-08))'], {}), '(rates.rates * (natural_rates.rates + 1e-08))\n', (79, 124), True, 'import numpy as np\n')] |
import matplotlib.pyplot as plt
import networkx as nx
from networkx.drawing.nx_pydot import graphviz_layout
import bitarray as ba
import numpy as np
from src.tangles import Tangle, core_algorithm
from src.utils import matching_items, Orientation
MAX_CLUSTERS = 50
class TangleNode(object):
    """A node of the tangle search tree.

    Each node records which cut (and orientation) extended its parent's
    tangle, whether that extension split the tree, and the resulting tangle.
    """
    def __init__(self, parent, right_child, left_child, is_left_child, splitting,
                 did_split, last_cut_added_id, last_cut_added_orientation, tangle):
        self.parent = parent
        self.left_child = left_child
        self.right_child = right_child
        self.is_left_child = is_left_child
        self.splitting = splitting
        self.did_split = did_split
        self.last_cut_added_id = last_cut_added_id
        self.last_cut_added_orientation = last_cut_added_orientation
        self.tangle = tangle
    def __str__(self, height=0):  # pragma: no cover
        if self.parent is None:
            lines = ['Root']
        else:
            indent = ' ' * height
            lines = ['{}{} -> {}'.format(indent, self.last_cut_added_id,
                                         self.last_cut_added_orientation)]
        for child in (self.left_child, self.right_child):
            if child is not None:
                lines.append(child.__str__(height=height + 1))
        return '\n'.join(lines)
    def is_leaf(self):
        """True iff the node has neither a left nor a right child."""
        return self.left_child is None and self.right_child is None
class ContractedTangleNode(TangleNode):
    """Node of a contracted tangle tree.

    Copies all attributes of an existing TangleNode, then resets the tree
    links and adds the bookkeeping fields used for characterizing cuts and
    soft predictions.
    """
    def __init__(self, parent, node):
        # Re-use every attribute value of the original node by splatting its
        # __dict__ into TangleNode.__init__ (the keys match its parameters)...
        attributes = node.__dict__
        super().__init__(**attributes)
        # ...then override the links: children are re-attached (or not) by
        # the contraction pass itself.
        self.parent = parent
        self.right_child = None
        self.left_child = None
        self.characterizing_cuts = None
        self.characterizing_cuts_left = None
        self.characterizing_cuts_right = None
        self.is_left_child_deleted = False
        self.is_right_child_deleted = False
        # soft membership probabilities; assigned by
        # compute_soft_predictions_children
        self.p = None
    def __str__(self, height=0):  # pragma: no cover
        string = ""
        if self.parent is None:
            string += 'Root\n'
        padding = ' '
        # one line per side listing the characterizing cuts, '' when unset
        string_cuts = ['{} -> {}'.format(k, v) for k, v in self.characterizing_cuts_left.items()] \
            if self.characterizing_cuts_left is not None else ''
        string += '{}{} left: {}\n'.format(padding * height, self.last_cut_added_id, string_cuts)
        string_cuts = ['{} -> {}'.format(k, v) for k, v in self.characterizing_cuts_right.items()] \
            if self.characterizing_cuts_right is not None else ''
        string += '{}{} right: {}\n'.format(padding * height, self.last_cut_added_id, string_cuts)
        if self.left_child is not None:
            string += '\n'
            string += self.left_child.__str__(height=height + 1)
        if self.right_child is not None:
            string += '\n'
            string += self.right_child.__str__(height=height + 1)
        return string
def _add_new_child(current_node, tangle, last_cut_added_id, last_cut_added_orientation, did_split):
    """Create a TangleNode for ``tangle`` and attach it to ``current_node``.

    The orientation decides the side: a truthy orientation becomes the left
    child, otherwise the right child. Returns the freshly attached node.
    """
    child = TangleNode(parent=current_node,
                       right_child=None,
                       left_child=None,
                       is_left_child=last_cut_added_orientation,
                       splitting=False,
                       did_split=did_split,
                       last_cut_added_id=last_cut_added_id,
                       last_cut_added_orientation=last_cut_added_orientation,
                       tangle=tangle)
    if child.is_left_child:
        current_node.left_child = child
    else:
        current_node.right_child = child
    return child
class TangleTree(object):
    """Search tree of tangles, grown by adding one cut at a time.

    ``active`` holds the current leaves that may still be extended,
    ``maximals`` the leaves that no orientation of a cut could extend, and
    ``will_split`` the nodes where both orientations were consistent.
    """
    def __init__(self, agreement, max_clusters=None):
        # dummy root: empty tangle, pseudo cut id -1
        self.root = TangleNode(parent=None,
                               right_child=None,
                               left_child=None,
                               splitting=None,
                               is_left_child=None,
                               did_split=True,
                               last_cut_added_id=-1,
                               last_cut_added_orientation=None,
                               tangle=Tangle())
        self.max_clusters = max_clusters
        self.active = [self.root]
        self.maximals = []
        self.will_split = []
        self.is_empty = True
        self.agreement = agreement
    def __str__(self):  # pragma: no cover
        return str(self.root)
    # function to add a single cut to the tree
    # function checks if tree is empty
    # --- stops if number of active leaves gets too large ! ---
    def add_cut(self, cut, cut_id):
        """Try to extend every active leaf with both orientations of ``cut``.

        Returns True iff at least one leaf could be extended; also returns
        False (without adding anything) once ``max_clusters`` leaves exist.
        """
        # NOTE(review): the message hardcodes "50" but the actual threshold is
        # self.max_clusters (cf. MAX_CLUSTERS at module level) — keep in sync.
        if self.max_clusters and len(self.active) >= self.max_clusters:
            print('Stopped since there are more then 50 leaves already.')
            return False
        current_active = self.active
        self.active = []
        could_add_one = False
        for current_node in current_active:
            could_add_node, did_split, is_maximal = self._add_children_to_node(current_node, cut, cut_id)
            could_add_one = could_add_one or could_add_node
            if did_split:
                current_node.splitting = True
                self.will_split.append(current_node)
            elif is_maximal:
                self.maximals.append(current_node)
        if could_add_one:
            self.is_empty = False
        return could_add_one
    def _add_children_to_node(self, current_node, cut, cut_id):
        """Attach children for the consistent orientations of ``cut``.

        Returns the triple (could_add_one, did_split, is_maximal).
        """
        old_tangle = current_node.tangle
        if cut.dtype is not bool:
            cut = cut.astype(bool)
        # try both orientations; Tangle.add returns None when the orientation
        # is inconsistent with the agreement parameter
        new_tangle_true = old_tangle.add(new_cut=ba.bitarray(cut.tolist()),
                                         new_cut_specification={cut_id: True},
                                         min_size=self.agreement)
        new_tangle_false = old_tangle.add(new_cut=ba.bitarray((~cut).tolist()),
                                          new_cut_specification={cut_id: False},
                                          min_size=self.agreement)
        could_add_one = False
        # both orientations consistent -> the tree branches here
        if new_tangle_true is not None and new_tangle_false is not None:
            did_split = True
        else:
            did_split = False
        # neither orientation consistent -> current_node is maximal
        if new_tangle_true is None and new_tangle_false is None:
            is_maximal = True
        else:
            is_maximal = False
        if new_tangle_true is not None:
            could_add_one = True
            new_node = _add_new_child(current_node=current_node,
                                      tangle=new_tangle_true,
                                      last_cut_added_id=cut_id,
                                      last_cut_added_orientation=True,
                                      did_split=did_split)
            self.active.append(new_node)
        if new_tangle_false is not None:
            could_add_one = True
            new_node = _add_new_child(current_node=current_node,
                                      tangle=new_tangle_false,
                                      last_cut_added_id=cut_id,
                                      last_cut_added_orientation=False,
                                      did_split=did_split)
            self.active.append(new_node)
        return could_add_one, did_split, is_maximal
    def plot_tree(self, path=None):  # pragma: no cover
        """Draw the tree via networkx/graphviz; save to ``path`` or show it."""
        tree = nx.Graph()
        labels = self._add_node_to_nx(tree, self.root)
        pos = graphviz_layout(tree, prog='dot')
        fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(20, 10))
        nx.draw_networkx(tree, pos=pos, ax=ax, labels=labels, node_size=1500)
        if path is not None:
            plt.savefig(path)
        else:
            plt.show()
    def _add_node_to_nx(self, tree, node, parent_id=None, direction=None):  # pragma: no cover
        """Recursively add nodes/edges to ``tree``; returns the label dict."""
        if node.parent is None:
            my_id = 'root'
            my_label = 'Root'
            tree.add_node(my_id)
        else:
            # node ids encode the path from the root ('root' + 'left'/'right'...)
            my_id = parent_id + direction
            str_o = 'T' if node.last_cut_added_orientation else 'F'
            my_label = '{} -> {}'.format(node.last_cut_added_id, str_o)
            tree.add_node(my_id)
            tree.add_edge(my_id, parent_id)
        labels = {my_id: my_label}
        if node.left_child is not None:
            left_labels = self._add_node_to_nx(tree, node.left_child, parent_id=my_id, direction='left')
            labels = {**labels, **left_labels}
        if node.right_child is not None:
            right_labels = self._add_node_to_nx(tree, node.right_child, parent_id=my_id, direction='right')
            labels = {**labels, **right_labels}
        return labels
class ContractedTangleTree(TangleTree):
    """Tangle tree with all non-branching chains contracted away.

    Only splitting nodes and leaves of the original tree survive. The
    contracted tree can then be pruned and annotated with characterizing
    cuts (``calculate_setP``).
    """
    # noinspection PyMissingConstructor
    def __init__(self, tree):
        # deliberately does not call TangleTree.__init__: the contracted tree
        # is rebuilt from an existing tree rather than grown from cuts
        self.is_empty = tree.is_empty
        self.processed_soft_prediction = False
        self.maximals = []
        self.splitting = []
        self.root = self._contract_subtree(parent=None, node=tree.root)
    def __str__(self):  # pragma: no cover
        return str(self.root)
    def prune(self, prune_depth=1):
        """Drop leaves whose path from their parent is at most ``prune_depth`` cuts."""
        self._delete_noise_clusters(self.root, depth=prune_depth)
        print("\t{} clusters after cutting out short paths.".format(len(self.maximals)))
    def _delete_noise_clusters(self, node, depth):
        """Recursively delete short-path ('noise') leaves and re-contract.

        NOTE(review): mutates self.maximals / self.splitting and node links
        in place while recursing; the statement order is load-bearing.
        """
        if depth == 0:
            return
        if node.is_leaf():
            if node.parent is None:
                # NOTE(review): this creates a Warning instance but never
                # raises or emits it — probably meant warnings.warn(...)
                Warning("This node is a leaf and the root at the same time. This tree is empty!")
            else:
                node_id = node.last_cut_added_id
                parent_id = node.parent.last_cut_added_id
                # number of cuts on the path from the parent to this leaf
                diff = node_id - parent_id
                if diff <= depth:
                    self.maximals.remove(node)
                    node.parent.splitting = False
                    if node.is_left_child:
                        node.parent.left_child = None
                        node.parent.is_left_child_deleted = True
                    else:
                        node.parent.right_child = None
                        # both children removed -> the parent becomes a cluster
                        if node.parent.is_left_child_deleted:
                            self.maximals.append(node.parent)
                    self._delete_noise_clusters(node.parent, depth)
        else:
            self._delete_noise_clusters(node.left_child, depth)
            if not node.splitting:
                self.splitting.remove(node)
            self._delete_noise_clusters(node.right_child, depth)
            if not node.splitting:
                # node stopped being a split: splice the surviving subtree up
                if node.parent is None:
                    if node.right_child is not None:
                        self.root = node.right_child
                        self.root.parent = None
                    elif node.left_child is not None:
                        self.root = node.left_child
                        self.root.parent = None
                else:
                    if node in self.splitting:
                        self.splitting.remove(node)
                    if node.is_left_child:
                        node.parent.left_child = node.left_child
                    else:
                        node.parent.right_child = node.left_child
            else:
                # NOTE(review): node still splits, yet the parent's link is
                # re-pointed at node.right_child, bypassing node — verify
                # this branch is intended.
                if node.right_child is not None:
                    if node.is_left_child:
                        node.parent.left_child = node.right_child
                    else:
                        node.parent.right_child = node.right_child
    def calculate_setP(self):
        """Compute the characterizing cuts for every split (see process_split)."""
        self._calculate_characterizing_cuts(self.root)
    def _calculate_characterizing_cuts(self, node):
        # leaves start with no characterizing cuts; splits merge their children's
        if node.left_child is None and node.right_child is None:
            node.characterizing_cuts = dict()
            return
        else:
            if node.left_child is not None and node.right_child is not None:
                self._calculate_characterizing_cuts(node.left_child)
                self._calculate_characterizing_cuts(node.right_child)
                process_split(node)
                return
    def _contract_subtree(self, parent, node):
        """Return the contracted copy of the subtree rooted at ``node``."""
        if node.left_child is None and node.right_child is None:
            # is leaf so create new node
            contracted_node = ContractedTangleNode(parent=parent, node=node)
            self.maximals.append(contracted_node)
            return contracted_node
        elif node.left_child is not None and node.right_child is not None:
            # is splitting so create new node
            contracted_node = ContractedTangleNode(parent=parent, node=node)
            contracted_left_child = self._contract_subtree(parent=contracted_node, node=node.left_child)
            contracted_node.left_child = contracted_left_child
            # let it know that it is a left child!
            contracted_node.left_child.is_left_child = True
            contracted_right_child = self._contract_subtree(parent=contracted_node, node=node.right_child)
            contracted_node.right_child = contracted_right_child
            # let it know that it is a right child!
            contracted_node.right_child.is_left_child = False
            self.splitting.append(contracted_node)
            return contracted_node
        else:
            # exactly one child: skip this node entirely (contract the chain)
            if node.left_child is not None:
                return self._contract_subtree(parent=parent, node=node.left_child)
            if node.right_child is not None:
                return self._contract_subtree(parent=parent, node=node.right_child)
def process_split(node):
    """Compute the cuts that characterize the split at ``node``.

    Merges the characterizing cuts of both children: cuts oriented in only
    one subtree are dropped, cuts oriented the same way in both are kept on
    this node only, and cuts oriented in opposite ways stay with the
    children as the cuts distinguishing the two sides.
    """
    # BUGFIX: a last_cut_added_id of 0 is a valid cut id but falsy, so the
    # old `if node.last_cut_added_id` check wrongly mapped it to -1 and
    # re-added cut 0 below; test for None explicitly.
    node_id = node.last_cut_added_id if node.last_cut_added_id is not None else -1
    characterizing_cuts_left = node.left_child.characterizing_cuts
    characterizing_cuts_right = node.right_child.characterizing_cuts
    orientation_left = node.left_child.tangle.specification
    orientation_right = node.right_child.tangle.specification
    # add the cuts oriented on the path between this node and each child
    for id_cut in range(node_id + 1, node.left_child.last_cut_added_id + 1):
        characterizing_cuts_left[id_cut] = Orientation(orientation_left[id_cut])
    for id_cut in range(node_id + 1, node.right_child.last_cut_added_id + 1):
        characterizing_cuts_right[id_cut] = Orientation(orientation_right[id_cut])
    # cuts oriented in only one of the two subtrees cannot characterize the
    # split: drop them (symmetric difference of the key sets)
    id_not_in_both = characterizing_cuts_left.keys() ^ characterizing_cuts_right.keys()
    for id_cut in id_not_in_both:
        characterizing_cuts_left.pop(id_cut, None)
        characterizing_cuts_right.pop(id_cut, None)
    # characterizing cuts of the current node
    characterizing_cuts = {**characterizing_cuts_left, **characterizing_cuts_right}
    id_cuts_oriented_same_way = matching_items(characterizing_cuts_left, characterizing_cuts_right)
    # if they are oriented in the same way they are not relevant for distinguishing
    # the children but might be for 'higher' nodes: delete in the left and right
    # parts but keep in the characteristics of the current node
    for id_cut in id_cuts_oriented_same_way:
        characterizing_cuts[id_cut] = characterizing_cuts_left[id_cut]
        characterizing_cuts_left.pop(id_cut)
        characterizing_cuts_right.pop(id_cut)
    id_cuts_oriented_both_ways = characterizing_cuts_left.keys() & characterizing_cuts_right.keys()
    # cuts oriented in both subtrees but in different directions are resolved
    # at this split and do not affect higher nodes anymore
    for id_cut in id_cuts_oriented_both_ways:
        characterizing_cuts.pop(id_cut)
    node.characterizing_cuts_left = characterizing_cuts_left
    node.characterizing_cuts_right = characterizing_cuts_right
    node.characterizing_cuts = characterizing_cuts
def compute_soft_predictions_node(characterizing_cuts, cuts, weight):
    """Accumulate weighted evidence for one side of a split.

    For every characterizing cut, points on the cut's oriented side receive
    that cut's weight; orientations other than 'left'/'right' contribute
    nothing. Returns an array with one score per point.
    """
    accumulated = np.zeros(len(cuts.values[0]))
    for cut_id, orientation in characterizing_cuts.items():
        row = cuts.values[cut_id]
        if orientation.direction == 'left':
            accumulated = accumulated + np.array(row) * weight[cut_id]
        elif orientation.direction == 'right':
            accumulated = accumulated + np.array(~row) * weight[cut_id]
    return accumulated
def compute_soft_predictions_children(node, cuts, weight, verbose=0):
    """Recursively propagate soft membership probabilities down the tree.

    The root owns every point with probability one; at each split the
    parent's probability mass is divided between the children in proportion
    to their (weighted) characterizing-cut evidence.
    """
    nb_points = cuts.values.shape[1]
    if node.parent is None:
        node.p = np.ones(nb_points)
    if node.left_child is None or node.right_child is None:
        return
    evidence_left = compute_soft_predictions_node(
        characterizing_cuts=node.characterizing_cuts_left,
        cuts=cuts,
        weight=weight)
    evidence_right = compute_soft_predictions_node(
        characterizing_cuts=node.characterizing_cuts_right,
        cuts=cuts,
        weight=weight)
    # normalize so the two children share the parent's probability mass
    total_evidence = evidence_left + evidence_right
    node.left_child.p = (evidence_left / total_evidence) * node.p
    node.right_child.p = (evidence_right / total_evidence) * node.p
    for child in (node.left_child, node.right_child):
        compute_soft_predictions_children(node=child,
                                          cuts=cuts,
                                          weight=weight,
                                          verbose=verbose)
def tangle_computation(cuts, agreement, verbose):
    """Build the tangle search tree by adding cuts in order of cost.

    Parameters
    ----------
    cuts: cuts
    agreement: int
        The agreement parameter
    verbose:
        verbosity level

    Returns
    -------
    tangles_tree: TangleTree
        The tangle search tree
    """
    if verbose >= 2:
        print("Using agreement = {} \n".format(agreement))
        print("Start tangle computation", flush=True)
    tangles_tree = TangleTree(agreement=agreement)
    old_order = None
    for order in np.unique(cuts.costs):
        # select the cuts whose cost falls into (old_order, order]
        if old_order is None:
            in_batch = cuts.costs <= order
        else:
            in_batch = np.all([cuts.costs > old_order,
                               cuts.costs <= order], axis=0)
        idx_cuts_order_i = np.where(in_batch)[0]
        if len(idx_cuts_order_i) == 0:
            old_order = order
            continue
        if verbose >= 2:
            print("\tCompute tangles of order {} with {} new cuts".format(order, len(idx_cuts_order_i)), flush=True)
        new_tree = core_algorithm(tree=tangles_tree,
                                  current_cuts=cuts.values[idx_cuts_order_i],
                                  idx_current_cuts=idx_cuts_order_i)
        if new_tree is None:
            # inconsistency: keep the tree built so far and stop
            max_order = cuts.costs[-1]
            if verbose >= 2:
                print('\t\tI could not add all the new cuts due to inconsistency')
                print('\n\tI stopped the computation at order {} instead of {}'.format(old_order, max_order),
                      flush=True)
            break
        tangles_tree = new_tree
        if verbose >= 2:
            print("\t\tI found {} tangles of order less or equal {}".format(len(new_tree.active), order),
                  flush=True)
        old_order = order
    if tangles_tree is not None:
        # remaining active leaves are maximal tangles as well
        tangles_tree.maximals += tangles_tree.active
        print("\t{} leaves before cutting out short paths.".format(len(tangles_tree.maximals)))
    return tangles_tree
| [
"src.tangles.core_algorithm",
"matplotlib.pyplot.savefig",
"src.tangles.Tangle",
"matplotlib.pyplot.show",
"src.utils.matching_items",
"numpy.ones",
"networkx.draw_networkx",
"networkx.drawing.nx_pydot.graphviz_layout",
"numpy.all",
"numpy.where",
"networkx.Graph",
"numpy.array",
"src.utils.... | [((14865, 14932), 'src.utils.matching_items', 'matching_items', (['characterizing_cuts_left', 'characterizing_cuts_right'], {}), '(characterizing_cuts_left, characterizing_cuts_right)\n', (14879, 14932), False, 'from src.utils import matching_items, Orientation\n'), ((18259, 18280), 'numpy.unique', 'np.unique', (['cuts.costs'], {}), '(cuts.costs)\n', (18268, 18280), True, 'import numpy as np\n'), ((7467, 7477), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (7475, 7477), True, 'import networkx as nx\n'), ((7548, 7581), 'networkx.drawing.nx_pydot.graphviz_layout', 'graphviz_layout', (['tree'], {'prog': '"""dot"""'}), "(tree, prog='dot')\n", (7563, 7581), False, 'from networkx.drawing.nx_pydot import graphviz_layout\n'), ((7601, 7649), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(1)', 'figsize': '(20, 10)'}), '(nrows=1, ncols=1, figsize=(20, 10))\n', (7613, 7649), True, 'import matplotlib.pyplot as plt\n'), ((7658, 7727), 'networkx.draw_networkx', 'nx.draw_networkx', (['tree'], {'pos': 'pos', 'ax': 'ax', 'labels': 'labels', 'node_size': '(1500)'}), '(tree, pos=pos, ax=ax, labels=labels, node_size=1500)\n', (7674, 7727), True, 'import networkx as nx\n'), ((14127, 14164), 'src.utils.Orientation', 'Orientation', (['orientation_left[id_cut]'], {}), '(orientation_left[id_cut])\n', (14138, 14164), False, 'from src.utils import matching_items, Orientation\n'), ((14288, 14326), 'src.utils.Orientation', 'Orientation', (['orientation_right[id_cut]'], {}), '(orientation_right[id_cut])\n', (14299, 14326), False, 'from src.utils import matching_items, Orientation\n'), ((16390, 16408), 'numpy.ones', 'np.ones', (['nb_points'], {}), '(nb_points)\n', (16397, 16408), True, 'import numpy as np\n'), ((7769, 7786), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (7780, 7786), True, 'import matplotlib.pyplot as plt\n'), ((7813, 7823), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7821, 7823), True, 
'import matplotlib.pyplot as plt\n'), ((18848, 18947), 'src.tangles.core_algorithm', 'core_algorithm', ([], {'tree': 'tangles_tree', 'current_cuts': 'cuts_order_i', 'idx_current_cuts': 'idx_cuts_order_i'}), '(tree=tangles_tree, current_cuts=cuts_order_i,\n idx_current_cuts=idx_cuts_order_i)\n', (18862, 18947), False, 'from src.tangles import Tangle, core_algorithm\n'), ((4281, 4289), 'src.tangles.Tangle', 'Tangle', ([], {}), '()\n', (4287, 4289), False, 'from src.tangles import Tangle, core_algorithm\n'), ((16084, 16108), 'numpy.array', 'np.array', (['cuts.values[i]'], {}), '(cuts.values[i])\n', (16092, 16108), True, 'import numpy as np\n'), ((18376, 18405), 'numpy.where', 'np.where', (['(cuts.costs <= order)'], {}), '(cuts.costs <= order)\n', (18384, 18405), True, 'import numpy as np\n'), ((16179, 16204), 'numpy.array', 'np.array', (['(~cuts.values[i])'], {}), '(~cuts.values[i])\n', (16187, 16204), True, 'import numpy as np\n'), ((18463, 18524), 'numpy.all', 'np.all', (['[cuts.costs > old_order, cuts.costs <= order]'], {'axis': '(0)'}), '([cuts.costs > old_order, cuts.costs <= order], axis=0)\n', (18469, 18524), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
import scipy as sp
from datetime import datetime
from datetime import timezone
import maria
# --- simulation configuration -------------------------------------------
atmosphere_config = {'n_layers'     : 32,    # how many layers to simulate, based on the integrated atmospheric model
                     'min_depth'    : 100,   # the height of the first layer
                     'max_depth'    : 3000,  # height of the top layer
                     'rel_atm_rms'  : 5e-1,
                     'outer_scale'  : 500}
# NOTE(review): this lissajous_box pointing config is immediately overwritten
# by the CES config below — delete one of the two or switch via a flag.
pointing_config = {'scan_type' : 'lissajous_box',
                   'duration'  : 300,'samp_freq' : 50,
                   'center_azim' : -45, 'center_elev' : 45,
                   'x_throw' : 5, 'x_period' : 21,
                   'y_throw' : 5, 'y_period' : 29}
pointing_config = {'scan_type' : 'CES',
                   'duration' : 60, 'samp_freq' : 20,
                   'center_azim' : 55, 'center_elev' : 45,
                   'az_throw' : 15, 'az_speed' : 1}
n_per = 128
# four blocks of n_per detectors at 38/96/220/150 GHz; the [n_per:] slice
# drops the first (38 GHz) block
nom_bands = np.r_[3.8e10*np.ones(n_per),
                  9.6e10*np.ones(n_per),
                  2.2e11*np.ones(n_per),
                  1.5e11*np.ones(n_per)][n_per:]
#bands = 1.5e11*np.ones(240)
#np.random.shuffle(bands)
#bands = 1.5e11 * np.ones(n_per)
bandwidths = 2.5e-1 * nom_bands  # 25% fractional bandwidth
wns = 1e-1 #* np.sqrt(nom_bands / nom_bands.min())
array_config = {'shape' : 'flower',
                'n'     : len(nom_bands),
                'fov'   : 1.,
                'nom_bands'  : nom_bands,
                'bandwidths' : bandwidths,
                'white_noise' : wns}
beams_config = {'optical_type' : 'diff_lim',
                'primary_size' : 5,
                'beam_model'  : 'top_hat',
                'min_beam_res' : 1 }
site_config = {'site' : 'ACT',
               'time' : datetime.now(timezone.utc).timestamp(),
               'weather_gen_method' : 'mean',
               'pwv' : 1,
               'region' : 'atacama' }
# NOTE(review): 'heights' is not used anywhere in the code shown here
heights = np.linspace(0,10000,100)
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.dpi'] = 256
def equalize(ax):
    """Give the axis equal x/y spans around the current centers.

    Expands the smaller of the two current ranges to match the larger one,
    keeping each center fixed. Returns the axis for chaining.
    """
    x_limits = ax.get_xlim()
    y_limits = ax.get_ylim()
    x_center = np.mean(x_limits)
    y_center = np.mean(y_limits)
    half_span = np.maximum(x_limits[1] - x_limits[0],
                           y_limits[1] - y_limits[0]) / 2
    ax.set_xlim(x_center - half_span, x_center + half_span)
    ax.set_ylim(y_center - half_span, y_center + half_span)
    return ax
new = True   # NOTE(review): `new` and `sim` appear unused in this chunk -- confirm
sim = True
# Build the atmosphere/telescope model from the configs above and run the
# time-domain simulation (atmosphere_config/pointing_config defined earlier).
tm = maria.model(atmosphere_config=atmosphere_config,
                 pointing_config=pointing_config,
                 beams_config=beams_config,
                 array_config=array_config,
                 site_config=site_config,
                 verbose=True)
tm.sim()
data = tm.tot_data
# Cross-correlation time lags between detector clusters, used below to fit an
# effective angular wind velocity per sub-scan.
pair_lags, clust_x, clust_y = maria.get_pair_lags(data,
                                                  tm.array.x,
                                                  tm.array.y,
                                                  tm.pointing.time,
                                                  tm.pointing.focal_azim,
                                                  tm.pointing.focal_elev,
                                                  n_clusters=6,
                                                  sub_scan_durs=[10])
# Model lag for a pair separated by complex offset dz under a screen advected
# at angular velocity (vx, vy); the 1e-16 guards against division by zero.
lag_flat = lambda dz, vx, vy : - (vx*np.real(dz) + vy*np.imag(dz)) / np.square(np.abs(vx+1j*vy) + 1e-16)
max_vel = 2  # fit bound on velocity components (units per fit_pair_lags -- confirm)
max_lag = 2  # plotting bound on lag axis
vel_pars = maria.fit_pair_lags(pair_lags, clust_x, clust_y, max_lag=max_lag, max_vel=max_vel)
fig,axes = plt.subplots(1,2,figsize=(16,8))
# Pairwise cluster offsets as complex numbers OZ = dx + i*dy; OA is the pair
# angle and OR the separation in degrees (1e-16 avoids /0 on the diagonal).
OX, OY = np.subtract.outer(clust_x, clust_x), np.subtract.outer(clust_y, clust_y)
OZ = OX + 1j*OY
OA, OR = np.angle(OZ), np.degrees(np.abs(OZ) + 1e-16)
a_ = np.linspace(-np.pi,np.pi,64)
# Left panel: measured lag-per-separation vs. pair angle with the fitted model
# curve; right panel: fitted velocity components, one point per sub-scan.
for i_spl in range(pair_lags.shape[0]):
    axes[0].scatter(OA, pair_lags[i_spl]/OR)
    axes[0].plot(a_,lag_flat(np.exp(1j*a_),*vel_pars[i_spl]) / np.degrees(1),label=f'{i_spl}')
    axes[1].scatter(*np.degrees(vel_pars[i_spl]))
axes[0].legend()
axes[0].set_ylim(-max_lag,max_lag)
# Draw crosshair axes on the velocity plane.
axes[1].plot([-max_vel,max_vel],[0,0],color='k')
axes[1].plot([0,0],[-max_vel,max_vel],color='k')
axes[1].set_xlim(-max_vel,max_vel), axes[1].set_ylim(-max_vel,max_vel)
#assert False
# NOTE(review): exec() of a user-specific absolute path -- non-portable and a
# code-injection hazard; prefer importing a shared matplotlib-style module.
exec(open('/Users/thomas/Desktop/atmosphere/mpl_defs').read())
do_clouds = True
if do_clouds:
    i = 0
    fig,ax = plt.subplots(1,1,figsize=(8,8))
    # Render atmosphere layer i on the sky, with the scan edge (dots) and the
    # boresight track (line) overlaid.
    # NOTE(review): shading='none' is not an accepted pcolormesh value in
    # recent Matplotlib (flat/nearest/gouraud/auto) -- confirm pinned version.
    ax.pcolormesh(np.degrees(tm.X[i]),
                  np.degrees(tm.Y[i]),
                  tm.vals[i],shading='none',cmap='RdBu_r')
    ax.scatter(np.degrees(np.real(tm.pointing.theta_edge_z[i]).T),
               np.degrees(np.imag(tm.pointing.theta_edge_z[i]).T),s=1e-1,c='k')
    ax.plot(np.degrees(np.real(tm.pointing.focal_theta_z[i])),
            np.degrees(np.imag(tm.pointing.focal_theta_z[i])),c='k')
    equalize(ax)
# Three figure sets: time streams, power spectra, and band/beam layout.
data_fig, data_axes = plt.subplots(3,1,figsize=(12,8),constrained_layout=True,sharex=True)
spec_fig, spec_axes = plt.subplots(1,2,figsize=(10,6),constrained_layout=True,sharey=True)
band_fig, band_axes = plt.subplots(1,2,figsize=(12,6),constrained_layout=True)
nf = 256  # number of log-spaced frequency bins for the binned spectra
freq = np.fft.fftfreq(tm.pointing.nt,tm.pointing.dt)
# Log-spaced bin centers from ~2 cycles per observation up to the maximum FFT
# frequency; bin edges sit half a logarithmic step either side of each center.
fmids = np.geomspace(2/tm.pointing.duration,freq.max(),nf)
rfreq = np.exp(np.gradient(np.log(fmids))).mean()
fbins = np.append(fmids/np.sqrt(rfreq),fmids[-1]*np.sqrt(rfreq))
pt = np.linspace(0,2*np.pi,16)  # angle parameter for drawing beam circles
from matplotlib.patches import Patch
from matplotlib.collections import EllipseCollection
handles = []  # legend patches, one per band
# One pass per nominal band, iterated in reverse so colors run high-to-low.
for iband, band in enumerate(tm.array.nom_band_list[::-1]):
    # NOTE(review): `iband` counts through the *reversed* list, but
    # band_pass_list[iband]/band_freq_list[iband] below index the original
    # order -- this mismatches bands unless those lists are symmetric. Verify.
    color = mpl.cm.get_cmap('plasma')(.9*(len(tm.array.nom_band_list)-iband-1)/(1e-6+len(tm.array.nom_band_list)-1))
    handles.append(Patch(label=f'{np.round(band*1e-9,1)} GHz',color=color))
    bm = tm.array.nom_bands==band  # boolean detector mask for this band
    # Half-width at half-max of a diffraction-limited beam: 1.22 lambda / D / 2.
    hwhm = 1.22 * (2.998e8 / band) / tm.beams.aperture / 2
    # PLOT DATA
    # Time streams for 4 randomly chosen detectors of this band.
    for idet, det in enumerate(np.random.choice(data[bm].shape[0],4,replace=False)):
        data_axes[0].plot(tm.pointing.time,tm.epwv[bm][det],color=color)
        data_axes[1].plot(tm.pointing.time,1e-3*data[bm][det],color=color)
        data_axes[2].plot(tm.pointing.time,data[bm][det]-data[bm][det].mean(axis=0),color=color)
    # PLOT SPECTRA
    # Hann-windowed periodograms: `ps` for the raw streams, `ncps` for the
    # common-mode-subtracted streams; both normalized by 2*dt/sum(window).
    ps = np.square(np.abs(np.fft.fft(data[bm] * np.hanning(data[bm].shape[-1])[None,:],axis=-1)))
    ps *= (2*tm.pointing.dt/np.hanning(data[bm].shape[-1]).sum())
    ncps = np.square(np.abs(np.fft.fft((data[bm]-data[bm].mean(axis=0)) * np.hanning(data[bm].shape[-1])[None,:],axis=-1)))
    ncps *= (2*tm.pointing.dt/np.hanning(data[bm].shape[-1]).sum())
    # Average over detectors, then rebin onto the log-spaced frequency grid.
    mps = ps.mean(axis=0); bmps = sp.stats.binned_statistic(freq,mps,bins=fbins,statistic='mean')[0]
    ncmps = ncps.mean(axis=0); ncbmps = sp.stats.binned_statistic(freq,ncmps,bins=fbins,statistic='mean')[0]
    nn = ~np.isnan(bmps)  # drop empty bins
    spec_axes[0].plot(fmids[nn],bmps[nn],color=color)
    spec_axes[1].plot(fmids[nn],ncbmps[nn],color=color)
    # PLOT BANDS
    # Beam HWHM circles around each detector of this band.
    band_axes[0].plot(np.degrees(tm.array.x[bm][None,:] + hwhm*np.cos(pt[:,None])),
                      np.degrees(tm.array.y[bm][None,:] + hwhm*np.sin(pt[:,None])),
                      lw=1e0,color=color)
    ec = EllipseCollection(np.degrees(2*hwhm)*np.ones(bm.sum()),
                           np.degrees(2*hwhm)*np.ones(bm.sum()),
                           np.zeros(bm.sum()), units='x',
                           offsets=np.degrees(np.column_stack([tm.array.x[bm],tm.array.y[bm]])),
                           transOffset=band_axes[0].transData,
                           color=color,alpha=.5)
    band_axes[0].add_collection(ec)
    band_axes[0].set_xlabel(r'$\theta_x$ (degrees)'); band_axes[0].set_ylabel(r'$\theta_y$ (degrees)')
    # Atmospheric brightness-temperature spectrum at the nearest tabulated
    # effective-PWV and elevation, weighted by this band's bandpass.
    i_epwv = np.argmin(np.abs(tm.atmosphere.spectra_dict['epwv'] - tm.site.weather['pwv']))
    i_elev = np.argmin(np.abs(tm.atmosphere.spectra_dict['elev'] - tm.pointing.elev.mean()))
    spectrum = tm.array.band_pass_list[iband] * np.interp(tm.array.band_freq_list[iband],
                                                       tm.atmosphere.spectra_dict['freq'],
                                                       tm.atmosphere.spectra_dict['temp'][i_elev,i_epwv])
    band_axes[1].plot(1e-9*tm.array.band_freq_list[iband], spectrum, color=color, lw=5, alpha=.5)
    #band_axes[1].scatter(1e-9*tm.array.band_freq_list[iband], spectrum, color=color, s=16)
data_axes[2].set_xlabel('$t$ (s)')
data_axes[0].set_ylabel(r'$P_\mathrm{eff}(t)$ (mm)')
data_axes[1].set_ylabel(r'$T_\mathrm{atm}(t)$ (K$_\mathrm{CMB}$)')
data_axes[2].set_ylabel(r'$\Delta T_\mathrm{atm}(t)$ (mK$_\mathrm{CMB}$)')
# Overplot a reference f^(-8/3) power law anchored at the highest-frequency bin.
spec_axes[0].plot(fmids[nn],(bmps[nn][-1]/1e0**(-8/3))*fmids[nn]**(-8/3),color='k')
spec_axes[1].plot(fmids[nn],(ncbmps[nn][-1]/1e0**(-8/3))*fmids[nn]**(-8/3),color='k')
spec_axes[0].loglog()
spec_axes[1].loglog()
# Full tabulated atmospheric spectrum for reference, in black.
band_axes[1].plot(1e-9*tm.atmosphere.spectra_dict['freq'], tm.atmosphere.spectra_dict['temp'][i_elev,i_epwv],color='k')
band_axes[1].set_xlabel(r'$\nu$ (GHz)'); band_axes[1].set_ylabel('Temperature (K)')
# NOTE(review): grid(b=...) was renamed grid(visible=...) in Matplotlib 3.5.
band_axes[1].grid(b=False)
band_axes[1].set_xscale('log')
band_axes[1].set_ylim(0,60)
band_ticks = np.array([30,40,60,100,150,200,300,500,1000])
band_axes[1].set_xticks(band_ticks)
band_axes[1].set_xticklabels([f'{tick:.00f}' for tick in band_axes[1].get_xticks()],rotation=45)
band_axes[1].set_xlim(80,300)
#array_ax.plot(np.degrees(tm.array.edge_x).T,np.degrees(tm.array.edge_y).T,
#              color='k',lw=5e-1)
# Reverse the handles so the legends match the original band order.
data_axes[0].legend(handles=handles[::-1])
spec_axes[0].legend(handles=handles[::-1])
band_axes[0].legend(handles=handles[::-1])
#fig,ax = plt.subplots(1,1,figsize=(8,8),constrained_layout=True)
#axes[1].scatter(tm.pointing.time,np.degrees(np.abs(tm.atmosphere.aam)))
# Roughly square grid of subplots, one per atmospheric layer depth.
beam_plot_height = int(np.sqrt(len(tm.atmosphere.depths)))
beam_plot_length = int(np.ceil(len(tm.atmosphere.depths)/beam_plot_height))
#fig,ax = plt.subplots(1,1,figsize=(8,8),constrained_layout=True)#,sharex=True,sharey=True)
fig,axes = plt.subplots(beam_plot_height,beam_plot_length,
                        figsize=(2*beam_plot_length,2*beam_plot_height),constrained_layout=True)
for ilay,depth in enumerate(tm.atmosphere.depths):
    ax = axes.ravel()[ilay]
    # Half-size of the beam-filter footprint at this layer's depth.
    filt_lim_r = depth*(np.abs(tm.beam_filter_sides[ilay]).max())
    extent = [-filt_lim_r,filt_lim_r,-filt_lim_r,filt_lim_r]
    ax.imshow(tm.beam_filters[ilay],extent=extent)
    # NOTE(review): grid(b=...) was renamed grid(visible=...) in Matplotlib 3.5.
    ax.grid(b=False)
    #ax.set_xlabel(r'$\theta_x$ (arcmin.)'); ax.set_ylabel(r'$\theta_y$ (arcmin.)')
# For small arrays, also draw every detector's beam circle at each layer.
if tm.array.n < 20:
    fig,axes = plt.subplots(beam_plot_height,beam_plot_length,
                        figsize=(2*beam_plot_length,2*beam_plot_height),constrained_layout=True,sharex=True,sharey=True)
    fig.suptitle(f'D = {tm.beams.aperture:.02f}m')
    for ilay,depth in enumerate(tm.atmosphere.depths):
        # Row/column of this layer's subplot in the grid.
        iy = ilay % beam_plot_length
        ix = int(ilay / beam_plot_length)
        axes[ix,iy].set_title(f'z = {depth:.02f}m')
        axes[ix,iy].plot(np.degrees(tm.array.x+tm.beams_waists[ilay]/depth*np.cos(pt)[:,None]),
                         np.degrees(tm.array.y+tm.beams_waists[ilay]/depth*np.sin(pt)[:,None]),lw=.5)
| [
"numpy.abs",
"matplotlib.cm.get_cmap",
"numpy.angle",
"numpy.ones",
"numpy.isnan",
"numpy.imag",
"numpy.mean",
"numpy.sin",
"numpy.exp",
"numpy.interp",
"numpy.round",
"numpy.degrees",
"numpy.fft.fftfreq",
"maria.get_pair_lags",
"numpy.linspace",
"numpy.random.choice",
"numpy.real",
... | [((1919, 1945), 'numpy.linspace', 'np.linspace', (['(0)', '(10000)', '(100)'], {}), '(0, 10000, 100)\n', (1930, 1945), True, 'import numpy as np\n'), ((2296, 2479), 'maria.model', 'maria.model', ([], {'atmosphere_config': 'atmosphere_config', 'pointing_config': 'pointing_config', 'beams_config': 'beams_config', 'array_config': 'array_config', 'site_config': 'site_config', 'verbose': '(True)'}), '(atmosphere_config=atmosphere_config, pointing_config=\n pointing_config, beams_config=beams_config, array_config=array_config,\n site_config=site_config, verbose=True)\n', (2307, 2479), False, 'import maria\n'), ((2595, 2753), 'maria.get_pair_lags', 'maria.get_pair_lags', (['data', 'tm.array.x', 'tm.array.y', 'tm.pointing.time', 'tm.pointing.focal_azim', 'tm.pointing.focal_elev'], {'n_clusters': '(6)', 'sub_scan_durs': '[10]'}), '(data, tm.array.x, tm.array.y, tm.pointing.time, tm.\n pointing.focal_azim, tm.pointing.focal_elev, n_clusters=6,\n sub_scan_durs=[10])\n', (2614, 2753), False, 'import maria\n'), ((3119, 3206), 'maria.fit_pair_lags', 'maria.fit_pair_lags', (['pair_lags', 'clust_x', 'clust_y'], {'max_lag': 'max_lag', 'max_vel': 'max_vel'}), '(pair_lags, clust_x, clust_y, max_lag=max_lag, max_vel=\n max_vel)\n', (3138, 3206), False, 'import maria\n'), ((3214, 3249), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 8)'}), '(1, 2, figsize=(16, 8))\n', (3226, 3249), True, 'import matplotlib.pyplot as plt\n'), ((3410, 3440), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', '(64)'], {}), '(-np.pi, np.pi, 64)\n', (3421, 3440), True, 'import numpy as np\n'), ((4569, 4642), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(1)'], {'figsize': '(12, 8)', 'constrained_layout': '(True)', 'sharex': '(True)'}), '(3, 1, figsize=(12, 8), constrained_layout=True, sharex=True)\n', (4581, 4642), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4733), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 
6)', 'constrained_layout': '(True)', 'sharey': '(True)'}), '(1, 2, figsize=(10, 6), constrained_layout=True, sharey=True)\n', (4672, 4733), True, 'import matplotlib.pyplot as plt\n'), ((4751, 4811), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 6)', 'constrained_layout': '(True)'}), '(1, 2, figsize=(12, 6), constrained_layout=True)\n', (4763, 4811), True, 'import matplotlib.pyplot as plt\n'), ((4829, 4875), 'numpy.fft.fftfreq', 'np.fft.fftfreq', (['tm.pointing.nt', 'tm.pointing.dt'], {}), '(tm.pointing.nt, tm.pointing.dt)\n', (4843, 4875), True, 'import numpy as np\n'), ((5058, 5087), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(16)'], {}), '(0, 2 * np.pi, 16)\n', (5069, 5087), True, 'import numpy as np\n'), ((8862, 8915), 'numpy.array', 'np.array', (['[30, 40, 60, 100, 150, 200, 300, 500, 1000]'], {}), '([30, 40, 60, 100, 150, 200, 300, 500, 1000])\n', (8870, 8915), True, 'import numpy as np\n'), ((9718, 9849), 'matplotlib.pyplot.subplots', 'plt.subplots', (['beam_plot_height', 'beam_plot_length'], {'figsize': '(2 * beam_plot_length, 2 * beam_plot_height)', 'constrained_layout': '(True)'}), '(beam_plot_height, beam_plot_length, figsize=(2 *\n beam_plot_length, 2 * beam_plot_height), constrained_layout=True)\n', (9730, 9849), True, 'import matplotlib.pyplot as plt\n'), ((3257, 3292), 'numpy.subtract.outer', 'np.subtract.outer', (['clust_x', 'clust_x'], {}), '(clust_x, clust_x)\n', (3274, 3292), True, 'import numpy as np\n'), ((3294, 3329), 'numpy.subtract.outer', 'np.subtract.outer', (['clust_y', 'clust_y'], {}), '(clust_y, clust_y)\n', (3311, 3329), True, 'import numpy as np\n'), ((3359, 3371), 'numpy.angle', 'np.angle', (['OZ'], {}), '(OZ)\n', (3367, 3371), True, 'import numpy as np\n'), ((4065, 4099), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 8)'}), '(1, 1, figsize=(8, 8))\n', (4077, 4099), True, 'import matplotlib.pyplot as plt\n'), ((10276, 10437), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['beam_plot_height', 'beam_plot_length'], {'figsize': '(2 * beam_plot_length, 2 * beam_plot_height)', 'constrained_layout': '(True)', 'sharex': '(True)', 'sharey': '(True)'}), '(beam_plot_height, beam_plot_length, figsize=(2 *\n beam_plot_length, 2 * beam_plot_height), constrained_layout=True,\n sharex=True, sharey=True)\n', (10288, 10437), True, 'import matplotlib.pyplot as plt\n'), ((2112, 2124), 'numpy.mean', 'np.mean', (['xls'], {}), '(xls)\n', (2119, 2124), True, 'import numpy as np\n'), ((2126, 2138), 'numpy.mean', 'np.mean', (['yls'], {}), '(yls)\n', (2133, 2138), True, 'import numpy as np\n'), ((4116, 4135), 'numpy.degrees', 'np.degrees', (['tm.X[i]'], {}), '(tm.X[i])\n', (4126, 4135), True, 'import numpy as np\n'), ((4155, 4174), 'numpy.degrees', 'np.degrees', (['tm.Y[i]'], {}), '(tm.Y[i])\n', (4165, 4174), True, 'import numpy as np\n'), ((5008, 5022), 'numpy.sqrt', 'np.sqrt', (['rfreq'], {}), '(rfreq)\n', (5015, 5022), True, 'import numpy as np\n'), ((5033, 5047), 'numpy.sqrt', 'np.sqrt', (['rfreq'], {}), '(rfreq)\n', (5040, 5047), True, 'import numpy as np\n'), ((5267, 5292), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['"""plasma"""'], {}), "('plasma')\n", (5282, 5292), True, 'import matplotlib as mpl\n'), ((5594, 5647), 'numpy.random.choice', 'np.random.choice', (['data[bm].shape[0]', '(4)'], {'replace': '(False)'}), '(data[bm].shape[0], 4, replace=False)\n', (5610, 5647), True, 'import numpy as np\n'), ((6321, 6387), 'scipy.stats.binned_statistic', 'sp.stats.binned_statistic', (['freq', 'mps'], {'bins': 'fbins', 'statistic': '"""mean"""'}), "(freq, mps, bins=fbins, statistic='mean')\n", (6346, 6387), True, 'import scipy as sp\n'), ((6428, 6496), 'scipy.stats.binned_statistic', 'sp.stats.binned_statistic', (['freq', 'ncmps'], {'bins': 'fbins', 'statistic': '"""mean"""'}), "(freq, ncmps, bins=fbins, statistic='mean')\n", (6453, 6496), True, 'import scipy as sp\n'), ((6510, 6524), 'numpy.isnan', 
'np.isnan', (['bmps'], {}), '(bmps)\n', (6518, 6524), True, 'import numpy as np\n'), ((7444, 7511), 'numpy.abs', 'np.abs', (["(tm.atmosphere.spectra_dict['epwv'] - tm.site.weather['pwv'])"], {}), "(tm.atmosphere.spectra_dict['epwv'] - tm.site.weather['pwv'])\n", (7450, 7511), True, 'import numpy as np\n'), ((7658, 7792), 'numpy.interp', 'np.interp', (['tm.array.band_freq_list[iband]', "tm.atmosphere.spectra_dict['freq']", "tm.atmosphere.spectra_dict['temp'][i_elev, i_epwv]"], {}), "(tm.array.band_freq_list[iband], tm.atmosphere.spectra_dict['freq'\n ], tm.atmosphere.spectra_dict['temp'][i_elev, i_epwv])\n", (7667, 7792), True, 'import numpy as np\n'), ((1772, 1798), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (1784, 1798), False, 'from datetime import datetime\n'), ((3384, 3394), 'numpy.abs', 'np.abs', (['OZ'], {}), '(OZ)\n', (3390, 3394), True, 'import numpy as np\n'), ((3594, 3607), 'numpy.degrees', 'np.degrees', (['(1)'], {}), '(1)\n', (3604, 3607), True, 'import numpy as np\n'), ((3652, 3679), 'numpy.degrees', 'np.degrees', (['vel_pars[i_spl]'], {}), '(vel_pars[i_spl])\n', (3662, 3679), True, 'import numpy as np\n'), ((4415, 4452), 'numpy.real', 'np.real', (['tm.pointing.focal_theta_z[i]'], {}), '(tm.pointing.focal_theta_z[i])\n', (4422, 4452), True, 'import numpy as np\n'), ((4478, 4515), 'numpy.imag', 'np.imag', (['tm.pointing.focal_theta_z[i]'], {}), '(tm.pointing.focal_theta_z[i])\n', (4485, 4515), True, 'import numpy as np\n'), ((6900, 6920), 'numpy.degrees', 'np.degrees', (['(2 * hwhm)'], {}), '(2 * hwhm)\n', (6910, 6920), True, 'import numpy as np\n'), ((6966, 6986), 'numpy.degrees', 'np.degrees', (['(2 * hwhm)'], {}), '(2 * hwhm)\n', (6976, 6986), True, 'import numpy as np\n'), ((1000, 1014), 'numpy.ones', 'np.ones', (['n_per'], {}), '(n_per)\n', (1007, 1014), True, 'import numpy as np\n'), ((1041, 1055), 'numpy.ones', 'np.ones', (['n_per'], {}), '(n_per)\n', (1048, 1055), True, 'import numpy as np\n'), ((1082, 
1096), 'numpy.ones', 'np.ones', (['n_per'], {}), '(n_per)\n', (1089, 1096), True, 'import numpy as np\n'), ((1123, 1137), 'numpy.ones', 'np.ones', (['n_per'], {}), '(n_per)\n', (1130, 1137), True, 'import numpy as np\n'), ((2159, 2176), 'numpy.subtract', 'np.subtract', (['*xls'], {}), '(*xls)\n', (2170, 2176), True, 'import numpy as np\n'), ((2178, 2195), 'numpy.subtract', 'np.subtract', (['*yls'], {}), '(*yls)\n', (2189, 2195), True, 'import numpy as np\n'), ((3056, 3078), 'numpy.abs', 'np.abs', (['(vx + 1.0j * vy)'], {}), '(vx + 1.0j * vy)\n', (3062, 3078), True, 'import numpy as np\n'), ((3560, 3577), 'numpy.exp', 'np.exp', (['(1.0j * a_)'], {}), '(1.0j * a_)\n', (3566, 3577), True, 'import numpy as np\n'), ((4266, 4302), 'numpy.real', 'np.real', (['tm.pointing.theta_edge_z[i]'], {}), '(tm.pointing.theta_edge_z[i])\n', (4273, 4302), True, 'import numpy as np\n'), ((4333, 4369), 'numpy.imag', 'np.imag', (['tm.pointing.theta_edge_z[i]'], {}), '(tm.pointing.theta_edge_z[i])\n', (4340, 4369), True, 'import numpy as np\n'), ((4961, 4974), 'numpy.log', 'np.log', (['fmids'], {}), '(fmids)\n', (4967, 4974), True, 'import numpy as np\n'), ((6054, 6084), 'numpy.hanning', 'np.hanning', (['data[bm].shape[-1]'], {}), '(data[bm].shape[-1])\n', (6064, 6084), True, 'import numpy as np\n'), ((6247, 6277), 'numpy.hanning', 'np.hanning', (['data[bm].shape[-1]'], {}), '(data[bm].shape[-1])\n', (6257, 6277), True, 'import numpy as np\n'), ((7109, 7158), 'numpy.column_stack', 'np.column_stack', (['[tm.array.x[bm], tm.array.y[bm]]'], {}), '([tm.array.x[bm], tm.array.y[bm]])\n', (7124, 7158), True, 'import numpy as np\n'), ((9973, 10007), 'numpy.abs', 'np.abs', (['tm.beam_filter_sides[ilay]'], {}), '(tm.beam_filter_sides[ilay])\n', (9979, 10007), True, 'import numpy as np\n'), ((3014, 3025), 'numpy.real', 'np.real', (['dz'], {}), '(dz)\n', (3021, 3025), True, 'import numpy as np\n'), ((3031, 3042), 'numpy.imag', 'np.imag', (['dz'], {}), '(dz)\n', (3038, 3042), True, 'import numpy as 
np\n'), ((6721, 6740), 'numpy.cos', 'np.cos', (['pt[:, None]'], {}), '(pt[:, None])\n', (6727, 6740), True, 'import numpy as np\n'), ((6805, 6824), 'numpy.sin', 'np.sin', (['pt[:, None]'], {}), '(pt[:, None])\n', (6811, 6824), True, 'import numpy as np\n'), ((5406, 5431), 'numpy.round', 'np.round', (['(band * 1e-09)', '(1)'], {}), '(band * 1e-09, 1)\n', (5414, 5431), True, 'import numpy as np\n'), ((5973, 6003), 'numpy.hanning', 'np.hanning', (['data[bm].shape[-1]'], {}), '(data[bm].shape[-1])\n', (5983, 6003), True, 'import numpy as np\n'), ((6167, 6197), 'numpy.hanning', 'np.hanning', (['data[bm].shape[-1]'], {}), '(data[bm].shape[-1])\n', (6177, 6197), True, 'import numpy as np\n'), ((10784, 10794), 'numpy.cos', 'np.cos', (['pt'], {}), '(pt)\n', (10790, 10794), True, 'import numpy as np\n'), ((10880, 10890), 'numpy.sin', 'np.sin', (['pt'], {}), '(pt)\n', (10886, 10890), True, 'import numpy as np\n')] |
################################################################################################################################
# This function implements the image search/retrieval .
# inputs: Input location of uploaded image, extracted vectors
#
################################################################################################################################
import random
import tensorflow.compat.v1 as tf
import numpy as np
import imageio
import os
import scipy.io
import time
from datetime import datetime
from scipy import ndimage
#from scipy.misc import imsave
imsave = imageio.imsave
imread = imageio.imread
from scipy.spatial.distance import cosine
#import matplotlib.pyplot as plt
from sklearn.neighbors import NearestNeighbors
import pickle
from PIL import Image
import gc
import os
from tempfile import TemporaryFile
from tensorflow.python.platform import gfile
# Tensor names / sizes for the frozen Inception graph loaded in
# create_inception_graph() below.
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'  # penultimate-layer feature output
BOTTLENECK_TENSOR_SIZE = 2048  # length of the bottleneck feature vector
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'  # feed raw JPEG bytes here
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # ~134M
#show_neighbors(random.randint(0, len(extracted_features)), indices, neighbor_list)
def get_top_k_similar(image_data, pred, pred_final, k):
    """Copy the k stored images most similar to the query into static/result/.

    Every feature vector in `pred` is ranked by cosine *distance* to
    `image_data` (smaller = more similar), and the k best-matching images
    (whose file paths live in `pred_final`, row-aligned with `pred`) are
    re-saved under static/result/ using their base filenames.

    Args:
        image_data: 1-D feature vector of the query image.
        pred: iterable of stored feature vectors.
        pred_final: list of image file paths, one per row of `pred`.
        k: number of neighbours to retrieve.
    """
    print("total data", len(pred))
    print(image_data.shape)
    # Bug fix: os.mkdir raised FileExistsError on every query after the first
    # and FileNotFoundError when the parent 'static' directory was missing.
    os.makedirs('static/result', exist_ok=True)
    # cosine() returns a distance, so the smallest values are the closest
    # matches and np.argsort already puts them first -- no reversal needed.
    top_k_ind = np.argsort([cosine(image_data, pred_row)
                            for pred_row in pred])[:k]
    print(top_k_ind)
    for neighbor in top_k_ind:
        src_path = pred_final[neighbor]
        image = imread(src_path)
        # Handle both Windows ('\\') and POSIX ('/') separators: the original
        # split("\\") left the whole path intact on POSIX, which turned the
        # imsave target into an invalid nested path.
        img_name = src_path.replace("\\", "/").split("/")[-1]
        print(img_name)
        imsave('static/result/' + img_name, image)
def create_inception_graph():
    """Load the frozen Inception GraphDef from disk into a fresh session.

    Returns:
        (graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor):
        the populated Graph plus the three tensors this pipeline feeds/reads.
    """
    with tf.Session() as sess:
        graph_path = os.path.join(
            'imagenet', 'classify_image_graph_def.pb')
        with gfile.FastGFile(graph_path, 'rb') as graph_file:
            # Deserialize the protobuf and splice it into the default graph.
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(graph_file.read())
            tensors = tf.import_graph_def(
                graph_def, name='',
                return_elements=[BOTTLENECK_TENSOR_NAME,
                                 JPEG_DATA_TENSOR_NAME,
                                 RESIZED_INPUT_TENSOR_NAME])
            bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = tensors
    return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            bottleneck_tensor):
    """Run one image through the network and return its bottleneck
    (penultimate-layer) activations, squeezed to a flat vector."""
    feed = {image_data_tensor: image_data}
    raw_values = sess.run(bottleneck_tensor, feed)
    return np.squeeze(raw_values)
def recommend(imagePath, extracted_features):
    """End-to-end retrieval: featurize the uploaded image and save its 9
    nearest stored images into static/result/.

    Args:
        imagePath: path to the uploaded query image (JPEG bytes are fed
            directly to the graph's DecodeJpeg input).
        extracted_features: precomputed feature matrix for the image corpus,
            row-aligned with the pickled neighbor path list.
    """
    # Fresh CPU-only session (GPU disabled via device_count).
    # NOTE(review): the session and the FastGFile handle below are never
    # closed -- repeated calls leak resources; consider context managers.
    tf.reset_default_graph()
    config = tf.ConfigProto(
            device_count = {'GPU': 0}
        )
    sess = tf.Session(config=config)
    graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (create_inception_graph())
    image_data = gfile.FastGFile(imagePath, 'rb').read()
    features = run_bottleneck_on_image(sess, image_data, jpeg_data_tensor, bottleneck_tensor)
    # NOTE(review): pickle.load on a local artifact -- only safe if the file
    # is trusted; never load user-supplied pickles.
    with open('neighbor_list_recom.pickle','rb') as f:
        neighbor_list = pickle.load(f)
        print("loaded images")
    get_top_k_similar(features, extracted_features, neighbor_list, k=9)
| [
"os.mkdir",
"tensorflow.python.platform.gfile.FastGFile",
"scipy.spatial.distance.cosine",
"tensorflow.compat.v1.Session",
"pickle.load",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.reset_default_graph",
"numpy.squeeze",
"tensorflow.compat.v1.GraphDef",
"os.path.join",
"tensorflow.... | [((1474, 1499), 'os.mkdir', 'os.mkdir', (['"""static/result"""'], {}), "('static/result')\n", (1482, 1499), False, 'import os\n'), ((3237, 3266), 'numpy.squeeze', 'np.squeeze', (['bottleneck_values'], {}), '(bottleneck_values)\n', (3247, 3266), True, 'import numpy as np\n'), ((3351, 3375), 'tensorflow.compat.v1.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (3373, 3375), True, 'import tensorflow.compat.v1 as tf\n'), ((3394, 3433), 'tensorflow.compat.v1.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'GPU': 0}"}), "(device_count={'GPU': 0})\n", (3408, 3433), True, 'import tensorflow.compat.v1 as tf\n'), ((3474, 3499), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3484, 3499), True, 'import tensorflow.compat.v1 as tf\n'), ((2433, 2445), 'tensorflow.compat.v1.Session', 'tf.Session', ([], {}), '()\n', (2443, 2445), True, 'import tensorflow.compat.v1 as tf\n'), ((2476, 2531), 'os.path.join', 'os.path.join', (['"""imagenet"""', '"""classify_image_graph_def.pb"""'], {}), "('imagenet', 'classify_image_graph_def.pb')\n", (2488, 2531), False, 'import os\n'), ((3858, 3872), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3869, 3872), False, 'import pickle\n'), ((2550, 2587), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['model_filename', '"""rb"""'], {}), "(model_filename, 'rb')\n", (2565, 2587), False, 'from tensorflow.python.platform import gfile\n'), ((2612, 2625), 'tensorflow.compat.v1.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2623, 2625), True, 'import tensorflow.compat.v1 as tf\n'), ((2746, 2882), 'tensorflow.compat.v1.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""', 'return_elements': '[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME, RESIZED_INPUT_TENSOR_NAME]'}), "(graph_def, name='', return_elements=[\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME, RESIZED_INPUT_TENSOR_NAME])\n", (2765, 2882), True, 'import 
tensorflow.compat.v1 as tf\n'), ((3623, 3655), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['imagePath', '"""rb"""'], {}), "(imagePath, 'rb')\n", (3638, 3655), False, 'from tensorflow.python.platform import gfile\n'), ((1633, 1661), 'scipy.spatial.distance.cosine', 'cosine', (['image_data', 'pred_row'], {}), '(image_data, pred_row)\n', (1639, 1661), False, 'from scipy.spatial.distance import cosine\n')] |
import json
import numpy as np
import os
import argparse
def f1(p, r):
    """Return the F1 score (harmonic mean) of precision `p` and recall `r`.

    By convention the score is 0 whenever recall is 0, which also covers the
    degenerate p == r == 0 case without dividing by zero.
    """
    if r != 0.:
        return 2 * p * r / float(p + r)
    return 0.
def merge_dict(dict1, dict2):
    """Return a new dict with dict1's entries updated by dict2 (dict2 wins
    on duplicate keys); neither input is modified."""
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def macro(dataset, threshold, if_generate=False):
    """Compute loose-macro precision/recall for multi-label predictions.

    A label is predicted when its confidence is >= `threshold`.  Precision is
    averaged over examples with at least one prediction; recall over examples
    with at least one gold label.  With `if_generate`, a per-example record of
    the predictions is also collected.

    Returns:
        (precision, recall, res) where `res` is [] unless `if_generate`.
    """
    precision_sum, n_with_preds = 0., 0
    recall_sum, n_with_gold = 0., 0
    res = []
    for example in dataset:
        gold = example['annotation']
        ranking = example['confidence_ranking']
        predicted = [label for label in ranking if ranking[label] >= threshold]
        if if_generate:
            # NOTE(review): 'entity' is the literal list ['entity'], not
            # example['entity'] -- looks like a bug; preserved as-is here.
            res.append({'id': example['id'], 'premise': example['premise'],
                        'entity': ['entity'], 'annotation': example['annotation'],
                        'predicted_labels': list(predicted)})
        overlap = set(predicted) & set(gold)
        if predicted:
            n_with_preds += 1
            precision_sum += len(overlap) / float(len(predicted))
        if gold:
            n_with_gold += 1
            recall_sum += len(overlap) / float(len(gold))
    precision = precision_sum / n_with_preds if n_with_preds > 0 else 0
    recall = recall_sum / n_with_gold if n_with_gold > 0 else 0
    return precision, recall, res
def load_res(res_path):
    """Load JSON-lines result records.

    Accepts either a single file (one JSON object per line) or a directory,
    in which case every file inside is read and the records concatenated in
    os.listdir order.

    Raises:
        ValueError: if `res_path` is neither a file nor a directory.
    """
    def _read_jsonl(path):
        # One JSON object per non-empty line.
        with open(path) as fin:
            return [json.loads(line) for line in fin.read().splitlines()]

    if os.path.isdir(res_path):
        records = []
        for fname in os.listdir(res_path):
            records.extend(_read_jsonl(os.path.join(res_path, fname)))
        return records
    if os.path.isfile(res_path):
        return _read_jsonl(res_path)
    raise ValueError("res_path error!")
def main():
    """Tune the decision threshold on DEV, then report loose-macro P/R/F1
    on TEST at that threshold.

    Sweeps thresholds over [threshold_start, 1.0] in steps of threshold_step,
    keeps the one with the best DEV F1, evaluates TEST with prediction
    generation enabled, and writes the per-example predictions to
    <model_dir>/result.json (one JSON object per line).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dev',
                        type=str,
                        default='',
                        help='path to the DEV result file(s) generated by eval.py')
    parser.add_argument('--test',
                        type=str,
                        default='',
                        help='path to the TEST result file(s) generated by eval.py')
    parser.add_argument('--model_dir',
                        type=str,
                        default='',
                        help='dir path to model checkpoint. Used to save typing result')
    parser.add_argument('--threshold_start',
                        type=float,
                        default=0.0,
                        help='Will loop through [threshold_start, 1.0] on dev set to select '
                             'the best threshold to eval on test set')
    parser.add_argument('--threshold_step',
                        type=float,
                        default=0.005,
                        help='threshold increment every time')
    args = parser.parse_args()
    dev_dat = load_res(args.dev)
    test_dat = load_res(args.test)
    # Loose-macro follow ultra-fine grained entity typing
    print('Eval DEV on Loose Macro Score:')
    f1_champ = 0.0
    threshold_champ = 1.0
    # Grid-search on DEV; a strict '>' keeps the earliest (lowest) threshold
    # when several tie on F1.
    for threshold in np.arange(args.threshold_start, 1.0+args.threshold_step, args.threshold_step):
        precision, recall, res = macro(dev_dat, threshold, False)
        summary = f'Threshold = {threshold}\t'\
                  f'{round(precision, 3) * 100}\t' \
                  f'{round(recall, 3) * 100}\t' \
                  f'{round(f1(precision, recall), 3) * 100}'
        print(summary)
        if f1(precision, recall) > f1_champ:
            f1_champ = f1(precision, recall)
            threshold_champ = threshold
        else:
            pass
    print(f'{"*"*10}\n F1 champ on DEV = {round(f1_champ, 3) * 100} when threshold = {threshold_champ}\n{"*"*10}')
    print("Eval TEST on Loose Macro Score:")
    # if_generate=True so `res` carries the per-example predictions to save.
    precision, recall, res = macro(test_dat, threshold_champ, True)
    summary = f'{round(precision, 3) * 100}\t' \
              f'{round(recall, 3) * 100}\t' \
              f'{round(f1(precision, recall), 3) * 100}'
    print(summary)
    # save res file
    with open(os.path.join(args.model_dir,'result.json'), 'w+') as fout:
        fout.write("\n".join([json.dumps(items) for items in res]))
# Script entry point: run the threshold search and evaluation.
if __name__ == "__main__":
    main()
"argparse.ArgumentParser",
"json.loads",
"os.path.isdir",
"json.dumps",
"os.path.isfile",
"numpy.arange",
"os.path.join",
"os.listdir"
] | [((1483, 1506), 'os.path.isdir', 'os.path.isdir', (['res_path'], {}), '(res_path)\n', (1496, 1506), False, 'import os\n'), ((2105, 2130), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2128, 2130), False, 'import argparse\n'), ((3442, 3521), 'numpy.arange', 'np.arange', (['args.threshold_start', '(1.0 + args.threshold_step)', 'args.threshold_step'], {}), '(args.threshold_start, 1.0 + args.threshold_step, args.threshold_step)\n', (3451, 3521), True, 'import numpy as np\n'), ((1545, 1565), 'os.listdir', 'os.listdir', (['res_path'], {}), '(res_path)\n', (1555, 1565), False, 'import os\n'), ((1838, 1862), 'os.path.isfile', 'os.path.isfile', (['res_path'], {}), '(res_path)\n', (1852, 1862), False, 'import os\n'), ((1586, 1614), 'os.path.join', 'os.path.join', (['res_path', 'file'], {}), '(res_path, file)\n', (1598, 1614), False, 'import os\n'), ((4420, 4463), 'os.path.join', 'os.path.join', (['args.model_dir', '"""result.json"""'], {}), "(args.model_dir, 'result.json')\n", (4432, 4463), False, 'import os\n'), ((1731, 1748), 'json.loads', 'json.loads', (['items'], {}), '(items)\n', (1741, 1748), False, 'import json\n'), ((1965, 1982), 'json.loads', 'json.loads', (['items'], {}), '(items)\n', (1975, 1982), False, 'import json\n'), ((4509, 4526), 'json.dumps', 'json.dumps', (['items'], {}), '(items)\n', (4519, 4526), False, 'import json\n')] |
import random, os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
from torch.autograd import Variable
from transformers import BertTokenizer, BertForSequenceEncoder, RobertaTokenizer, RobertaForSequenceEncoder
from models import inference_model
from data_loader import DataLoader
from torch.nn import NLLLoss
import logging
from transformers import AdamW, get_linear_schedule_with_warmup
logger = logging.getLogger(__name__)
def accuracy(output, labels):
    """Fraction of rows in `output` whose argmax over classes (dim 1)
    matches `labels`; returned as a 0-d double tensor."""
    predictions = output.max(1)[1].type_as(labels)
    n_correct = predictions.eq(labels).double().sum()
    return n_correct / len(labels)
def correct_prediction(output, labels):
    """Number of rows in `output` whose argmax over classes (dim 1) matches
    `labels`; returned as a 0-d double tensor."""
    predictions = output.max(1)[1].type_as(labels)
    return predictions.eq(labels).double().sum()

def eval_model(model, validset_reader):
    """Switch `model` to eval mode, score every batch from
    `validset_reader`, and return correct / validset_reader.total_num."""
    model.eval()
    n_correct = 0.0
    for batch in validset_reader:
        inputs, lab_tensor = batch
        prob = model(inputs)
        n_correct += correct_prediction(prob, lab_tensor)
    return n_correct / validset_reader.total_num
def train_model(model, ori_model, args, trainset_reader, validset_reader):
    """Fine-tune `model` with AdamW + linear warmup/decay and gradient
    accumulation, periodically evaluating on dev and checkpointing the best.

    Args:
        model: the (possibly wrapped) model being trained.
        ori_model: underlying model whose state_dict is saved to
            <outdir>/model.best.pt on each new best dev accuracy.
        args: namespace with outdir, train_batch_size,
            gradient_accumulation_steps, num_train_epochs, weight_decay,
            learning_rate, warmup_proportion, eval_step.
        trainset_reader / validset_reader: iterables yielding
            (inputs, label_tensor) batches; must expose .total_num.
    """
    save_path = args.outdir + '/model'
    best_accuracy = 0.0
    running_loss = 0.0
    # Total optimizer steps across all epochs (drives the LR schedule).
    t_total = int(
        trainset_reader.total_num / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
    # Standard BERT practice: no weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=1e-8, correct_bias=True)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion*t_total), num_training_steps=t_total)
    global_step = 0  # counts micro-batches, not optimizer steps
    model.train()
    for epoch in range(int(args.num_train_epochs)):
        # optimizer.zero_grad()
        for index, data in enumerate(trainset_reader):
            model.train()
            inputs, lab_tensor = data
            prob = model(inputs)
            loss = F.nll_loss(prob, lab_tensor)
            # Scale the loss so accumulated gradients average over micro-batches.
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            loss.backward()
            global_step += 1
            running_loss += loss.item()
            # Take an optimizer step once per accumulation window.
            if global_step % args.gradient_accumulation_steps == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
            # Log the average loss every 10 optimizer steps.
            if global_step % (args.gradient_accumulation_steps*10)==0:
                logger.info('Epoch: {0}, Step: {1}, Loss: {2}'.format(epoch, global_step//args.gradient_accumulation_steps, (running_loss / 10)))
                running_loss = 0
            # Evaluate on dev every eval_step optimizer steps; checkpoint on
            # a new best accuracy.
            if global_step % (args.eval_step * args.gradient_accumulation_steps) == 0:
                logger.info('Start eval!')
                with torch.no_grad():
                    dev_accuracy = eval_model(model, validset_reader)
                    logger.info('Dev total acc: {0}'.format(dev_accuracy))
                    if dev_accuracy > best_accuracy:
                        best_accuracy = dev_accuracy
                        torch.save({'epoch': epoch,
                                    'model': ori_model.state_dict(),
                                    'best_accuracy': best_accuracy}, save_path + ".best.pt")
                        logger.info("Saved best epoch {0}, best accuracy {1}".format(epoch, best_accuracy))
def set_seed(args):
    """Seed every RNG (python, numpy, torch CPU and all CUDA devices) so a
    run is reproducible.

    Args:
        args: parsed argparse namespace carrying an integer ``seed``.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op if the host has no visible CUDA device.
    torch.cuda.manual_seed_all(seed)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # --- model / regularization ------------------------------------------
    parser.add_argument("--model_type", type=str, default="bert", required=True)
    parser.add_argument('--patience', type=int, default=20, help='Patience')
    parser.add_argument('--dropout', type=float, default=0.6, help='Dropout.')
    parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
    parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay (L2 loss on parameters).')
    # --- data ------------------------------------------------------------
    parser.add_argument('--train_path', help='train path')
    parser.add_argument('--valid_path', help='valid path')
    parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
    parser.add_argument("--valid_batch_size", default=32, type=int, help="Total batch size for predictions.")
    parser.add_argument('--outdir', required=True, help='path to output directory')
    parser.add_argument("--pool", type=str, default="att", help='Aggregating method: top, max, mean, concat, att, sum')
    parser.add_argument("--layer", type=int, default=1, help='Graph Layer.')
    parser.add_argument("--num_labels", type=int, default=3)
    parser.add_argument("--evi_num", type=int, default=5, help='Evidence num.')
    # NOTE(review): the two help strings below look copy-pasted from
    # --evi_num; kept as-is pending confirmation of their real meaning.
    parser.add_argument("--kernel", type=int, default=21, help='Evidence num.')
    parser.add_argument("--threshold", type=float, default=0.0, help='Evidence num.')
    parser.add_argument("--max_len", default=130, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    # BUG FIX: --eval_step help was copy-pasted from --max_len.
    parser.add_argument("--eval_step", default=1000, type=int,
                        help="Run dev-set evaluation every this many optimizer steps.")
    parser.add_argument('--bert_pretrain', required=True)
    parser.add_argument('--postpretrain')
    parser.add_argument("--learning_rate", default=2e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    # BUG FIX: the bare '%' in this help string made `--help` crash, because
    # argparse %-expands help text; it must be escaped as '%%'.
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
                             "of training.")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--no_clip', action='store_true', default=False, help='')
    args = parser.parse_args()

    if not os.path.exists(args.outdir):
        os.mkdir(args.outdir)
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Log both to a file inside outdir and to the console.
    handlers = [logging.FileHandler(os.path.abspath(args.outdir) + '/train_log.txt'),
                logging.StreamHandler()]
    logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.DEBUG,
                        datefmt='%d-%m-%Y %H:%M:%S', handlers=handlers)
    logger.info(args)
    set_seed(args)
    logger.info('Start training!')

    label_map = {'SUPPORTS': 0, 'REFUTES': 1, 'NOT ENOUGH INFO': 2}
    if args.model_type == 'bert':
        tokenizer = BertTokenizer.from_pretrained(args.bert_pretrain, do_lower_case=False)
        bert_model = BertForSequenceEncoder.from_pretrained(args.bert_pretrain)
    elif args.model_type == 'roberta':
        tokenizer = RobertaTokenizer.from_pretrained(args.bert_pretrain, do_lower_case=False)
        bert_model = RobertaForSequenceEncoder.from_pretrained(args.bert_pretrain)
    else:
        # BUG FIX: was `assert(False)` -- assertions vanish under `python -O`,
        # silently falling through with bert_model undefined.
        raise ValueError("unsupported --model_type: {}".format(args.model_type))
    args.bert_hidden_dim = bert_model.hidden_size

    logger.info("loading training set")
    trainset_reader = DataLoader(args.train_path, label_map, tokenizer, args,
                                 batch_size=args.train_batch_size)
    logger.info("loading validation set")
    validset_reader = DataLoader(args.valid_path, label_map, tokenizer, args,
                                 batch_size=args.valid_batch_size, test=True)

    logger.info('initializing estimator model')
    ori_model = inference_model(bert_model, args)
    # Wrap in DataParallel on multi-GPU hosts; keep `ori_model` for
    # checkpointing (its state_dict carries no `module.` prefix).
    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(ori_model)
    else:
        model = ori_model
    model = model.cuda()
    train_model(model, ori_model, args, trainset_reader, validset_reader)
| [
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"transformers.RobertaTokenizer.from_pretrained",
"transformers.BertForSequenceEncoder.from_pretrained",
"torch.cuda.device_count",
"torch.no_grad",
"transformers.RobertaForSequenceEncoder.from_pretrained",
"os.path.abspath",
"os.path.exis... | [((502, 529), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (519, 529), False, 'import logging\n'), ((1884, 1976), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': '(1e-08)', 'correct_bias': '(True)'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=1e-08,\n correct_bias=True)\n', (1889, 1976), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((3957, 3979), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (3968, 3979), False, 'import random, os\n'), ((3984, 4009), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (3998, 4009), True, 'import numpy as np\n'), ((4014, 4042), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (4031, 4042), False, 'import torch\n'), ((4047, 4084), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (4073, 4084), False, 'import torch\n'), ((4130, 4155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4153, 4155), False, 'import argparse\n'), ((7581, 7724), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s] %(levelname)s: %(message)s"""', 'level': 'logging.DEBUG', 'datefmt': '"""%d-%m-%Y %H:%M:%S"""', 'handlers': 'handlers'}), "(format='[%(asctime)s] %(levelname)s: %(message)s',\n level=logging.DEBUG, datefmt='%d-%m-%Y %H:%M:%S', handlers=handlers)\n", (7600, 7724), False, 'import logging\n'), ((8452, 8546), 'data_loader.DataLoader', 'DataLoader', (['args.train_path', 'label_map', 'tokenizer', 'args'], {'batch_size': 'args.train_batch_size'}), '(args.train_path, label_map, tokenizer, args, batch_size=args.\n train_batch_size)\n', (8462, 8546), False, 'from data_loader import DataLoader\n'), ((8639, 8744), 'data_loader.DataLoader', 'DataLoader', (['args.valid_path', 'label_map', 'tokenizer', 'args'], {'batch_size': 
'args.valid_batch_size', 'test': '(True)'}), '(args.valid_path, label_map, tokenizer, args, batch_size=args.\n valid_batch_size, test=True)\n', (8649, 8744), False, 'from data_loader import DataLoader\n'), ((8839, 8872), 'models.inference_model', 'inference_model', (['bert_model', 'args'], {}), '(bert_model, args)\n', (8854, 8872), False, 'from models import inference_model\n'), ((7344, 7371), 'os.path.exists', 'os.path.exists', (['args.outdir'], {}), '(args.outdir)\n', (7358, 7371), False, 'import random, os\n'), ((7381, 7402), 'os.mkdir', 'os.mkdir', (['args.outdir'], {}), '(args.outdir)\n', (7389, 7402), False, 'import random, os\n'), ((7440, 7465), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7463, 7465), False, 'import torch\n'), ((7552, 7575), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7573, 7575), False, 'import logging\n'), ((7942, 8012), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.bert_pretrain'], {'do_lower_case': '(False)'}), '(args.bert_pretrain, do_lower_case=False)\n', (7971, 8012), False, 'from transformers import BertTokenizer, BertForSequenceEncoder, RobertaTokenizer, RobertaForSequenceEncoder\n'), ((8034, 8092), 'transformers.BertForSequenceEncoder.from_pretrained', 'BertForSequenceEncoder.from_pretrained', (['args.bert_pretrain'], {}), '(args.bert_pretrain)\n', (8072, 8092), False, 'from transformers import BertTokenizer, BertForSequenceEncoder, RobertaTokenizer, RobertaForSequenceEncoder\n'), ((8880, 8905), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (8903, 8905), False, 'import torch\n'), ((8927, 8953), 'torch.nn.DataParallel', 'nn.DataParallel', (['ori_model'], {}), '(ori_model)\n', (8942, 8953), True, 'import torch.nn as nn\n'), ((2411, 2439), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['prob', 'lab_tensor'], {}), '(prob, lab_tensor)\n', (2421, 2439), True, 'import torch.nn.functional as F\n'), ((8150, 8223), 
'transformers.RobertaTokenizer.from_pretrained', 'RobertaTokenizer.from_pretrained', (['args.bert_pretrain'], {'do_lower_case': '(False)'}), '(args.bert_pretrain, do_lower_case=False)\n', (8182, 8223), False, 'from transformers import BertTokenizer, BertForSequenceEncoder, RobertaTokenizer, RobertaForSequenceEncoder\n'), ((8245, 8306), 'transformers.RobertaForSequenceEncoder.from_pretrained', 'RobertaForSequenceEncoder.from_pretrained', (['args.bert_pretrain'], {}), '(args.bert_pretrain)\n', (8286, 8306), False, 'from transformers import BertTokenizer, BertForSequenceEncoder, RobertaTokenizer, RobertaForSequenceEncoder\n'), ((7502, 7530), 'os.path.abspath', 'os.path.abspath', (['args.outdir'], {}), '(args.outdir)\n', (7517, 7530), False, 'import random, os\n'), ((3341, 3356), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3354, 3356), False, 'import torch\n')] |
import gym
import time
import random
import numpy as np
env = gym.make('Navigation2D-v0')
state = env.reset()

# Sample a random 2-D goal in [-0.5, 0.5]^2 and configure the task.
# NOTE(review): `reset_task` is not part of the core gym API -- confirm which
# package registers 'Navigation2D-v0' (presumably a meta-RL benchmark).
goals = np.random.uniform(-0.5, 0.5, size=(2,))
task = {'goal': goals}
env.reset_task(task)

score = 0
# Roll out one episode with uniformly random actions (no policy).
done = False
while not done:
    time.sleep(1)  # slow down so the rendering is watchable
    env.render()
    action = np.random.uniform(-0.1, 0.1, size=(2,))
    state, reward, done, _ = env.step(action)
    score += reward
print('score: ', score)  # episode finished - print the accumulated reward
env.close()
| [
"numpy.random.uniform",
"gym.make",
"time.sleep"
] | [((63, 90), 'gym.make', 'gym.make', (['"""Navigation2D-v0"""'], {}), "('Navigation2D-v0')\n", (71, 90), False, 'import gym\n'), ((121, 160), 'numpy.random.uniform', 'np.random.uniform', (['(-0.5)', '(0.5)'], {'size': '(2,)'}), '(-0.5, 0.5, size=(2,))\n', (138, 160), True, 'import numpy as np\n'), ((255, 268), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (265, 268), False, 'import time\n'), ((299, 338), 'numpy.random.uniform', 'np.random.uniform', (['(-0.1)', '(0.1)'], {'size': '(2,)'}), '(-0.1, 0.1, size=(2,))\n', (316, 338), True, 'import numpy as np\n')] |
from ast import Bytes
from collections import OrderedDict
from io import BytesIO
import struct
from typing import Callable, Dict, List, Tuple
import numpy as np
from flwr.server.strategy import FedAvg
from flwr.common import (
EvaluateRes,
FitRes,
Parameters,
Scalar,
Weights,
)
from typing import Optional
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg
from numpy import bytes_, numarray
class FedAvgCpp(FedAvg):
    """FedAvg strategy for clients that exchange weights as raw C++ doubles.

    Behaves exactly like flwr's ``FedAvg`` except that model weights are
    (de)serialized through the ``cpp_double`` tensor type via
    ``weights_to_parameters`` / ``parameters_to_weights``.
    """

    def __init__(
        self,
        fraction_fit: float = 1.0,
        fraction_eval: float = 1.0,
        min_fit_clients: int = 2,
        min_eval_clients: int = 2,
        min_available_clients: int = 2,
        eval_fn: Optional[
            Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
        ] = None,
        on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        accept_failures: bool = True,
        initial_parameters: Optional[Parameters] = None,
    ) -> None:
        # All configuration is forwarded unchanged to the stock FedAvg.
        super().__init__(
            fraction_fit=fraction_fit,
            fraction_eval=fraction_eval,
            min_fit_clients=min_fit_clients,
            min_eval_clients=min_eval_clients,
            min_available_clients=min_available_clients,
            eval_fn=eval_fn,
            on_fit_config_fn=on_fit_config_fn,
            on_evaluate_config_fn=on_evaluate_config_fn,
            accept_failures=accept_failures,
            initial_parameters=initial_parameters,
        )

    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # Deserialize each client's update together with its sample count.
        weights_results = [
            (parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for _, fit_res in results
        ]
        aggregated_weights = aggregate(weights_results)
        return weights_to_parameters(aggregated_weights), {}

    def aggregate_evaluate(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, EvaluateRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[float], Dict[str, Scalar]]:
        """Aggregate evaluation losses using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # BUG FIX: removed leftover debug statement `print(results[0][1])`.
        loss_aggregated = weighted_loss_avg(
            [
                (evaluate_res.num_examples, evaluate_res.loss)
                for _, evaluate_res in results
            ]
        )
        return loss_aggregated, {}
def weights_to_parameters(weights) -> Parameters:
    """Serialize a list of NumPy weight arrays into a flwr Parameters
    message using the ``cpp_double`` tensor type."""
    serialized = [ndarray_to_bytes(weight) for weight in weights]
    return Parameters(tensors=serialized, tensor_type="cpp_double")
def parameters_to_weights(parameters: Parameters) -> Weights:
    """Convert parameters object to NumPy weights."""
    return [bytes_to_ndarray(raw) for raw in parameters.tensors]
def bytes_to_ndarray(tensor_bytes: bytes) -> np.ndarray:
    """Deserialize a buffer of native-endian C doubles into a 1-D array.

    Parameters
    ----------
    tensor_bytes : bytes
        Raw buffer whose length is a multiple of 8; every 8-byte chunk is
        one native-endian C double (the format written by ndarray_to_bytes).

    Returns
    -------
    np.ndarray
        Writable 1-D float64 array with one element per 8-byte chunk.

    Raises
    ------
    ValueError
        If the buffer length is not a multiple of 8.
    """
    # BUG FIX: the annotation was `ast.Bytes` (an AST node class) instead of
    # the builtin `bytes`.
    # np.frombuffer replaces the per-element struct.unpack loop (one C-level
    # pass instead of Python-level unpacking); .copy() detaches the
    # read-only view so the caller gets a writable, owning array.
    return np.frombuffer(tensor_bytes, dtype=np.float64).copy()
def ndarray_to_bytes(a: np.ndarray) -> bytes:
    """Serialize an array to a buffer of native-endian C doubles.

    Generalized: accepts any numeric dtype and shape (values are converted
    to float64 and flattened in C order), where the original only handled
    1-D input via struct.pack. The annotation is also fixed: it was
    `ast.Bytes` (an AST node class) instead of the builtin `bytes`.

    Parameters
    ----------
    a : np.ndarray
        Numeric array to serialize.

    Returns
    -------
    bytes
        a.size * 8 bytes of native-endian doubles.
    """
    return np.asarray(a, dtype=np.float64).ravel().tobytes()
| [
"numpy.asarray",
"struct.unpack",
"flwr.server.strategy.aggregate.weighted_loss_avg",
"flwr.common.Parameters",
"flwr.server.strategy.aggregate.aggregate"
] | [((3412, 3465), 'flwr.common.Parameters', 'Parameters', ([], {'tensors': 'tensors', 'tensor_type': '"""cpp_double"""'}), "(tensors=tensors, tensor_type='cpp_double')\n", (3422, 3465), False, 'from flwr.common import EvaluateRes, FitRes, Parameters, Scalar, Weights\n'), ((3936, 3960), 'numpy.asarray', 'np.asarray', (['list_doubles'], {}), '(list_doubles)\n', (3946, 3960), True, 'import numpy as np\n'), ((2350, 2376), 'flwr.server.strategy.aggregate.aggregate', 'aggregate', (['weights_results'], {}), '(weights_results)\n', (2359, 2376), False, 'from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg\n'), ((3025, 3127), 'flwr.server.strategy.aggregate.weighted_loss_avg', 'weighted_loss_avg', (['[(evaluate_res.num_examples, evaluate_res.loss) for _, evaluate_res in results]'], {}), '([(evaluate_res.num_examples, evaluate_res.loss) for _,\n evaluate_res in results])\n', (3042, 3127), False, 'from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg\n'), ((3827, 3872), 'struct.unpack', 'struct.unpack', (['"""d"""', 'tensor_bytes[idx:idx + 8]'], {}), "('d', tensor_bytes[idx:idx + 8])\n", (3840, 3872), False, 'import struct\n')] |
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors.linear_regression import LinearRegression
import os
pio.templates.default = "simple_white"
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Drops identifier columns and rows with missing or implausible values,
    bins house/renovation age into 8 buckets, and adds ratio features
    relating a house to its neighborhood.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix including the response column 'price' as a DataFrame.
    """
    df = pd.read_csv(filename)
    df = df.dropna().drop(['id', 'date'], axis=1)

    # Keep only physically plausible rows.
    plausible = ((df['price'] > 0) &
                 (df['bedrooms'] > 0) &
                 (df['bathrooms'] > 0) &
                 (df['sqft_living'] > 30) &
                 (df['floors'] > 0))
    df = df[plausible]

    # Bucket ages: [<1, <5, <10, <25, <50, <75, <100, >=100] -> 0..7.
    bin_edges = [1, 5, 10, 25, 50, 75, 100]
    df['age'] = np.digitize(2015 - df['yr_built'], bin_edges)
    df['renov_age'] = np.digitize(2015 - df['yr_renovated'], bin_edges)
    df['new_age'] = df['age'] + df['renov_age']

    # Ratio features comparing the house to its 15 nearest neighbors / lot.
    df['sqft_living_vs_neigbors'] = df['sqft_living'] / df['sqft_living15']
    df['sqft_lot_vs_neigbors'] = df['sqft_lot'] / df['sqft_lot15']
    df['living_vs_lot'] = df['sqft_living'] / df['sqft_lot']

    return df.drop(['age', 'renov_age'], axis=1)
def feature_evaluation(X: pd.DataFrame, y: pd.Series, output_path: str = ".") -> NoReturn:
    """
    Create scatter plot between each feature and the response.
        - Plot title specifies feature name
        - Plot title specifies Pearson Correlation between feature and response
        - Plot saved under given folder with file name including feature name

    Parameters
    ----------
    X : DataFrame of shape (n_samples, n_features)
        Design matrix of regression problem

    y : array-like of shape (n_samples, )
        Response vector to evaluate against

    output_path: str (default ".")
        Path to folder in which plots are saved
    """
    # BUG FIX: was `if not output_path is "."` -- identity (`is`) comparison
    # on string literals, which only works by CPython interning accident;
    # use equality instead.
    if output_path != ".":
        # Create a fresh, numbered output directory (path1, path2, ...).
        suffix = 1
        while os.path.exists(output_path + str(suffix)):
            suffix += 1
        os.mkdir(output_path + str(suffix))
        output_path = output_path + str(suffix)

    n_features = X.shape[1]
    covs = np.zeros((n_features, 1))
    y_std = y.std()
    y_name = y.name
    for i in range(n_features):
        feature = X.iloc[:, i]
        # Pearson correlation: cov(feature, y) / (std(feature) * std(y)).
        covs[i] = feature.cov(y) / (feature.std() * y_std)
        pearson_fig = go.Figure(data=go.Scatter(x=feature, y=y, mode='markers'))
        pearson_fig.update_layout(
            title="Pearson Correlation = " + str(covs[i]),
            xaxis_title="Feature - " + feature.name,
            yaxis_title=y_name
        )
        image_loc_str = output_path + "/" + feature.name + ".png"
        pearson_fig.write_image(image_loc_str)
        print("Pearson_Corr of " + feature.name + " = " + str(covs[i]))
if __name__ == '__main__':
    np.random.seed(0)

    # Question 1 - Load and preprocessing of housing prices dataset
    data = load_data("../datasets/house_prices.csv")

    # Question 2 - Feature evaluation with respect to response
    obs = data.drop(['price'], axis=1)
    prices = data['price']
    feature_evaluation(obs, prices, "./pearson_correlation_")

    # Question 3 - Split samples into training- and testing sets.
    # NOTE(review): assumes split_train_test returns
    # (train_X, train_y, test_X, test_y) -- confirm against IMLearn.utils.
    train_house_data, train_prices, test_house_data, test_prices = split_train_test(obs, prices, 0.75)

    # Question 4 - Fit model over increasing percentages of the overall
    # training data. For every percentage p in 10%..100%, 10 times: sample
    # p% of the training data, fit a linear model, record the test loss.
    # Then plot the mean loss with a (mean - 2*std, mean + 2*std) ribbon.
    lr = LinearRegression()
    percentages = np.arange(10, 101)
    sample_means = np.zeros(percentages.size)
    sample_stds = np.zeros(percentages.size)
    merged_train = train_house_data.merge(train_prices, left_index=True, right_index=True)
    for idx, p in enumerate(percentages):
        mean_loss = np.zeros(10)
        for rep in range(10):
            train_p = pd.DataFrame.sample(merged_train, frac=(p / 100))
            lr.fit(train_p.drop(['price'], axis=1).to_numpy(), train_p['price'].to_numpy())
            mean_loss[rep] = lr.loss(test_house_data.to_numpy(), test_prices.to_numpy())
        sample_means[idx] = np.mean(mean_loss)
        sample_stds[idx] = np.std(mean_loss)

    # Mean curve plus an invisible-line ribbon filled between +/- 2 std.
    mean_trace = go.Scatter(x=percentages, y=sample_means,
                            line=dict(color='rgb(31, 119, 180)'))
    upper_trace = go.Scatter(x=percentages, y=sample_means + 2 * sample_stds,
                             mode='lines', marker=dict(color="#444"),
                             line=dict(width=0), hoverinfo="skip",
                             showlegend=False)
    lower_trace = go.Scatter(x=percentages, y=sample_means - 2 * sample_stds,
                             marker=dict(color="#444"), line=dict(width=0),
                             mode='lines', fillcolor='rgba(68, 68, 68, 0.3)',
                             fill='tonexty', showlegend=False)
    q4_fig = go.Figure([mean_trace, upper_trace, lower_trace])
    q4_fig.update_layout(
        title="Mean Loss as function of Sample Size",
        xaxis_title="% Sample Size",
        yaxis_title="Mean Loss"
    )
    q4_fig.show()
| [
"plotly.graph_objects.Scatter",
"IMLearn.learners.regressors.linear_regression.LinearRegression",
"numpy.random.seed",
"pandas.read_csv",
"numpy.std",
"numpy.zeros",
"IMLearn.utils.split_train_test",
"numpy.mean",
"numpy.arange"
] | [((654, 675), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (665, 675), True, 'import pandas as pd\n'), ((3185, 3210), 'numpy.zeros', 'np.zeros', (['(n_features, 1)'], {}), '((n_features, 1))\n', (3193, 3210), True, 'import numpy as np\n'), ((3861, 3878), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3875, 3878), True, 'import numpy as np\n'), ((4372, 4407), 'IMLearn.utils.split_train_test', 'split_train_test', (['obs', 'prices', '(0.75)'], {}), '(obs, prices, 0.75)\n', (4388, 4407), False, 'from IMLearn.utils import split_train_test\n'), ((4918, 4936), 'IMLearn.learners.regressors.linear_regression.LinearRegression', 'LinearRegression', ([], {}), '()\n', (4934, 4936), False, 'from IMLearn.learners.regressors.linear_regression import LinearRegression\n'), ((4988, 5010), 'numpy.zeros', 'np.zeros', (['sample_count'], {}), '(sample_count)\n', (4996, 5010), True, 'import numpy as np\n'), ((5029, 5051), 'numpy.zeros', 'np.zeros', (['sample_count'], {}), '(sample_count)\n', (5037, 5051), True, 'import numpy as np\n'), ((5116, 5128), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5124, 5128), True, 'import numpy as np\n'), ((5633, 5651), 'numpy.mean', 'np.mean', (['mean_loss'], {}), '(mean_loss)\n', (5640, 5651), True, 'import numpy as np\n'), ((5680, 5697), 'numpy.std', 'np.std', (['mean_loss'], {}), '(mean_loss)\n', (5686, 5697), True, 'import numpy as np\n'), ((3390, 3437), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'X.iloc[:, i]', 'y': 'y', 'mode': '"""markers"""'}), "(x=X.iloc[:, i], y=y, mode='markers')\n", (3400, 3437), True, 'import plotly.graph_objects as go\n'), ((5761, 5779), 'numpy.arange', 'np.arange', (['(10)', '(101)'], {}), '(10, 101)\n', (5770, 5779), True, 'import numpy as np\n'), ((5952, 5970), 'numpy.arange', 'np.arange', (['(10)', '(101)'], {}), '(10, 101)\n', (5961, 5970), True, 'import numpy as np\n'), ((6359, 6377), 'numpy.arange', 'np.arange', (['(10)', '(101)'], {}), '(10, 
101)\n', (6368, 6377), True, 'import numpy as np\n')] |
# Copyright (C) 2022, <NAME> AG
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of The Regents or University of California nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Please contact the author of this library if you have any questions.
# Author: <NAME> (<EMAIL>)
import os
import numpy as np
from ast import literal_eval
import cv2
def check_if_pose_is_close(all_poses, position_to_test, distance):
    """
    Decide whether a position lies close to any previously seen pose.

    Parameters
    ----------
    all_poses : list
        a list of arrays containing camera positions
    position_to_test : ndarray
        a position to test
    distance : float
        threshold below which a pose counts as close

    Returns
    -------
    bool
        True iff position_to_test is within `distance` (L2 norm) of at
        least one position in all_poses
    """
    return any(cv2.norm(pose - position_to_test) < distance
               for pose in all_poses)
def get_cam_intrinsics(cam_dict):
    """
    Extract the relevant camera intrinsics from a metadata dictionary.

    Parameters
    ----------
    cam_dict : dict
        a dictionary containing all camera intrinsics

    Returns
    -------
    list
        [width, height, fx, fy, cx, cy]
    list or None
        lens-distortion lookup table (None when no distortion data present)
    ndarray or None
        distortion center scaled to the image resolution (None likewise)
    """
    height = cam_dict['Height']
    width = cam_dict['Width']
    row0 = literal_eval(cam_dict['IntrinsicMatrix.0'])
    row1 = literal_eval(cam_dict['IntrinsicMatrix.1'])
    data = [width, height, row0[0], row1[1], row0[2], row1[2]]

    if "LensDistortionInverseLookupTable" not in cam_dict:
        return data, None, None

    # NOTE(review): the guard checks the *inverse* LUT key, but the value
    # read is the forward "LensDistortionLookupTable" -- looks suspicious;
    # behavior kept as-is, confirm upstream which table is intended.
    inverse_lut = cam_dict["LensDistortionLookupTable"]
    ref_dims = literal_eval(cam_dict["IntrinsicMatrixReferenceDimensions"])
    distortion_center = literal_eval(cam_dict["LensDistortionCenter"])
    # Scale the distortion center from the reference resolution to this one.
    distortion_center = np.array(distortion_center) * (width / ref_dims[0])
    return data, inverse_lut, distortion_center
def bilinear_interpolation_01(x, y, values):
    """Interpolate values given at the corners of the [0,1]x[0,1] square.

    Parameters:
        x : float
        y : float
        values : ((v00, v01), (v10, v11))
            input grid with 4 values from which to interpolate.
            Inner dimension = x, thus v01 = value at (x=1, y=0).

    Returns:
        float
            interpolated value
    """
    (v00, v01), (v10, v11) = values
    w_x0, w_x1 = 1 - x, x
    w_y0, w_y1 = 1 - y, y
    # Weighted sum of the four corners.
    return (v00 * w_x0 * w_y0 +
            v01 * w_x1 * w_y0 +
            v10 * w_x0 * w_y1 +
            v11 * w_x1 * w_y1)
def linear_interpolation_01(x, values):
    """Interpolate between two values given at 0 and 1.

    Parameters:
        x : float
        values : (v0, v1)
            values at 0 and 1

    Returns:
        float
            interpolated value
    """
    v0, v1 = values
    return v0 * (1 - x) + v1 * x
def interpolate_depth_value(point2d, depth_image):
    """Bilinearly interpolate a depth value at a sub-pixel location.

    Parameters
    ----------
    point2d : ndarray
        2d location; point2d[1] addresses the first image axis (rows) and
        point2d[0] the second axis (columns), as used throughout this file
    depth_image : ndarray
        input depth image

    Returns
    -------
    float
        interpolated depth
    """
    row = point2d[1]
    col = point2d[0]
    r0, r1 = int(np.floor(row)), int(np.ceil(row))
    c0, c1 = int(np.floor(col)), int(np.ceil(col))
    if r0 == r1 and c0 == c1:
        # Exactly on a pixel: no interpolation needed.
        return depth_image[r0, c0]
    if r0 == r1:
        # On an integer row: interpolate along the column axis only.
        return linear_interpolation_01(
            col - c0, [depth_image[r0, c0], depth_image[r1, c1]])
    if c0 == c1:
        # On an integer column: interpolate along the row axis only.
        return linear_interpolation_01(
            row - r0, [depth_image[r0, c0], depth_image[r1, c1]])
    # General case: interpolate over the 2x2 neighborhood.
    corners = ((depth_image[r0, c0], depth_image[r1, c0]),
               (depth_image[r0, c1], depth_image[r1, c1]))
    return bilinear_interpolation_01(row - r0, col - c0, corners)
def unproject_pt_to_3d(point2d, depth_image, inv_cam_mat, bi_interp=True):
    """ Unproject a 2D point to 3D using a depth image and camera intrinsics

    Parameters
    ----------
    point2d : ndarray
        2D point to unproject to 3D
    depth_image : ndarray
        depth image used to unproject the 2D point
    inv_cam_mat : ndarray
        inverse of the camera matrix. Used to unproject point2d to a vector
    bi_interp : bool
        if point should be interpolated instead of taking the nearest neighbor

    Returns
    -------
    numpy array
        a 3D point corresponding to point2D and the depth
    """
    if bi_interp:
        depth = interpolate_depth_value(point2d, depth_image)
    else:
        # Nearest-neighbor lookup; out-of-bounds points get depth 0.
        row = int(np.round(point2d[1]))
        col = int(np.round(point2d[0]))
        in_bounds = (0 <= row < depth_image.shape[0] and
                     0 <= col < depth_image.shape[1])
        depth = depth_image[row, col] if in_bounds else 0.0
    # Scale the homogeneous image ray by depth, then map through K^-1.
    image_pt = depth * np.array([point2d[0], point2d[1], 1.0])
    return np.matmul(inv_cam_mat, image_pt), depth
def get_square_length(file):
    """
    Read the checkerboard square length and convert it to meters.

    Parameters
    ----------
    file : str
        path to checkersize.txt. Contains square length in centimeters [cm].

    Returns
    -------
    float
        square length in meters [m].
    """
    with open(file) as handle:
        centimeters = float(handle.read())
    return centimeters * 1e-2
def create_aruco_board(dataset_path):
    """
    Build the 10x8 ChArUco board plus the detection configuration.

    Parameters
    ----------
    dataset_path : str
        path to dataset also containing the checkersize.txt.

    Returns
    -------
    tuple
        termination criteria for subpixel estimation
    cv2.aruco_CharucoBoard
        aruco board object
    cv2.aruco_DetectorParameters()
        aruco detector params
    cv2.aruco_Dictionary()
        aruco dictionary
    float
        square length in meters
    """
    subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER,
                       100, 0.001)
    dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_ARUCO_ORIGINAL)
    detector_params = cv2.aruco.DetectorParameters_create()
    square_len = get_square_length(
        os.path.join(dataset_path, "checkersize.txt"))
    # Markers are half the size of the chessboard squares.
    board = cv2.aruco.CharucoBoard_create(
        10, 8, square_len, square_len / 2.0, dictionary)
    return subpix_criteria, board, detector_params, dictionary, square_len
def detect_corners(image, aruco_board, criteria, aruco_dict, aruco_params, cam_matrix=None):
    """
    Detect ChArUco corners in a gray-value image.

    Parameters
    ----------
    image : ndarray
        gray value image to detect corners on
    aruco_board : cv2.aruco_CharucoBoard
        aruco board object
    criteria : tuple
        subpixel termination criteria
    aruco_dict : cv2.aruco_Dictionary()
        aruco dictionary
    aruco_params : cv2.aruco_DetectorParameters()
        detector params
    cam_matrix : ndarray
        camera matrix 3x3

    Returns
    -------
    int
        number of detected corners (or None if no markers found)
    list
        list of charuco_corners (or None)
    list
        list of charuco ids (or None)
    """
    marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(
        image, dictionary=aruco_dict, parameters=aruco_params)
    if len(marker_corners) == 0:
        return None, None, None
    # Refine every detected marker corner to sub-pixel accuracy (in place).
    for corner in marker_corners:
        cv2.cornerSubPix(image, corner, winSize=(3, 3),
                         zeroZone=(-1, -1), criteria=criteria)
    return cv2.aruco.interpolateCornersCharuco(
        marker_corners, marker_ids, image, aruco_board,
        cameraMatrix=cam_matrix, distCoeffs=None, minMarkers=1)
| [
"cv2.aruco.CharucoBoard_create",
"cv2.aruco.DetectorParameters_create",
"numpy.ceil",
"numpy.floor",
"cv2.cornerSubPix",
"cv2.aruco.Dictionary_get",
"cv2.aruco.detectMarkers",
"numpy.array",
"cv2.aruco.interpolateCornersCharuco",
"numpy.matmul",
"ast.literal_eval",
"numpy.round",
"cv2.norm",... | [((2774, 2817), 'ast.literal_eval', 'literal_eval', (["cam_dict['IntrinsicMatrix.0']"], {}), "(cam_dict['IntrinsicMatrix.0'])\n", (2786, 2817), False, 'from ast import literal_eval\n'), ((2836, 2879), 'ast.literal_eval', 'literal_eval', (["cam_dict['IntrinsicMatrix.1']"], {}), "(cam_dict['IntrinsicMatrix.1'])\n", (2848, 2879), False, 'from ast import literal_eval\n'), ((7587, 7642), 'cv2.aruco.Dictionary_get', 'cv2.aruco.Dictionary_get', (['cv2.aruco.DICT_ARUCO_ORIGINAL'], {}), '(cv2.aruco.DICT_ARUCO_ORIGINAL)\n', (7611, 7642), False, 'import cv2\n'), ((7663, 7700), 'cv2.aruco.DetectorParameters_create', 'cv2.aruco.DetectorParameters_create', ([], {}), '()\n', (7698, 7700), False, 'import cv2\n'), ((7810, 7898), 'cv2.aruco.CharucoBoard_create', 'cv2.aruco.CharucoBoard_create', (['(10)', '(8)', 'square_length', '(square_length / 2.0)', 'aruco_dict'], {}), '(10, 8, square_length, square_length / 2.0,\n aruco_dict)\n', (7839, 7898), False, 'import cv2\n'), ((8695, 8773), 'cv2.aruco.detectMarkers', 'cv2.aruco.detectMarkers', (['image'], {'dictionary': 'aruco_dict', 'parameters': 'aruco_params'}), '(image, dictionary=aruco_dict, parameters=aruco_params)\n', (8718, 8773), False, 'import cv2\n'), ((2327, 2368), 'cv2.norm', 'cv2.norm', (['(all_poses[i] - position_to_test)'], {}), '(all_poses[i] - position_to_test)\n', (2335, 2368), False, 'import cv2\n'), ((3133, 3193), 'ast.literal_eval', 'literal_eval', (["cam_dict['IntrinsicMatrixReferenceDimensions']"], {}), "(cam_dict['IntrinsicMatrixReferenceDimensions'])\n", (3145, 3193), False, 'from ast import literal_eval\n'), ((3237, 3283), 'ast.literal_eval', 'literal_eval', (["cam_dict['LensDistortionCenter']"], {}), "(cam_dict['LensDistortionCenter'])\n", (3249, 3283), False, 'from ast import literal_eval\n'), ((4811, 4822), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (4819, 4822), True, 'import numpy as np\n'), ((4838, 4848), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (4845, 4848), True, 'import 
numpy as np\n'), ((4864, 4875), 'numpy.floor', 'np.floor', (['y'], {}), '(y)\n', (4872, 4875), True, 'import numpy as np\n'), ((4891, 4901), 'numpy.ceil', 'np.ceil', (['y'], {}), '(y)\n', (4898, 4901), True, 'import numpy as np\n'), ((6586, 6625), 'numpy.array', 'np.array', (['[point2d[0], point2d[1], 1.0]'], {}), '([point2d[0], point2d[1], 1.0])\n', (6594, 6625), True, 'import numpy as np\n'), ((6638, 6671), 'numpy.matmul', 'np.matmul', (['inv_cam_mat', 'impage_pt'], {}), '(inv_cam_mat, impage_pt)\n', (6647, 6671), True, 'import numpy as np\n'), ((7750, 7795), 'os.path.join', 'os.path.join', (['dataset_path', '"""checkersize.txt"""'], {}), "(dataset_path, 'checkersize.txt')\n", (7762, 7795), False, 'import os\n'), ((9040, 9169), 'cv2.aruco.interpolateCornersCharuco', 'cv2.aruco.interpolateCornersCharuco', (['corners', 'ids', 'image', 'aruco_board'], {'cameraMatrix': 'cam_matrix', 'distCoeffs': 'None', 'minMarkers': '(1)'}), '(corners, ids, image, aruco_board,\n cameraMatrix=cam_matrix, distCoeffs=None, minMarkers=1)\n', (9075, 9169), False, 'import cv2\n'), ((3411, 3438), 'numpy.array', 'np.array', (['distortion_center'], {}), '(distortion_center)\n', (3419, 3438), True, 'import numpy as np\n'), ((6304, 6324), 'numpy.round', 'np.round', (['point2d[1]'], {}), '(point2d[1])\n', (6312, 6324), True, 'import numpy as np\n'), ((6343, 6363), 'numpy.round', 'np.round', (['point2d[0]'], {}), '(point2d[0])\n', (6351, 6363), True, 'import numpy as np\n'), ((8888, 8978), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['image', 'corner'], {'winSize': '(3, 3)', 'zeroZone': '(-1, -1)', 'criteria': 'criteria'}), '(image, corner, winSize=(3, 3), zeroZone=(-1, -1), criteria\n =criteria)\n', (8904, 8978), False, 'import cv2\n')] |
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import math
import signal
import sys
from collections import OrderedDict
import h5py
import numpy as np
from six import string_types
from brainstorm.describable import Describable
from brainstorm import optional
from brainstorm.structure.network import Network
from brainstorm.tools import evaluate
from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info
class Hook(Describable):
    """Base class for all trainer hooks.

    A hook is invoked by the trainer every ``interval`` epochs or updates
    (depending on ``timescale``) and may log values, save state, or abort
    training by raising :class:`StopIteration`.
    """
    __undescribed__ = {
        '__name__',  # the name is saved in the trainer
        'run_verbosity'
    }
    __default_values__ = {
        'timescale': 'epoch',
        'interval': 1,
        'verbose': None
    }

    def __init__(self, name=None, timescale='epoch', interval=1, verbose=None):
        self.timescale = timescale
        self.interval = interval
        # Fall back to the class name when no explicit name was given.
        self.__name__ = name if name else self.__class__.__name__
        self.priority = 0
        self.verbose = verbose
        self.run_verbosity = None

    def start(self, net, stepper, verbose, named_data_iters):
        # Use the trainer's verbosity unless this hook sets its own.
        self.run_verbosity = verbose if self.verbose is None else self.verbose

    def message(self, msg):
        """Print an output message if :attr:`run_verbosity` is True."""
        if self.run_verbosity:
            print("{} >> {}".format(self.__name__, msg))

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        pass
# -------------------------------- Saviors ---------------------------------- #
class SaveBestNetwork(Hook):
    """
    Check to see if the specified log entry is at its best value and if so,
    save the network to a specified file.

    Can save the network when the log entry is at its minimum (such as an
    error) or maximum (such as accuracy) according to the ``criterion``
    argument. If no ``filename`` is given, the best parameters are cached
    in memory instead and can be retrieved via :meth:`load_best_network`.

    The ``timescale`` and ``interval`` should be the same as those for the
    monitoring hook which logs the quantity of interest.

    Args:
        log_name:
            Name of the log entry to be checked for improvement.
            It should be in the form <monitorname>.<log_name> where log_name
            itself may be a nested dictionary key in dotted notation.
        filename:
            Name of the HDF5 file to which the network should be saved.
        criterion:
            Indicates whether the log entry should be at its minimum or
            maximum value. Must be either 'min' or 'max'. Defaults to 'max'.
        name (Optional[str]):
            Name of this monitor. This name is used as a key in the trainer
            logs. Default is 'SaveBestNetwork'.
        timescale (Optional[str]):
            Specifies whether the Monitor should be called after each epoch or
            after each update. Default is 'epoch'.
        interval (Optional[int]):
            This monitor should be called every ``interval`` epochs/updates.
            Default is 1.
        verbose: bool, optional
            Specifies whether the logs of this monitor should be printed, and
            acts as a fallback verbosity for the used data iterator.
            If not set it defaults to the verbosity setting of the trainer.

    Examples:
        Add a hook to monitor a quantity of interest:

        >>> scorer = bs.scorers.Accuracy()
        >>> trainer.add_hook(bs.hooks.MonitorScores('valid_getter', [scorer],
        ...                                         name='validation'))

        Check every epoch and save the network if validation accuracy rises:

        >>> trainer.add_hook(bs.hooks.SaveBestNetwork('validation.Accuracy',
        ...                                           filename='best_acc.h5',
        ...                                           criterion='max'))

        Check every epoch and save the network if validation loss drops:

        >>> trainer.add_hook(bs.hooks.SaveBestNetwork('validation.total_loss',
        ...                                           filename='best_loss.h5',
        ...                                           criterion='min'))
    """
    __undescribed__ = {'parameters': None}
    __default_values__ = {'filename': None}

    def __init__(self, log_name, filename=None, criterion='max', name=None,
                 timescale='epoch', interval=1, verbose=None):
        super(SaveBestNetwork, self).__init__(name, timescale,
                                              interval, verbose)
        self.log_name = log_name
        self.filename = filename
        # BUGFIX: this was previously initialized as ``self.best_parameters``
        # although __call__, load_best_network and __undescribed__ all use
        # ``self.parameters``.  The mismatch caused an AttributeError when
        # load_best_network was called before any improvement was cached.
        self.parameters = None
        assert criterion == 'min' or criterion == 'max'
        # Start from the worst possible value for the chosen criterion.
        self.best_so_far = np.inf if criterion == 'min' else -np.inf
        self.best_t = None
        self.criterion = criterion

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        if epoch_nr == 0:
            # The monitored entry may not exist yet during the initial
            # evaluation; in that case there is nothing to compare against.
            try:
                e = get_by_path(logs, self.log_name)
            except KeyError:
                return
        e = get_by_path(logs, self.log_name)
        last = e[-1]
        if self.criterion == 'min':
            imp = last < self.best_so_far
        else:
            imp = last > self.best_so_far
        if imp:
            self.best_so_far = last
            self.best_t = epoch_nr if self.timescale == 'epoch' else update_nr
            params = net.get('parameters')
            if self.filename is not None:
                self.message("{} improved (criterion: {}). Saving network to "
                             "{}".format(self.log_name, self.criterion,
                                         self.filename))
                net.save_as_hdf5(self.filename)
            else:
                self.message("{} improved (criterion: {}). Caching parameters".
                             format(self.log_name, self.criterion))
                self.parameters = params
        else:
            self.message("Last saved parameters at {} {} when {} was {}".
                         format(self.timescale, self.best_t, self.log_name,
                                self.best_so_far))

    def load_best_network(self):
        """Return the best network seen so far.

        Loads it from the HDF5 file if a filename was given, otherwise
        returns the in-memory cached parameters (may be None if no
        improvement has occurred yet).
        """
        return Network.from_hdf5(self.filename) if self.filename is not None \
            else self.parameters
class SaveLogs(Hook):
    """
    Periodically write the trainer's logs dictionary to an HDF5 file.

    Default behavior is to save once every epoch.
    """

    def __init__(self, filename, name=None, timescale='epoch', interval=1):
        super(SaveLogs, self).__init__(name, timescale, interval)
        self.filename = filename

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        with h5py.File(self.filename, 'w') as hdf_file:
            hdf_file.attrs.create('info', get_brainstorm_info())
            hdf_file.attrs.create('format', b'Logs file v1.0')
            SaveLogs._save_recursively(hdf_file, logs)

    @staticmethod
    def _save_recursively(group, logs):
        # Nested dicts become HDF5 groups; everything else becomes a dataset.
        for entry_name, entry in logs.items():
            if not isinstance(entry, dict):
                group.create_dataset(entry_name, data=np.array(entry))
            else:
                SaveLogs._save_recursively(group.create_group(entry_name),
                                           entry)
class SaveNetwork(Hook):
    """
    Periodically dump the weights of the network to the given HDF5 file.

    Default behavior is to save after every training epoch.
    """

    def __init__(self, filename, name=None, timescale='epoch', interval=1):
        super(SaveNetwork, self).__init__(name, timescale, interval)
        self.filename = filename

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        net.save_as_hdf5(self.filename)

    def load_network(self):
        """Reload the most recently saved network from disk."""
        return Network.from_hdf5(self.filename)
# -------------------------------- Monitors --------------------------------- #
class MonitorLayerDeltas(Hook):
    """
    Monitor some statistics about all the deltas of a layer.

    Logs min/avg/max of the layer's internal buffers as well as its
    output deltas and input deltas.
    """

    def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
                 verbose=None):
        if name is None:
            name = "MonitorDeltas_{}".format(layer_name)
        super(MonitorLayerDeltas, self).__init__(name, timescale,
                                                 interval, verbose)
        self.layer_name = layer_name

    def start(self, net, stepper, verbose, named_data_iters):
        assert self.layer_name in net.layers.keys(), \
            "{} >> No layer named {} present in network. Available layers " \
            "are {}.".format(self.__name__, self.layer_name, net.layers.keys())

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        log = OrderedDict()
        for key, v in net.buffer[self.layer_name].internals.items():
            v = net.handler.get_numpy_copy(v)
            log[key] = OrderedDict()
            log[key]['min'] = v.min()
            log[key]['avg'] = v.mean()
            log[key]['max'] = v.max()

        out_deltas_log = log['output_deltas'] = OrderedDict()
        for key, v in net.buffer[self.layer_name].output_deltas.items():
            v = net.handler.get_numpy_copy(v)
            key_log = out_deltas_log[key] = OrderedDict()
            key_log['min'] = v.min()
            key_log['avg'] = v.mean()
            key_log['max'] = v.max()

        in_deltas_log = log['input_deltas'] = OrderedDict()
        for key, v in net.buffer[self.layer_name].input_deltas.items():
            key_log = in_deltas_log[key] = OrderedDict()
            v = net.handler.get_numpy_copy(v)
            # BUGFIX: these were previously written as key_log[key]['min']
            # (etc.), indexing the freshly created empty OrderedDict with
            # the buffer name, which raised a KeyError at runtime.
            key_log['min'] = v.min()
            key_log['avg'] = v.mean()
            key_log['max'] = v.max()

        return log
class MonitorLayerGradients(Hook):
    """
    Monitor some statistics about all the gradients of a layer.

    For every gradient buffer of the given layer, the minimum, mean and
    maximum value are logged.
    """

    def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
                 verbose=None):
        if name is None:
            name = "MonitorGradients_{}".format(layer_name)
        super(MonitorLayerGradients, self).__init__(name, timescale,
                                                    interval, verbose)
        self.layer_name = layer_name

    def start(self, net, stepper, verbose, named_data_iters):
        assert self.layer_name in net.layers.keys(), \
            "{} >> No layer named {} present in network. Available layers " \
            "are {}.".format(self.__name__, self.layer_name, net.layers.keys())

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        stats = OrderedDict()
        for buffer_name, grad in \
                net.buffer[self.layer_name].gradients.items():
            values = net.handler.get_numpy_copy(grad)
            entry = OrderedDict()
            entry['min'] = values.min()
            entry['avg'] = values.mean()
            entry['max'] = values.max()
            stats[buffer_name] = entry
        return stats
class MonitorLayerInOuts(Hook):
    """
    Monitor some statistics about all the inputs and outputs of a layer.

    Logs min/avg/max for every input and every output buffer of the
    given layer.
    """

    def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
                 verbose=None):
        if name is None:
            name = "MonitorInOuts_{}".format(layer_name)
        super(MonitorLayerInOuts, self).__init__(name, timescale,
                                                 interval, verbose)
        self.layer_name = layer_name

    def start(self, net, stepper, verbose, named_data_iters):
        assert self.layer_name in net.layers.keys(), \
            "{} >> No layer named {} present in network. Available layers " \
            "are {}.".format(self.__name__, self.layer_name, net.layers.keys())

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        log = OrderedDict()
        layer_buffers = net.buffer[self.layer_name]

        input_log = log['inputs'] = OrderedDict()
        for buffer_name, buf in layer_buffers.inputs.items():
            values = net.handler.get_numpy_copy(buf)
            input_log[buffer_name] = OrderedDict(
                [('min', values.min()), ('avg', values.mean()),
                 ('max', values.max())])

        output_log = log['outputs'] = OrderedDict()
        for buffer_name, buf in layer_buffers.outputs.items():
            values = net.handler.get_numpy_copy(buf)
            output_log[buffer_name] = OrderedDict(
                [('min', values.min()), ('avg', values.mean()),
                 ('max', values.max())])

        return log
class MonitorLayerParameters(Hook):
    """
    Monitor some statistics about all the parameters of a layer.

    For every parameter buffer this logs min/avg/max of the values, and
    for matrix-shaped parameters additionally min/avg/max of the per-row
    L2 norms.
    """

    def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
                 verbose=None):
        if name is None:
            name = "MonitorParameters_{}".format(layer_name)
        super(MonitorLayerParameters, self).__init__(name, timescale,
                                                     interval, verbose)
        self.layer_name = layer_name

    def start(self, net, stepper, verbose, named_data_iters):
        assert self.layer_name in net.layers.keys(), \
            "{} >> No layer named {} present in network. Available layers " \
            "are {}.".format(self.__name__, self.layer_name, net.layers.keys())

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        log = OrderedDict()
        for key, v in net.buffer[self.layer_name].parameters.items():
            v = net.handler.get_numpy_copy(v)
            log[key] = OrderedDict()
            log[key]['min'] = v.min()
            log[key]['avg'] = v.mean()
            log[key]['max'] = v.max()
            if len(v.shape) > 1:
                # Compute the per-row L2 norms once instead of three times
                # (the original recomputed np.sqrt(np.sum(...)) per stat).
                l2_norms = np.sqrt(np.sum(v ** 2, axis=1))
                log[key]['min_L2_norm'] = l2_norms.min()
                log[key]['avg_L2_norm'] = l2_norms.mean()
                log[key]['max_L2_norm'] = l2_norms.max()
        return log
class MonitorLoss(Hook):
    """
    Monitor the losses computed by the network on a dataset, using a
    named data iterator that must be passed to ``Trainer.train()``.
    """

    def __init__(self, iter_name, name=None, timescale='epoch', interval=1,
                 verbose=None):
        super(MonitorLoss, self).__init__(name, timescale, interval, verbose)
        self.iter_name = iter_name
        self.iter = None

    def start(self, net, stepper, verbose, named_data_iters):
        super(MonitorLoss, self).start(net, stepper, verbose, named_data_iters)
        if self.iter_name in named_data_iters:
            self.iter = named_data_iters[self.iter_name]
        else:
            raise KeyError("{} >> {} is not present in named_data_iters. "
                           "Remember to pass it as a kwarg to Trainer.train()"
                           .format(self.__name__, self.iter_name))

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        # No scorers: only the network's losses are evaluated.
        return evaluate(net, self.iter, scorers=())
class MonitorScores(Hook):
    """
    Monitor the losses and, optionally, several scores using a named
    data iterator.

    Args:
        iter_name (str):
            name of the data iterator to use (as specified in the train()
            call)
        scorers (List[brainstorm.scorers.Scorer]):
            List of Scorers to evaluate.
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'MonitorScores'
        timescale (Optional[str]):
            Whether to run after each epoch or each update.
            Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Whether this monitor's logs should be printed; also acts as a
            fallback verbosity for the used data iterator. Defaults to the
            trainer's verbosity setting.

    See Also:
        MonitorLoss: monitor the overall loss of the network.
    """

    def __init__(self, iter_name, scorers, name=None, timescale='epoch',
                 interval=1, verbose=None):
        super(MonitorScores, self).__init__(name, timescale, interval, verbose)
        self.iter_name = iter_name
        self.iter = None
        self.scorers = scorers

    def start(self, net, stepper, verbose, named_data_iters):
        super(MonitorScores, self).start(net, stepper, verbose,
                                         named_data_iters)
        if self.iter_name in named_data_iters:
            self.iter = named_data_iters[self.iter_name]
        else:
            raise KeyError("{} >> {} is not present in named_data_iters. "
                           "Remember to pass it as a kwarg to Trainer.train()"
                           .format(self.__name__, self.iter_name))

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        return evaluate(net, self.iter, self.scorers)
# -------------------------------- Stoppers --------------------------------- #
class EarlyStopper(Hook):
    """
    Stop the training if a log entry does not improve for some time.

    Training is stopped when the entry has not reached a new minimum
    (e.g. an error) or maximum (e.g. an accuracy) — depending on
    ``criterion`` — for ``patience`` consecutive checks.

    The ``timescale`` and ``interval`` should match those of the
    monitoring hook which logs the quantity of interest.

    Args:
        log_name:
            Name of the log entry to be checked for improvement, in the
            form <monitorname>.<log_name>; log_name itself may be a nested
            dictionary key in dotted notation.
        patience:
            Number of log updates to wait before stopping training.
            Default is 1.
        criterion:
            Either 'min' or 'max'. Defaults to 'min'.
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'EarlyStopper'.
        timescale (Optional[str]):
            'epoch' or 'update'. Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Verbosity; falls back to the trainer's setting if unset.

    Examples:
        Stop training if validation accuracy does not rise for 10 epochs:

        >>> trainer.add_hook(bs.hooks.EarlyStopper('validation.Accuracy',
        ...                                        patience=10,
        ...                                        criterion='max'))

        Stop training if validation loss does not drop for 5 epochs:

        >>> trainer.add_hook(bs.hooks.EarlyStopper('validation.total_loss',
        ...                                        patience=5,
        ...                                        criterion='min'))
    """
    __default_values__ = {'patience': 1}

    def __init__(self, log_name, patience=1, criterion='min',
                 name=None, timescale='epoch', interval=1, verbose=None):
        super(EarlyStopper, self).__init__(name, timescale, interval, verbose)
        self.log_name = log_name
        self.patience = patience
        if criterion not in ('min', 'max'):
            raise ValueError("Unknown criterion: '{}'"
                             "(Should be 'min' or 'max')".format(criterion))
        self.criterion = criterion

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        if epoch_nr == 0:
            # The monitored entry may not exist yet; skip silently then.
            try:
                get_by_path(logs, self.log_name)
            except KeyError:
                return
        entries = get_by_path(logs, self.log_name)
        if self.criterion == 'min':
            best_idx = np.argmin(entries)
        else:
            best_idx = np.argmax(entries)
        if len(entries) > best_idx + self.patience:
            self.message("Stopping because {} did not improve for {} checks "
                         "(criterion used : {}).".format(self.log_name,
                                                         self.patience,
                                                         self.criterion))
            raise StopIteration()
class StopAfterEpoch(Hook):
    """
    Stop the training after a specified number of epochs.

    Args:
        max_epochs (int):
            The number of epochs to train.
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'StopAfterEpoch'.
        timescale (Optional[str]):
            'epoch' or 'update'. Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Verbosity; falls back to the trainer's setting if unset.
    """

    def __init__(self, max_epochs, name=None, timescale='epoch', interval=1,
                 verbose=None):
        super(StopAfterEpoch, self).__init__(name, timescale,
                                             interval, verbose)
        self.max_epochs = max_epochs

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        # Guard clause: keep training while under the limit.
        if epoch_nr < self.max_epochs:
            return
        self.message("Stopping because the maximum number of epochs ({}) "
                     "was reached.".format(self.max_epochs))
        raise StopIteration()
class StopAfterThresholdReached(Hook):
    """
    Stop the training once a log entry reaches the given threshold.

    Training stops when the entry becomes small enough (e.g. an error)
    or large enough (e.g. an accuracy), depending on ``criterion``.

    Args:
        log_name:
            Name of the log entry to check, in the form
            <monitorname>.<log_name>; log_name itself may be a nested
            dictionary key in dotted notation.
        threshold:
            The threshold value to reach.
        criterion:
            Either 'min' or 'max'. Defaults to 'min'.
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'StopAfterThresholdReached'.
        timescale (Optional[str]):
            'epoch' or 'update'. Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Verbosity; falls back to the trainer's setting if unset.

    Examples:
        Stop training if validation set accuracy is at least 97 %:

        >>> trainer.add_hook(StopAfterThresholdReached('validation.Accuracy',
        ...                                            threshold=0.97,
        ...                                            criterion='max'))

        Stop training if loss on validation set goes below 0.2:

        >>> trainer.add_hook(StopAfterThresholdReached('validation.total_loss',
        ...                                            threshold=0.2,
        ...                                            criterion='min'))
    """

    def __init__(self, log_name, threshold, criterion='min',
                 name=None, timescale='epoch', interval=1, verbose=None):
        super(StopAfterThresholdReached, self).__init__(name, timescale,
                                                        interval, verbose)
        self.log_name = log_name
        self.threshold = threshold
        if criterion not in ('min', 'max'):
            raise ValueError("Unknown criterion: '{}'"
                             "(Must be 'min' or 'max')".format(criterion))
        self.criterion = criterion

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        entries = get_by_path(logs, self.log_name)
        # criterion is validated in __init__, so it is either 'max' or 'min'.
        if self.criterion == 'max':
            reached = max(entries) >= self.threshold
        else:
            reached = min(entries) <= self.threshold
        if reached:
            self.message("Stopping because {} has reached the threshold {} "
                         "(criterion used : {})".format(
                             self.log_name, self.threshold, self.criterion))
            raise StopIteration()
class StopOnNan(Hook):
    """
    Stop the training if infinite or NaN values are found.

    Checks a configurable list of trainer logs, optionally the network
    parameters, and optionally the rolling training loss.

    Args:
        logs_to_check (Optional[list, tuple]):
            A list of trainer logs to check in dotted notation. Defaults to ().
        check_parameters (Optional[bool]):
            Indicates whether the parameters should be checked for NaN.
            Defaults to True.
        check_training_loss (Optional[bool]):
            Whether the rolling training loss should also be checked.
            Defaults to True.
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'StopOnNan'.
        timescale (Optional[str]):
            'epoch' or 'update'. Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Verbosity; falls back to the trainer's setting if unset.
    """

    def __init__(self, logs_to_check=(), check_parameters=True,
                 check_training_loss=True, name=None, timescale='epoch',
                 interval=1, verbose=None):
        super(StopOnNan, self).__init__(name, timescale, interval, verbose)
        # Accept a single log name as a convenience.
        if isinstance(logs_to_check, string_types):
            self.logs_to_check = [logs_to_check]
        else:
            self.logs_to_check = logs_to_check
        self.check_parameters = check_parameters
        self.check_training_loss = check_training_loss

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        for log_name in self.logs_to_check:
            values = get_by_path(logs, log_name)
            if not np.all(np.isfinite(values)):
                self.message("NaN or inf detected in {}!".format(log_name))
                raise StopIteration()

        if self.check_parameters:
            if not net.handler.is_fully_finite(net.buffer.parameters):
                self.message("NaN or inf detected in parameters!")
                raise StopIteration()

        if self.check_training_loss and 'rolling_training' in logs:
            rtrain = logs['rolling_training']
            loss = (rtrain['total_loss'] if 'total_loss' in rtrain
                    else rtrain['Loss'])
            if not np.all(np.isfinite(loss)):
                self.message("NaN or inf detected in rolling training loss!")
                raise StopIteration()
class StopOnSigQuit(Hook):
    """
    Stop training at the next hook invocation after a SIGQUIT
    (usually bound to Ctrl+backslash) was received.

    This makes it possible to leave the training loop gracefully and
    continue with the rest of the program.

    Args:
        name (Optional[str]):
            Name of this monitor, used as a key in the trainer logs.
            Default is 'StopOnSigQuit'.
        timescale (Optional[str]):
            'epoch' or 'update'. Default is 'epoch'.
        interval (Optional[int]):
            Run every ``interval`` epochs/updates. Default is 1.
        verbose: bool, optional
            Verbosity; falls back to the trainer's setting if unset.
    """
    __undescribed__ = {'quit': False}

    def __init__(self, name=None, timescale='epoch', interval=1, verbose=None):
        super(StopOnSigQuit, self).__init__(name, timescale, interval,
                                            verbose=verbose)
        self.quit = False

    def start(self, net, stepper, verbose, named_data_iters):
        super(StopOnSigQuit, self).start(net, stepper, verbose,
                                         named_data_iters)
        # Reset the flag and (re-)install the signal handler; the handler
        # only sets a flag so training stops at a safe point.
        self.quit = False
        signal.signal(signal.SIGQUIT, self.receive_signal)

    def receive_signal(self, signum, stack):
        self.message('Interrupting')
        self.quit = True

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        if self.quit:
            raise StopIteration('Received SIGQUIT signal.')
# ------------------------------ Visualizers -------------------------------- #
# Fall back to a mock when bokeh is not installed so importing this module
# never fails; the mock presumably raises on use — TODO confirm in
# brainstorm.optional.
if not optional.has_bokeh:
    BokehVisualizer = optional.bokeh_mock
else:
    import bokeh.plotting as bk
    import warnings

    class BokehVisualizer(Hook):
        """
        Visualizes log values in your browser during training time using the
        Bokeh plotting library.
        Before running the trainer the user is required to have the Bokeh
        Server running.
        By default the visualization is discarded upon closing the webbrowser.
        However if an output file is specified then the .html file will be
        saved after each iteration at the specified location.
        Args:
            log_names (list, array):
                Contains the name of the logs that are being recorded to be
                visualized. log_names should be of the form
                <monitorname>.<log_name> where log_name itself may be a nested
                dictionary key in dotted notation.
            filename (Optional, str):
                The location to which the .html file containing the accuracy
                plot should be saved.
            timescale (Optional[str]):
                Specifies whether the Monitor should be called after each
                epoch or after each update. Default is 'epoch'
            interval (Optional[int]):
                This monitor should be called every ``interval``
                number of epochs/updates. Default is 1.
            name (Optional[str]):
                Name of this monitor. This name is used as a key in the trainer
                logs. Default is 'MonitorScores'
            verbose: bool, optional
                Specifies whether the logs of this monitor should be printed,
                and acts as a fallback verbosity for the used data iterator.
                If not set it defaults to the verbosity setting of the trainer.
        """

        def __init__(self, log_names, filename=None, timescale='epoch',
                     interval=1, name=None, verbose=None):
            super(BokehVisualizer, self).__init__(name, timescale, interval,
                                                  verbose)
            # Accept a single log name or a sequence of names.
            if isinstance(log_names, string_types):
                self.log_names = [log_names]
            elif isinstance(log_names, (tuple, list)):
                self.log_names = log_names
            else:
                raise ValueError('log_names must be either str or list but'
                                 ' was {}'.format(type(log_names)))
            self.filename = filename
            self.bk = bk
            self.TOOLS = "resize,crosshair,pan,wheel_zoom,box_zoom,reset,save"
            # Line colors are cycled over when more logs than colors exist.
            self.colors = ['blue', 'green', 'red', 'olive', 'cyan', 'aqua',
                           'gray']
            # Turn warnings into errors so a failed connection to the Bokeh
            # server (which only warns) can be detected and reported.
            # NOTE(review): bk.output_server() belongs to the legacy
            # bokeh-server workflow and was removed in later Bokeh releases —
            # confirm the pinned Bokeh version supports it.
            warnings.filterwarnings('error')
            try:
                self.bk.output_server(self.__name__)
                warnings.resetwarnings()
            except Warning:
                raise StopIteration('Bokeh server is not running')
            self.fig = self.bk.figure(
                title=self.__name__, x_axis_label=self.timescale,
                y_axis_label='value', tools=self.TOOLS,
                plot_width=1000, x_range=(0, 25), y_range=(0, 1))

        def start(self, net, stepper, verbose, named_data_iters):
            count = 0
            # create empty line objects
            for log_name in self.log_names:
                self.fig.line([], [], legend=log_name, line_width=2,
                              color=self.colors[count % len(self.colors)],
                              name=log_name)
                count += 1
            self.bk.show(self.fig)
            self.bk.output_file('bokeh_visualisation.html',
                                title=self.__name__, mode='cdn')

        def __call__(self, epoch_nr, update_nr, net, stepper, logs):
            # Nothing to plot during the initial evaluation.
            if epoch_nr == 0:
                return
            # Push the complete history of each monitored log to the server.
            for log_name in self.log_names:
                renderer = self.fig.select(dict(name=log_name))
                datasource = renderer[0].data_source
                datasource.data["y"] = get_by_path(logs, log_name)
                datasource.data["x"] = range(len(datasource.data["y"]))
                self.bk.cursession().store_objects(datasource)
            if self.filename is not None:
                self.bk.save(self.fig, filename=self.filename + ".html")
class ProgressBar(Hook):
    """ Adds a progress bar to show the training progress. """

    def __init__(self):
        # Always runs on the 'update' timescale with interval 1.
        super(ProgressBar, self).__init__(None, 'update', 1)
        self.length = None
        self.bar = None

    def start(self, net, stepper, verbose, named_data_iters):
        assert 'training_data_iter' in named_data_iters
        self.length = named_data_iters['training_data_iter'].length

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        assert epoch_nr == 0 or math.ceil(update_nr / self.length) == epoch_nr
        position = update_nr % self.length
        if position == 1:
            # First update of an epoch: start a fresh bar.
            self.bar = progress_bar(self.length)
            print(next(self.bar), end='')
            sys.stdout.flush()
        elif position == 0:
            # Last update of the epoch: complete the bar (with newline).
            if self.bar:
                print(self.bar.send(self.length))
        else:
            print(self.bar.send(position), end='')
            sys.stdout.flush()
# ----------------------------- Miscellaneous ------------------------------- #
class InfoUpdater(Hook):
    """ Save the information from logs to the Sacred custom info dict"""

    def __init__(self, run, name=None, timescale='epoch', interval=1):
        super(InfoUpdater, self).__init__(name, timescale, interval)
        self.run = run

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        info = self.run.info
        info.update(epoch_nr=epoch_nr, update_nr=update_nr, logs=logs)
        # The parameter count never changes, so record it only once.
        if 'nr_parameters' not in info:
            info['nr_parameters'] = net.buffer.parameters.size
class ModifyStepperAttribute(Hook):
    """Modify an attribute of the training stepper (e.g. the learning rate)
    according to a user-supplied schedule function."""

    def __init__(self, schedule, attr_name='learning_rate',
                 timescale='epoch', interval=1, name=None, verbose=None):
        super(ModifyStepperAttribute, self).__init__(name, timescale,
                                                     interval, verbose)
        self.schedule = schedule
        self.attr_name = attr_name

    def start(self, net, stepper, verbose, monitor_kwargs):
        super(ModifyStepperAttribute, self).start(net, stepper, verbose,
                                                  monitor_kwargs)
        assert hasattr(stepper, self.attr_name), \
            "The stepper {} does not have the attribute {}".format(
                stepper.__class__.__name__, self.attr_name)

    def __call__(self, epoch_nr, update_nr, net, stepper, logs):
        new_value = self.schedule(epoch_nr, update_nr, self.timescale,
                                  self.interval, net, stepper, logs)
        setattr(stepper, self.attr_name, new_value)
| [
"h5py.File",
"brainstorm.structure.network.Network.from_hdf5",
"numpy.sum",
"numpy.argmax",
"warnings.filterwarnings",
"brainstorm.utils.get_brainstorm_info",
"warnings.resetwarnings",
"math.ceil",
"brainstorm.tools.evaluate",
"numpy.argmin",
"numpy.isfinite",
"sys.stdout.flush",
"numpy.arra... | [((5021, 5053), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'self.log_name'], {}), '(logs, self.log_name)\n', (5032, 5053), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((7693, 7725), 'brainstorm.structure.network.Network.from_hdf5', 'Network.from_hdf5', (['self.filename'], {}), '(self.filename)\n', (7710, 7725), False, 'from brainstorm.structure.network import Network\n'), ((8636, 8649), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8647, 8649), False, 'from collections import OrderedDict\n'), ((8966, 8979), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8977, 8979), False, 'from collections import OrderedDict\n'), ((9316, 9329), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9327, 9329), False, 'from collections import OrderedDict\n'), ((10496, 10509), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10507, 10509), False, 'from collections import OrderedDict\n'), ((11637, 11650), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11648, 11650), False, 'from collections import OrderedDict\n'), ((11687, 11700), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11698, 11700), False, 'from collections import OrderedDict\n'), ((12017, 12030), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12028, 12030), False, 'from collections import OrderedDict\n'), ((13179, 13192), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13190, 13192), False, 'from collections import OrderedDict\n'), ((14744, 14780), 'brainstorm.tools.evaluate', 'evaluate', (['net', 'self.iter'], {'scorers': '()'}), '(net, self.iter, scorers=())\n', (14752, 14780), False, 'from brainstorm.tools import evaluate\n'), ((16788, 16826), 'brainstorm.tools.evaluate', 'evaluate', (['net', 'self.iter', 'self.scorers'], {}), '(net, self.iter, self.scorers)\n', (16796, 16826), False, 'from brainstorm.tools import evaluate\n'), ((20170, 20202), 
'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'self.log_name'], {}), '(logs, self.log_name)\n', (20181, 20202), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((24849, 24881), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'self.log_name'], {}), '(logs, self.log_name)\n', (24860, 24881), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((29530, 29580), 'signal.signal', 'signal.signal', (['signal.SIGQUIT', 'self.receive_signal'], {}), '(signal.SIGQUIT, self.receive_signal)\n', (29543, 29580), False, 'import signal\n'), ((6153, 6185), 'brainstorm.structure.network.Network.from_hdf5', 'Network.from_hdf5', (['self.filename'], {}), '(self.filename)\n', (6170, 6185), False, 'from brainstorm.structure.network import Network\n'), ((6656, 6685), 'h5py.File', 'h5py.File', (['self.filename', '"""w"""'], {}), "(self.filename, 'w')\n", (6665, 6685), False, 'import h5py\n'), ((8788, 8801), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8799, 8801), False, 'from collections import OrderedDict\n'), ((9143, 9156), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9154, 9156), False, 'from collections import OrderedDict\n'), ((9445, 9458), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9456, 9458), False, 'from collections import OrderedDict\n'), ((10648, 10661), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10659, 10661), False, 'from collections import OrderedDict\n'), ((11852, 11865), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11863, 11865), False, 'from collections import OrderedDict\n'), ((12138, 12151), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12149, 12151), False, 'from collections import OrderedDict\n'), ((13332, 13345), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13343, 13345), False, 'from collections import OrderedDict\n'), ((20222, 20234), 'numpy.argmin', 
'np.argmin', (['e'], {}), '(e)\n', (20231, 20234), True, 'import numpy as np\n'), ((20267, 20279), 'numpy.argmax', 'np.argmax', (['e'], {}), '(e)\n', (20276, 20279), True, 'import numpy as np\n'), ((27264, 27291), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'log_name'], {}), '(logs, log_name)\n', (27275, 27291), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((32662, 32694), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""error"""'], {}), "('error')\n", (32685, 32694), False, 'import warnings\n'), ((34906, 34931), 'brainstorm.utils.progress_bar', 'progress_bar', (['self.length'], {}), '(self.length)\n', (34918, 34931), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((34986, 35004), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (35002, 35004), False, 'import sys\n'), ((4923, 4955), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'self.log_name'], {}), '(logs, self.log_name)\n', (4934, 4955), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((6727, 6748), 'brainstorm.utils.get_brainstorm_info', 'get_brainstorm_info', ([], {}), '()\n', (6746, 6748), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((20073, 20105), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'self.log_name'], {}), '(logs, self.log_name)\n', (20084, 20105), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((32781, 32805), 'warnings.resetwarnings', 'warnings.resetwarnings', ([], {}), '()\n', (32803, 32805), False, 'import warnings\n'), ((34004, 34031), 'brainstorm.utils.get_by_path', 'get_by_path', (['logs', 'log_name'], {}), '(logs, log_name)\n', (34015, 34031), False, 'from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info\n'), ((34795, 34829), 'math.ceil', 'math.ceil', (['(update_nr / self.length)'], {}), '(update_nr / 
self.length)\n', (34804, 34829), False, 'import math\n'), ((35215, 35233), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (35231, 35233), False, 'import sys\n'), ((27318, 27334), 'numpy.isfinite', 'np.isfinite', (['log'], {}), '(log)\n', (27329, 27334), True, 'import numpy as np\n'), ((27941, 27958), 'numpy.isfinite', 'np.isfinite', (['loss'], {}), '(loss)\n', (27952, 27958), True, 'import numpy as np\n'), ((7166, 7179), 'numpy.array', 'np.array', (['log'], {}), '(log)\n', (7174, 7179), True, 'import numpy as np\n'), ((13544, 13566), 'numpy.sum', 'np.sum', (['(v ** 2)'], {'axis': '(1)'}), '(v ** 2, axis=1)\n', (13550, 13566), True, 'import numpy as np\n'), ((13624, 13646), 'numpy.sum', 'np.sum', (['(v ** 2)'], {'axis': '(1)'}), '(v ** 2, axis=1)\n', (13630, 13646), True, 'import numpy as np\n'), ((13762, 13784), 'numpy.sum', 'np.sum', (['(v ** 2)'], {'axis': '(1)'}), '(v ** 2, axis=1)\n', (13768, 13784), True, 'import numpy as np\n')] |
import torch
import math
import torch.nn as nn
import torch.nn.functional as F
import scipy.spatial as sp
import numpy as np
from torch_connectomics.model.utils import *
from torch_connectomics.model.blocks import *
class ClassificationNet(nn.Module):
    """3D convolutional binary classifier.

    Five residual encoder stages separated by average-pool downsampling,
    followed by two fully connected layers that produce one value per
    sample.  NOTE(review): ``non_linearity`` is stored but its application
    in ``forward`` is commented out, so the network returns raw logits.
    """

    def __init__(self, in_channel=1, filters=(16, 16, 32, 32, 64), non_linearity=torch.sigmoid):
        super().__init__()
        # encoding path
        # Stage 1: anisotropic kernels (1xKxK) that convolve in-plane only,
        # finished by a 2D residual block.
        self.layer1_E = nn.Sequential(
            conv3d_bn_elu(in_planes=in_channel, out_planes=filters[0],
                          kernel_size=(1,5,5), stride=1, padding=(0,2,2)),
            conv3d_bn_elu(in_planes=filters[0], out_planes=filters[0],
                          kernel_size=(1,3,3), stride=1, padding=(0,1,1)),
            residual_block_2d(filters[0], filters[0], projection=False)
        )
        # Stages 2-5: isotropic 3x3x3 convolutions, each followed by two
        # 3D residual blocks at the same channel width.
        self.layer2_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[0], out_planes=filters[1],
                          kernel_size=(3,3,3), stride=1, padding=(1,1,1)),
            residual_block_3d(filters[1], filters[1], projection=False),
            residual_block_3d(filters[1], filters[1], projection=False)
        )
        self.layer3_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[1], out_planes=filters[2],
                          kernel_size=(3,3,3), stride=1, padding=(1,1,1)),
            residual_block_3d(filters[2], filters[2], projection=False),
            residual_block_3d(filters[2], filters[2], projection=False)
        )
        self.layer4_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[2], out_planes=filters[3],
                          kernel_size=(3,3,3), stride=1, padding=(1,1,1)),
            residual_block_3d(filters[3], filters[3], projection=False),
            residual_block_3d(filters[3], filters[3], projection=False)
        )
        self.layer5_E = nn.Sequential(
            conv3d_bn_elu(in_planes=filters[3], out_planes=filters[4],
                          kernel_size=(3,3,3), stride=1, padding=(1,1,1)),
            residual_block_3d(filters[4], filters[4], projection=False),
            residual_block_3d(filters[4], filters[4], projection=False)
        )
        # pooling & upsample blocks
        # down: halves H and W only (used after stage 1);
        # down_z: halves all three spatial dimensions.
        self.down = nn.AvgPool3d(kernel_size=(1,2,2), stride=(1,2,2))
        self.down_z = nn.AvgPool3d(kernel_size=(2, 2, 2), stride=(2, 2, 2))
        # assumes the feature map entering the classifier is 4x6x6 for the
        # expected input size -- TODO confirm against the training pipeline
        self.linear_layer_in_sz = filters[-1]*4*6*6
        self.fc_1 = nn.Linear(self.linear_layer_in_sz, 64)
        self.fc_2 = nn.Linear(64, 1)
        self.dropout = nn.modules.Dropout(p=0.33)
        self.non_linearity = non_linearity
        #initialization
        ortho_init(self)

    def forward(self, x):
        """Encode the input volume and return one unnormalised score per sample."""
        # encoding path
        z1 = self.layer1_E(x)
        x = self.down(z1)
        z2 = self.layer2_E(x)
        x = self.down_z(z2)
        x = self.dropout(x)
        z3 = self.layer3_E(x)
        x = self.down_z(z3)
        x = self.dropout(x)
        z4 = self.layer4_E(x)
        x = self.down_z(z4)
        x = self.dropout(x)
        z5 = self.layer5_E(x)
        x = self.down_z(z5)
        x = self.dropout(x)
        # flatten and classify
        x = x.view(-1, self.linear_layer_in_sz)
        x = self.fc_1(x)
        x = self.fc_2(x)
        # x = self.non_linearity(x)
        return x
def get_distance_feature(size):
    """Return a float32 volume of Euclidean distances from the center voxel.

    Each entry holds the distance of its (z, y, x) position from the volume
    center ``(size[0]//2, size[1]//2, size[2]//2)``.
    """
    # Build centered int32 coordinate axes, then broadcast them to a grid.
    axes = [
        np.arange(n, dtype=np.int16).astype(np.int32) - n // 2
        for n in (size[0], size[1], size[2])
    ]
    grids = np.meshgrid(*axes, indexing='ij')
    squared = sum(g ** 2 for g in grids)
    return np.sqrt(squared, dtype=np.float32)
"numpy.meshgrid",
"numpy.arange",
"torch.nn.Linear",
"torch.nn.modules.Dropout",
"torch.nn.AvgPool3d",
"numpy.sqrt"
] | [((3307, 3341), 'numpy.arange', 'np.arange', (['size[0]'], {'dtype': 'np.int16'}), '(size[0], dtype=np.int16)\n', (3316, 3341), True, 'import numpy as np\n'), ((3350, 3384), 'numpy.arange', 'np.arange', (['size[1]'], {'dtype': 'np.int16'}), '(size[1], dtype=np.int16)\n', (3359, 3384), True, 'import numpy as np\n'), ((3393, 3427), 'numpy.arange', 'np.arange', (['size[2]'], {'dtype': 'np.int16'}), '(size[2], dtype=np.int16)\n', (3402, 3427), True, 'import numpy as np\n'), ((3442, 3477), 'numpy.meshgrid', 'np.meshgrid', (['z', 'y', 'x'], {'indexing': '"""ij"""'}), "(z, y, x, indexing='ij')\n", (3453, 3477), True, 'import numpy as np\n'), ((3624, 3675), 'numpy.sqrt', 'np.sqrt', (['(Z ** 2 + Y ** 2 + X ** 2)'], {'dtype': 'np.float32'}), '(Z ** 2 + Y ** 2 + X ** 2, dtype=np.float32)\n', (3631, 3675), True, 'import numpy as np\n'), ((2243, 2296), 'torch.nn.AvgPool3d', 'nn.AvgPool3d', ([], {'kernel_size': '(1, 2, 2)', 'stride': '(1, 2, 2)'}), '(kernel_size=(1, 2, 2), stride=(1, 2, 2))\n', (2255, 2296), True, 'import torch.nn as nn\n'), ((2315, 2368), 'torch.nn.AvgPool3d', 'nn.AvgPool3d', ([], {'kernel_size': '(2, 2, 2)', 'stride': '(2, 2, 2)'}), '(kernel_size=(2, 2, 2), stride=(2, 2, 2))\n', (2327, 2368), True, 'import torch.nn as nn\n'), ((2442, 2480), 'torch.nn.Linear', 'nn.Linear', (['self.linear_layer_in_sz', '(64)'], {}), '(self.linear_layer_in_sz, 64)\n', (2451, 2480), True, 'import torch.nn as nn\n'), ((2501, 2517), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(1)'], {}), '(64, 1)\n', (2510, 2517), True, 'import torch.nn as nn\n'), ((2542, 2568), 'torch.nn.modules.Dropout', 'nn.modules.Dropout', ([], {'p': '(0.33)'}), '(p=0.33)\n', (2560, 2568), True, 'import torch.nn as nn\n')] |
import numpy as np
import numba
from scipy import ndimage as scnd
from ..util import image_utils as iu
from ..beam import gen_probe as gp
from ..pty import pty_utils as pu
def single_side_band(processed_data4D,
                     aperture_mrad,
                     voltage,
                     image_size,
                     calibration_pm):
    """Single-side-band ptychographic reconstruction from a 4D-STEM dataset.

    NOTE(review): this function references several names that are not
    defined in this module or its parameters: ``dc`` (metadata source),
    ``G`` (the Fourier-transformed 4D data), ``A``, ``C`` (aberration
    coefficients), ``lam`` (wavelength -- ``e_wavelength_pm`` is computed
    but never used), ``xp`` (array backend), ``cartesian_aberrations`` and
    ``aperture_xp``.  It cannot run as written; it appears to be a partial
    port.  The parameters ``image_size`` and ``calibration_pm`` and the
    local ``theta`` are also unused.  TODO: fix before use.
    """
    e_wavelength_pm = pu.wavelength_pm(voltage)  # NOTE(review): unused; ``lam`` is used below instead
    alpha_rad = aperture_mrad/1000
    eps = 1e-3  # threshold on |Gamma| below which pixels are ignored
    k_max = dc.metadata.calibration['K_pix_size']
    dxy = dc.metadata.calibration['R_pix_size']
    theta = np.deg2rad(dc.metadata.calibration['R_to_K_rotation_degrees'])
    ny, nx, nky, nkx = processed_data4D.shape
    # Detector-plane (K) and scan-plane (Q) Fourier coordinates.
    Kx, Ky = pu.fourier_coords_1D([nkx, nky], k_max, fft_shifted=True)
    Qx, Qy = pu.fourier_coords_1D([nx, ny], dxy)
    # |K + Q| and |K - Q| magnitudes, broadcast over scan frequencies.
    Kplus = np.sqrt((Kx + Qx[:, :, None, None]) ** 2 + (Ky + Qy[:, :, None, None]) ** 2)
    Kminus = np.sqrt((Kx - Qx[:, :, None, None]) ** 2 + (Ky - Qy[:, :, None, None]) ** 2)
    K = np.sqrt(Kx ** 2 + Ky ** 2)
    # Shifted aperture/aberration functions A(K + Q) and A(K - Q).
    A_KplusQ = np.zeros_like(G)
    A_KminusQ = np.zeros_like(G)
    for ix, qx in enumerate(Qx[0]):
        for iy, qy in enumerate(Qy[:, 0]):
            x = Kx + qx
            y = Ky + qy
            A_KplusQ[iy, ix] = np.exp(1j * cartesian_aberrations(x, y, lam, C)) * aperture_xp(x, y, lam, alpha_rad,
                                                                                               edge=0)
            x = Kx - qx
            y = Ky - qy
            A_KminusQ[iy, ix] = np.exp(1j * cartesian_aberrations(x, y, lam, C)) * aperture_xp(x, y, lam, alpha_rad,
                                                                                                edge=0)
    # Gamma is the SSB transfer function; the double-overlap masks select
    # the two disk-overlap ("side band") regions.
    Gamma = np.conj(A) * A_KminusQ - A * np.conj(A_KplusQ)
    double_overlap1 = (Kplus < alpha_rad / lam) * (K < alpha_rad / lam) * (Kminus > alpha_rad / lam)
    double_overlap2 = (Kplus > alpha_rad / lam) * (K < alpha_rad / lam) * (Kminus < alpha_rad / lam)
    Psi_Qp = np.zeros((ny, nx), dtype=np.complex64)
    Psi_Qp_left_sb = np.zeros((ny, nx), dtype=np.complex64)
    Psi_Qp_right_sb = np.zeros((ny, nx), dtype=np.complex64)
    for y in range(ny):
        for x in range(nx):
            Gamma_abs = np.abs(Gamma[y, x])
            take = Gamma_abs > eps
            Psi_Qp[y, x] = np.sum(G[y, x][take] * Gamma[y, x][take].conj())
            Psi_Qp_left_sb[y, x] = np.sum(G[y, x][double_overlap1[y, x]])
            Psi_Qp_right_sb[y, x] = np.sum(G[y, x][double_overlap2[y, x]])
            # The zero scan frequency carries the mean intensity.
            if x == 0 and y == 0:
                Psi_Qp[y, x] = np.sum(np.abs(G[y, x]))
                Psi_Qp_left_sb[y, x] = np.sum(np.abs(G[y, x]))
                Psi_Qp_right_sb[y, x] = np.sum(np.abs(G[y, x]))
    # Back to real space: the reconstructed object functions.
    Psi_Rp = xp.fft.ifft2(Psi_Qp, norm='ortho')
    Psi_Rp_left_sb = xp.fft.ifft2(Psi_Qp_left_sb, norm='ortho')
    Psi_Rp_right_sb = xp.fft.ifft2(Psi_Qp_right_sb, norm='ortho')
    return Psi_Rp, Psi_Rp_left_sb, Psi_Rp_right_sb
"numpy.conj",
"numpy.zeros_like",
"numpy.abs",
"numpy.sum",
"numpy.deg2rad",
"numpy.zeros",
"numpy.sqrt"
] | [((557, 619), 'numpy.deg2rad', 'np.deg2rad', (["dc.metadata.calibration['R_to_K_rotation_degrees']"], {}), "(dc.metadata.calibration['R_to_K_rotation_degrees'])\n", (567, 619), True, 'import numpy as np\n'), ((798, 874), 'numpy.sqrt', 'np.sqrt', (['((Kx + Qx[:, :, None, None]) ** 2 + (Ky + Qy[:, :, None, None]) ** 2)'], {}), '((Kx + Qx[:, :, None, None]) ** 2 + (Ky + Qy[:, :, None, None]) ** 2)\n', (805, 874), True, 'import numpy as np\n'), ((888, 964), 'numpy.sqrt', 'np.sqrt', (['((Kx - Qx[:, :, None, None]) ** 2 + (Ky - Qy[:, :, None, None]) ** 2)'], {}), '((Kx - Qx[:, :, None, None]) ** 2 + (Ky - Qy[:, :, None, None]) ** 2)\n', (895, 964), True, 'import numpy as np\n'), ((973, 999), 'numpy.sqrt', 'np.sqrt', (['(Kx ** 2 + Ky ** 2)'], {}), '(Kx ** 2 + Ky ** 2)\n', (980, 999), True, 'import numpy as np\n'), ((1020, 1036), 'numpy.zeros_like', 'np.zeros_like', (['G'], {}), '(G)\n', (1033, 1036), True, 'import numpy as np\n'), ((1053, 1069), 'numpy.zeros_like', 'np.zeros_like', (['G'], {}), '(G)\n', (1066, 1069), True, 'import numpy as np\n'), ((1988, 2026), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'np.complex64'}), '((ny, nx), dtype=np.complex64)\n', (1996, 2026), True, 'import numpy as np\n'), ((2048, 2086), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'np.complex64'}), '((ny, nx), dtype=np.complex64)\n', (2056, 2086), True, 'import numpy as np\n'), ((2109, 2147), 'numpy.zeros', 'np.zeros', (['(ny, nx)'], {'dtype': 'np.complex64'}), '((ny, nx), dtype=np.complex64)\n', (2117, 2147), True, 'import numpy as np\n'), ((1726, 1736), 'numpy.conj', 'np.conj', (['A'], {}), '(A)\n', (1733, 1736), True, 'import numpy as np\n'), ((1755, 1772), 'numpy.conj', 'np.conj', (['A_KplusQ'], {}), '(A_KplusQ)\n', (1762, 1772), True, 'import numpy as np\n'), ((2229, 2248), 'numpy.abs', 'np.abs', (['Gamma[y, x]'], {}), '(Gamma[y, x])\n', (2235, 2248), True, 'import numpy as np\n'), ((2395, 2433), 'numpy.sum', 'np.sum', (['G[y, x][double_overlap1[y, x]]'], {}), '(G[y, 
x][double_overlap1[y, x]])\n', (2401, 2433), True, 'import numpy as np\n'), ((2470, 2508), 'numpy.sum', 'np.sum', (['G[y, x][double_overlap2[y, x]]'], {}), '(G[y, x][double_overlap2[y, x]])\n', (2476, 2508), True, 'import numpy as np\n'), ((2581, 2596), 'numpy.abs', 'np.abs', (['G[y, x]'], {}), '(G[y, x])\n', (2587, 2596), True, 'import numpy as np\n'), ((2644, 2659), 'numpy.abs', 'np.abs', (['G[y, x]'], {}), '(G[y, x])\n', (2650, 2659), True, 'import numpy as np\n'), ((2708, 2723), 'numpy.abs', 'np.abs', (['G[y, x]'], {}), '(G[y, x])\n', (2714, 2723), True, 'import numpy as np\n')] |
import os
import torch
import numpy as np
import matplotlib.pyplot as plt
from Agent import Agent
from Env import Env
from arm import Viewer
# Allow duplicate OpenMP runtimes (common MKL/torch clash on Windows).
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
# Module-level environment and agent used by arm_tst(); the trained actor
# weights are loaded from a local checkpoint at import time.
env_tst = Env()
agent_tst = Agent()
agent_tst.actor.load_state_dict(torch.load("./model_test1/modela1750_.pth"))
def arm_tst():
    """Roll out one evaluation episode of the trained actor against a fixed goal."""
    view = Viewer()
    state, goal = env_tst.reset(view)
    # Override the sampled goal with a fixed target (coordinates normalised by 30).
    state[12] = 20 / 30  # x
    state[13] = 20 / 30  # y
    state[14] = 0 / 30   # z
    goal = np.asarray([state[12], state[13], state[14]])
    episode_reward = 0
    steps = 0
    for _ in range(200):
        steps += 1
        # Actor inference only; no gradients needed during evaluation.
        with torch.no_grad():
            action = (
                agent_tst.actor(torch.FloatTensor(state).cuda())
                .detach()
                .cpu()
                .numpy()
            )
        next_state, reward, done = env_tst.step(action, goal, view)
        print(reward)
        env_tst.renders(view, next_state[15], next_state[16],
                        next_state[17], next_state[18], goal)
        episode_reward += reward
        if done:
            break
        state = next_state
    print("step_num: ", steps)
# Run the evaluation episode when executed as a script.
if __name__ == "__main__":
    arm_tst()
| [
"numpy.asarray",
"torch.load",
"Env.Env",
"arm.Viewer",
"torch.FloatTensor",
"Agent.Agent",
"torch.no_grad"
] | [((213, 218), 'Env.Env', 'Env', ([], {}), '()\n', (216, 218), False, 'from Env import Env\n'), ((232, 239), 'Agent.Agent', 'Agent', ([], {}), '()\n', (237, 239), False, 'from Agent import Agent\n'), ((273, 316), 'torch.load', 'torch.load', (['"""./model_test1/modela1750_.pth"""'], {}), "('./model_test1/modela1750_.pth')\n", (283, 316), False, 'import torch\n'), ((348, 356), 'arm.Viewer', 'Viewer', ([], {}), '()\n', (354, 356), False, 'from arm import Viewer\n'), ((501, 546), 'numpy.asarray', 'np.asarray', (['[state[12], state[13], state[14]]'], {}), '([state[12], state[13], state[14]])\n', (511, 546), True, 'import numpy as np\n'), ((648, 663), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (661, 663), False, 'import torch\n'), ((703, 727), 'torch.FloatTensor', 'torch.FloatTensor', (['state'], {}), '(state)\n', (720, 727), False, 'import torch\n')] |
#!/usr/bin/env python3
# vim: set filetype=python sts=2 ts=2 sw=2 expandtab:
"""
Command line interface module
"""
# pylint: disable=unused-variable
# pylint: disable=fixme
# pylint: disable=too-many-locals
# pylint: disable=too-many-instance-attributes
# pylint: disable=pointless-string-statement
import logging
import sys
import asyncio
import signal
import uuid
import datetime
import json
import os
import glob
import numpy as np
import tensorflow as tf
import sklearn as skl
logger = logging.getLogger(__name__)
# Paths derived from this module's location; ``var`` is the writable
# working directory (may be overridden via --vardir at runtime).
script_dirname = os.path.dirname(__file__)
script_dirnamea = os.path.abspath(script_dirname)
script_basename = os.path.basename(__file__)
script_vardir = os.path.join(script_dirnamea,"..","..","..","var")
from .argparse_tree import ArgParseNode
from . import __version__
from tensorflow.python.keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
from tensorflow.python.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input,Lambda, Dense, Flatten
from tensorflow.image import grayscale_to_rgb
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
import tensorflow.keras.callbacks as tfkc
import matplotlib.pyplot
logger = logging.getLogger(__name__)  # NOTE(review): duplicate of the assignment above; redundant but harmless
def analise_samples(data_path, select_clazz=None):
  """Summarise sample filenames of the form ``<type>_<clazz>_<index>.json``.

  Prints the discovered types, classes, maximum indices and per-type class
  counts, then returns ``(types, clazzes, index_max)``.
  ``select_clazz`` is currently unused.
  """
  index_max = 0
  types = set()
  clazzes = set()
  clazz_type_counts = {}
  clazz_index_max = {}
  for name in os.listdir(data_path):
    # Strip the extension, then split into [type..., clazz, index].
    parts = name.split('.')[0].split('_')
    idx = int(parts[-1])
    clazz = parts[-2]
    kind = '_'.join(parts[:-2])
    types.add(kind)
    clazzes.add(clazz)
    per_type = clazz_type_counts.setdefault(kind, {})
    per_type[clazz] = per_type.get(clazz, 0) + 1
    clazz_index_max[clazz] = max(clazz_index_max.get(clazz, 0), idx)
    index_max = max(index_max, idx)
  print("types = ", types)
  print("clazzes = ", clazzes)
  print("index_max", index_max)
  print("clazz_type_counts = ", clazz_type_counts)
  print("clazz_index_max = ", clazz_index_max)
  return ( types, clazzes, index_max )
def load_sample(data_path, clazz, index, xglob="*"):
  """Locate (and for now only print) sample files matching ``<xglob>_<clazz>_<index>.json``."""
  pattern = "{:s}_{:s}_{:d}.json".format(xglob, clazz, index)
  matches = glob.glob(os.path.join(data_path, pattern))
  print("sample_files = ", matches)
def load_samples(data_path, limit=None):
  """Placeholder loader: currently only analyses filenames in *data_path*.

  Actual sample loading is not implemented yet; *limit* is unused.
  """
  ( types, clazzes, index_max ) = analise_samples(data_path)
def load_files(data_path, limit=None):
  """Load horizon samples from *data_path* into arrays ``data``/``labels``.

  Expects files named ``<type>_<good|bad>_<index>.json`` plus ``zone*``
  files whose indices define the sample count.  'topa' and 'topb' horizons
  go to channels 1 and 0 respectively; 'good' samples are shifted by
  ``num_of_sample`` along the first axis.

  NOTE(review): the built arrays are only printed, never returned, and
  *limit* is unused.  If sample indices run 1..num_of_sample, the shifted
  index ``i`` can equal ``num_smp`` and overflow -- verify the index base.
  """
  #data_path = os.path.join('force2019-data-000', 'data-001')#'hackathon_training_data'
  #data_path = os.path.join('/content/drive/My Drive/force-hackathon-2019', 'data-002')
  num_of_sample = 0
  list_of_files = os.listdir(data_path)
  # Derive the sample count from the largest index among 'zone*' files.
  for filename in [f for f in list_of_files if f.startswith('zone')]:
    num_of_sample = max (num_of_sample,int(filename.split('.')[0].split('_')[-1]))
  num_of_files = len(list_of_files)
  # Peek at the first file to learn the 2D sample shape.
  first_file_path = os.path.join(data_path, list_of_files[0])
  num_smp = num_of_sample * 2 # Good & Bad
  with open(first_file_path,'r') as read_file:
    shape_of_files = (num_smp,) + np.asarray(json.load(read_file)).shape + (2, )
  # the shape of the data is (num_smp, width, length, chanels)
  data = np.zeros((shape_of_files))
  # labels are a vector the size of the number of sumples
  labels = np.zeros(num_smp)
  # labels for categorical crossentropy are a matrix of num_smp X num_classes
  labels_ce = np.zeros((num_smp,2))
  for filename in os.listdir(data_path):
    if not filename.startswith('seismic'):
      splitted_name = filename.split('.')[0].split('_')
      if splitted_name[1] == 'seismic':
        continue
      # Horizon A and B are on two different channels
      chan = int(splitted_name[0]=='topa')
      # calculate the index of the data (if data belongs to the "Good" class I shift it)
      i = int(splitted_name[-1])
      is_good = int (splitted_name[1] == 'good')
      i += (num_of_sample) * is_good
      full_path = os.path.join(data_path,filename)
      labels[i] = is_good #int(filename.startswith('good'))
      labels_ce[i, is_good] = 1
      with open(full_path,'r') as read_file:
        loaded = np.asarray(json.load(read_file))
        #mask = np.zeros_like(loaded)
        #mask[np.argwhere(loaded > 999990)] = 1
        data[i, :, :, chan] = loaded
  print('labels shape', labels.shape)
  print('labels for CE shape', labels_ce.shape)
  print('data shape', data.shape)
class Application:
  """Command line application: builds an argparse tree and dispatches handlers."""

  def __init__(self):
    # Root of the subcommand parser tree (project-local ArgParseNode).
    self.apn_root = ArgParseNode(options={"add_help": True})

  def handle_load_data(self, parse_result):
    """Handler for the ``load_files`` subcommand: analyse the input directory."""
    analise_samples(parse_result.fromdir)
    load_sample(parse_result.fromdir, "good", 0)
    #load_files(parse_result.fromdir, parse_result.limit_input)

  def main(self):
    """Parse CLI arguments, configure verbosity and vardir, run the handler."""
    # pylint: disable=too-many-statements
    apn_current = apn_root = self.apn_root
    apn_root.parser.add_argument("--version",
      action="version", version="{:s}".format(__version__))
    apn_root.parser.add_argument("-v", "--verbose",
      action="count", dest="verbosity",
      help="increase verbosity level")
    apn_root.parser.add_argument("--vardir",
      action="store", dest="vardir",
      default=None, required=False)
    # ``load_files`` subcommand and its options.
    apn_current = apn_eventgrid = apn_root.get("load_files")
    apn_current.parser.add_argument("--from", dest="fromdir", action="store", type=str, required=True)
    apn_current.parser.add_argument("--limit-input", dest="limit_input", action="store", default=None, type=int, required=False)
    apn_current.parser.set_defaults(handler=self.handle_load_data)
    parse_result = self.parse_result = apn_root.parser.parse_args(args=sys.argv[1:])
    verbosity = parse_result.verbosity
    if verbosity is not None:
      root_logger = logging.getLogger("")
      root_logger.propagate = True
      # First -v lowers the root level by 10 (one full level); each extra
      # -v lowers it by 1 more, capped at 9 extra steps.
      new_level = (root_logger.getEffectiveLevel() -
                   (min(1, verbosity)) * 10 - min(max(0, verbosity - 1), 9) * 1)
      root_logger.setLevel(new_level)
    else:
      # XXX TODO FIXME this is here because this logger has it is logging things on
      # info level that should be logged as debugging
      # to re-enable you can use PYLOG_LEVELS="{ azure.storage.common.storageclient: INFO }"
      logging.getLogger("azure.storage.common.storageclient").setLevel(logging.WARNING)
    logger.debug("sys.argv = %s, parse_result = %s, logging.level = %s, logger.level = %s",
      sys.argv, parse_result, logging.getLogger("").getEffectiveLevel(),
      logger.getEffectiveLevel())
    # Allow --vardir to override the module-level working directory.
    global script_vardir
    if parse_result.vardir is not None:
      script_vardir = parse_result.vardir
    logger.info("start ... script_vardir = %s", script_vardir);
    os.makedirs(script_vardir, exist_ok=True);
    # Dispatch to the subcommand handler selected via set_defaults().
    if "handler" in parse_result and parse_result.handler:
      parse_result.handler(parse_result)
def main():
  """Configure root logging to stderr and run the CLI application."""
  log_format = ("%(asctime)s %(process)d %(thread)x %(levelno)03d:%(levelname)-8s "
                "%(name)-12s %(module)s:%(lineno)s:%(funcName)s %(message)s")
  logging.basicConfig(stream=sys.stderr, level=logging.INFO,
                      datefmt="%Y-%m-%dT%H:%M:%S", format=log_format)
  app = Application()
  app.main()
# Script entry point.
if __name__ == "__main__":
  main()
| [
"os.path.abspath",
"json.load",
"os.makedirs",
"logging.basicConfig",
"os.path.basename",
"os.path.dirname",
"numpy.zeros",
"os.path.join",
"os.listdir",
"logging.getLogger"
] | [((492, 519), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (509, 519), False, 'import logging\n'), ((538, 563), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (553, 563), False, 'import os\n'), ((582, 613), 'os.path.abspath', 'os.path.abspath', (['script_dirname'], {}), '(script_dirname)\n', (597, 613), False, 'import os\n'), ((632, 658), 'os.path.basename', 'os.path.basename', (['__file__'], {}), '(__file__)\n', (648, 658), False, 'import os\n'), ((675, 729), 'os.path.join', 'os.path.join', (['script_dirnamea', '""".."""', '""".."""', '""".."""', '"""var"""'], {}), "(script_dirnamea, '..', '..', '..', 'var')\n", (687, 729), False, 'import os\n'), ((1455, 1482), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1472, 1482), False, 'import logging\n'), ((1549, 1570), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (1559, 1570), False, 'import os\n'), ((3104, 3125), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (3114, 3125), False, 'import os\n'), ((3336, 3377), 'os.path.join', 'os.path.join', (['data_path', 'list_of_files[0]'], {}), '(data_path, list_of_files[0])\n', (3348, 3377), False, 'import os\n'), ((3624, 3648), 'numpy.zeros', 'np.zeros', (['shape_of_files'], {}), '(shape_of_files)\n', (3632, 3648), True, 'import numpy as np\n'), ((3721, 3738), 'numpy.zeros', 'np.zeros', (['num_smp'], {}), '(num_smp)\n', (3729, 3738), True, 'import numpy as np\n'), ((3832, 3854), 'numpy.zeros', 'np.zeros', (['(num_smp, 2)'], {}), '((num_smp, 2))\n', (3840, 3854), True, 'import numpy as np\n'), ((3873, 3894), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (3883, 3894), False, 'import os\n'), ((7446, 7682), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'datefmt': '"""%Y-%m-%dT%H:%M:%S"""', 'stream': 'sys.stderr', 'format': '"""%(asctime)s %(process)d %(thread)x %(levelno)03d:%(levelname)-8s 
%(name)-12s %(module)s:%(lineno)s:%(funcName)s %(message)s"""'}), "(level=logging.INFO, datefmt='%Y-%m-%dT%H:%M:%S', stream\n =sys.stderr, format=\n '%(asctime)s %(process)d %(thread)x %(levelno)03d:%(levelname)-8s %(name)-12s %(module)s:%(lineno)s:%(funcName)s %(message)s'\n )\n", (7465, 7682), False, 'import logging\n'), ((7273, 7314), 'os.makedirs', 'os.makedirs', (['script_vardir'], {'exist_ok': '(True)'}), '(script_vardir, exist_ok=True)\n', (7284, 7314), False, 'import os\n'), ((4379, 4412), 'os.path.join', 'os.path.join', (['data_path', 'filename'], {}), '(data_path, filename)\n', (4391, 4412), False, 'import os\n'), ((6257, 6278), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (6274, 6278), False, 'import logging\n'), ((4579, 4599), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (4588, 4599), False, 'import json\n'), ((6776, 6831), 'logging.getLogger', 'logging.getLogger', (['"""azure.storage.common.storageclient"""'], {}), "('azure.storage.common.storageclient')\n", (6793, 6831), False, 'import logging\n'), ((6991, 7012), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (7008, 7012), False, 'import logging\n'), ((3515, 3535), 'json.load', 'json.load', (['read_file'], {}), '(read_file)\n', (3524, 3535), False, 'import json\n')] |
# -*- coding: utf-8 -*-
from math import sqrt
from functools import reduce
import pytest
import numpy as np
from renormalizer.model.phonon import Phonon
from renormalizer.utils import Quantity
def test_property():
    """Derived phonon properties and equality of identically-built phonons."""
    def build():
        return Phonon.simple_phonon(
            omega=Quantity(1), displacement=Quantity(1), n_phys_dim=10
        )

    ph = build()
    assert ph.reorganization_energy.as_au() == pytest.approx(0.5)
    assert ph.coupling_constant == pytest.approx(sqrt(0.5))
    # Displaced-ground-state populations follow a Poisson distribution:
    # exp(-s) * s**k / k!, built here by the recurrence w_{k+1} = w_k * s/(k+1).
    s = 0.5
    weights = []
    w = np.exp(-s)
    for k in range(10):
        weights.append(w)
        w = w * s / (k + 1)
    assert np.allclose(weights, ph.get_displacement_evecs()[:, 0] ** 2)
    # Two phonons built from the same parameters compare equal.
    assert ph == build()
def test_simplest_phonon():
    """Check the number of levels ``simplest_phonon`` picks for various inputs."""
    ph =Phonon.simplest_phonon(Quantity(0.1), Quantity(10))
    assert ph.nlevels == 32
    ph = Phonon.simplest_phonon(Quantity(1), Quantity(1))
    assert ph.nlevels == 16
    ph = Phonon.simplest_phonon(Quantity(0.128), Quantity(6.25))
    assert ph.nlevels == 16
    ph = Phonon.simplest_phonon(Quantity(0.032), Quantity(6.25))
    assert ph.nlevels == 16
    # With a finite temperature argument.
    ph = Phonon.simplest_phonon(Quantity(1), Quantity(0.01), temperature=Quantity(1))
    assert ph.nlevels == 14
    # Physical units; lam=True changes the meaning of the second argument --
    # presumably a reorganization energy; confirm against Phonon docs.
    ph = Phonon.simplest_phonon(Quantity(520, "cm-1"), Quantity(28, "meV"), Quantity(298, "K"), lam=True)
    assert ph.nlevels == 19
def test_split():
    """Splitting a phonon should conserve its total energy ``e0``."""
    ph = Phonon.simplest_phonon(Quantity(100, "cm-1"), Quantity(1))
    # Two-way split: each half carries half the energy; the frequencies are
    # shifted by +/- width around the original 100 cm-1.
    ph1, ph2 = ph.split(width=Quantity(20, "cm-1"))
    assert ph1.e0 == ph2.e0 == ph.e0 / 2
    assert ph1.omega[0] == Quantity(80, "cm-1").as_au()
    # n-way split: the fragment energies sum back to the original e0.
    ph_list = ph.split(n=100)
    assert reduce(lambda x, y: x+y, map(lambda x: x.e0, ph_list)) == ph.e0
assert reduce(lambda x, y: x+y, map(lambda x: x.e0, ph_list)) == ph.e0 | [
"math.sqrt",
"numpy.allclose",
"numpy.exp",
"pytest.approx",
"renormalizer.utils.Quantity"
] | [((598, 632), 'numpy.allclose', 'np.allclose', (['res', '(evecs[:, 0] ** 2)'], {}), '(res, evecs[:, 0] ** 2)\n', (609, 632), True, 'import numpy as np\n'), ((370, 388), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (383, 388), False, 'import pytest\n'), ((512, 522), 'numpy.exp', 'np.exp', (['(-s)'], {}), '(-s)\n', (518, 522), True, 'import numpy as np\n'), ((821, 834), 'renormalizer.utils.Quantity', 'Quantity', (['(0.1)'], {}), '(0.1)\n', (829, 834), False, 'from renormalizer.utils import Quantity\n'), ((836, 848), 'renormalizer.utils.Quantity', 'Quantity', (['(10)'], {}), '(10)\n', (844, 848), False, 'from renormalizer.utils import Quantity\n'), ((910, 921), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (918, 921), False, 'from renormalizer.utils import Quantity\n'), ((923, 934), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (931, 934), False, 'from renormalizer.utils import Quantity\n'), ((996, 1011), 'renormalizer.utils.Quantity', 'Quantity', (['(0.128)'], {}), '(0.128)\n', (1004, 1011), False, 'from renormalizer.utils import Quantity\n'), ((1013, 1027), 'renormalizer.utils.Quantity', 'Quantity', (['(6.25)'], {}), '(6.25)\n', (1021, 1027), False, 'from renormalizer.utils import Quantity\n'), ((1089, 1104), 'renormalizer.utils.Quantity', 'Quantity', (['(0.032)'], {}), '(0.032)\n', (1097, 1104), False, 'from renormalizer.utils import Quantity\n'), ((1106, 1120), 'renormalizer.utils.Quantity', 'Quantity', (['(6.25)'], {}), '(6.25)\n', (1114, 1120), False, 'from renormalizer.utils import Quantity\n'), ((1182, 1193), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (1190, 1193), False, 'from renormalizer.utils import Quantity\n'), ((1195, 1209), 'renormalizer.utils.Quantity', 'Quantity', (['(0.01)'], {}), '(0.01)\n', (1203, 1209), False, 'from renormalizer.utils import Quantity\n'), ((1296, 1317), 'renormalizer.utils.Quantity', 'Quantity', (['(520)', '"""cm-1"""'], {}), "(520, 'cm-1')\n", 
(1304, 1317), False, 'from renormalizer.utils import Quantity\n'), ((1319, 1338), 'renormalizer.utils.Quantity', 'Quantity', (['(28)', '"""meV"""'], {}), "(28, 'meV')\n", (1327, 1338), False, 'from renormalizer.utils import Quantity\n'), ((1340, 1358), 'renormalizer.utils.Quantity', 'Quantity', (['(298)', '"""K"""'], {}), "(298, 'K')\n", (1348, 1358), False, 'from renormalizer.utils import Quantity\n'), ((1450, 1471), 'renormalizer.utils.Quantity', 'Quantity', (['(100)', '"""cm-1"""'], {}), "(100, 'cm-1')\n", (1458, 1471), False, 'from renormalizer.utils import Quantity\n'), ((1473, 1484), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (1481, 1484), False, 'from renormalizer.utils import Quantity\n'), ((264, 275), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (272, 275), False, 'from renormalizer.utils import Quantity\n'), ((290, 301), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (298, 301), False, 'from renormalizer.utils import Quantity\n'), ((438, 447), 'math.sqrt', 'sqrt', (['(0.5)'], {}), '(0.5)\n', (442, 447), False, 'from math import sqrt\n'), ((680, 691), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (688, 691), False, 'from renormalizer.utils import Quantity\n'), ((706, 717), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (714, 717), False, 'from renormalizer.utils import Quantity\n'), ((1223, 1234), 'renormalizer.utils.Quantity', 'Quantity', (['(1)'], {}), '(1)\n', (1231, 1234), False, 'from renormalizer.utils import Quantity\n'), ((1516, 1536), 'renormalizer.utils.Quantity', 'Quantity', (['(20)', '"""cm-1"""'], {}), "(20, 'cm-1')\n", (1524, 1536), False, 'from renormalizer.utils import Quantity\n'), ((1606, 1626), 'renormalizer.utils.Quantity', 'Quantity', (['(80)', '"""cm-1"""'], {}), "(80, 'cm-1')\n", (1614, 1626), False, 'from renormalizer.utils import Quantity\n')] |
from dataclasses import dataclass, field
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np

from cw.context import time_it
from cw.simulation import Simulation, StatesBase, AB3Integrator, ModuleBase, Logging, Plotter
from cw.simulation.modules import EOM6DOF
nan = float('nan')  # module-level shorthand for NaN
def main():
    """Build, initialize, run, and plot a 10000-step demo simulation."""
    integrator = AB3Integrator(h=0.01, rk4=True, fd_max_order=1)
    sim = Simulation(
        states_class=Sim1States,
        integrator=integrator,
        modules=[ModuleA(), ModuleB()],
        logging=Logging(),
        initial_state_values=None,
    )
    sim.initialize()
    # Time the integration itself.
    with time_it("simulation run"):
        result = sim.run(10000)
    report_path = Path(__file__).parent / "results.i.pdf"
    Plotter().plot_to_pdf(report_path, result)
@dataclass
class Sim1States(StatesBase):
    """State vector for the demo simulation.

    ``s``/``v``/``a`` are position/velocity/acceleration; ``v_fd``/``a_fd``
    hold finite-difference derivatives filled in during post-processing.
    """
    t: float = 0
    mass: float = 10
    # default_factory: a bare ``np.zeros(3)`` default would be a single
    # shared array mutated by every instance (classic mutable-default bug).
    s: np.ndarray = field(default_factory=lambda: np.zeros(3))
    v: np.ndarray = field(default_factory=lambda: np.zeros(3))
    v_fd: np.ndarray = field(default_factory=lambda: np.zeros(3))
    a: np.ndarray = field(default_factory=lambda: np.zeros(3))
    a_fd: np.ndarray = field(default_factory=lambda: np.zeros(3))
    # i: np.ndarray = np.eye(3)
    # state: str = "qwerty"

    def get_y_dot(self):
        """Return the time derivative [v, a] of the integration state."""
        # ``np.float`` was removed in NumPy 1.24; the builtin float is equivalent.
        return np.array([self.v, self.a], dtype=float)

    def get_y(self):
        """Return the integration state [s, v]."""
        return np.array([self.s, self.v], dtype=float)

    def set_t_y(self, t, y):
        """Unpack integrator output ``(t, [s, v])`` into the state fields."""
        self.t = t
        self.s = y[0]
        self.v = y[1]

    def get_differentiation_y(self):
        """Stack [s, v] for finite-difference differentiation."""
        return np.vstack((self.s, self.v))

    def set_differentiation_y_dot(self, y_dot):
        """Store the finite-difference derivatives [v_fd, a_fd]."""
        self.v_fd = y_dot[0, :]
        self.a_fd = y_dot[1, :]
class ModuleA(ModuleBase):
    """Continuous module that only zeroes the acceleration at start-up."""

    def __init__(self):
        super().__init__()

    def initialize(self, simulation):
        """Reset the acceleration state to zero before the run begins."""
        super().initialize(simulation)
        simulation.states.a = np.array([0., 0., 0.])

    def step(self):
        # Intentionally a no-op; the commented lines are leftover debugging.
        pass
        # print("Module A step")
        # self.simulation.states.a = 1
class ModuleB(ModuleBase):
    """Discrete module (0.01 s period) that flips the x-acceleration sign each step."""

    def __init__(self):
        super().__init__(is_discreet=True,
                         target_time_step=0.01)
        # Magnitude of the alternating x-acceleration; its sign flips every step.
        self.da = 0.1

    def initialize(self, simulation):
        super().initialize(simulation)

    def step(self):
        # NOTE(review): ``self.s`` is presumably a states alias supplied by
        # ModuleBase -- confirm; it is used interchangeably with
        # ``self.simulation.states`` below.
        print("Module B step", self.s.t)
        a = self.simulation.states.a[0]  # NOTE(review): read but never used
        self.s.a = np.array([self.da, 0, 0])
        self.da *= -1
main()  # NOTE(review): runs at import time; consider an ``if __name__ == "__main__"`` guard
| [
"cw.simulation.Logging",
"cw.simulation.AB3Integrator",
"numpy.zeros",
"pathlib.Path",
"numpy.array",
"cw.context.time_it",
"cw.simulation.Plotter",
"numpy.vstack"
] | [((746, 755), 'cw.simulation.Plotter', 'Plotter', ([], {}), '()\n', (753, 755), False, 'from cw.simulation import Simulation, StatesBase, AB3Integrator, ModuleBase, Logging, Plotter\n'), ((930, 941), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (938, 941), True, 'import numpy as np\n'), ((962, 973), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (970, 973), True, 'import numpy as np\n'), ((997, 1008), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1005, 1008), True, 'import numpy as np\n'), ((1029, 1040), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1037, 1040), True, 'import numpy as np\n'), ((1064, 1075), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1072, 1075), True, 'import numpy as np\n'), ((663, 688), 'cw.context.time_it', 'time_it', (['"""simulation run"""'], {}), "('simulation run')\n", (670, 688), False, 'from cw.context import time_it\n'), ((1177, 1219), 'numpy.array', 'np.array', (['[self.v, self.a]'], {'dtype': 'np.float'}), '([self.v, self.a], dtype=np.float)\n', (1185, 1219), True, 'import numpy as np\n'), ((1257, 1299), 'numpy.array', 'np.array', (['[self.s, self.v]'], {'dtype': 'np.float'}), '([self.s, self.v], dtype=np.float)\n', (1265, 1299), True, 'import numpy as np\n'), ((1446, 1473), 'numpy.vstack', 'np.vstack', (['(self.s, self.v)'], {}), '((self.s, self.v))\n', (1455, 1473), True, 'import numpy as np\n'), ((1775, 1800), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1783, 1800), True, 'import numpy as np\n'), ((2269, 2294), 'numpy.array', 'np.array', (['[self.da, 0, 0]'], {}), '([self.da, 0, 0])\n', (2277, 2294), True, 'import numpy as np\n'), ((395, 442), 'cw.simulation.AB3Integrator', 'AB3Integrator', ([], {'h': '(0.01)', 'rk4': '(True)', 'fd_max_order': '(1)'}), '(h=0.01, rk4=True, fd_max_order=1)\n', (408, 442), False, 'from cw.simulation import Simulation, StatesBase, AB3Integrator, ModuleBase, Logging, Plotter\n'), ((572, 581), 'cw.simulation.Logging', 'Logging', ([], {}), 
'()\n', (579, 581), False, 'from cw.simulation import Simulation, StatesBase, AB3Integrator, ModuleBase, Logging, Plotter\n'), ((780, 794), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (784, 794), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#things to do
#1 comment and review
#imports
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk
import tqdm as tq
from scipy.interpolate import interp1d
# In[ ]:
# In[2]:
# Download every available Kepler light curve file for the example star
# KIC 10685175, then stitch the PDCSAP flux quarters into a single light
# curve and drop NaN cadences.
lcfc = lk.search_lightcurvefile("KIC 10685175",mission="Kepler").download_all()
lc = lcfc.PDCSAP_FLUX.stitch().remove_nans()
# In[3]:
#noise threshhold function, determines at what noise levels the frequency crosses the .99 percent correct recovery line
def noise_threshold(time,noise ,frequency , max_frequency, min_frequency = 0, fap = .01, max_runs= 1000):
    """Estimate how reliably *frequency* is recovered from noisy data.

    Runs up to `max_runs` Monte-Carlo trials: each trial injects Gaussian
    noise of amplitude `noise` into a sinusoid at `frequency`, recomputes
    the periodogram on a narrow grid around the candidate peaks, and counts
    the trial as correct when the recovered peak lies within one frequency
    resolution element of the truth. Stops early once the number of failed
    trials exceeds fap * max_runs, and returns the fraction of correct
    recoveries accumulated so far.
    """
    # Noiseless reference light curve used to locate the candidate peaks.
    clean_flux = np.sin(2 * np.pi * frequency * time)
    reference_lc = lk.LightCurve(time, clean_flux)

    # Coarse periodogram, only needed to obtain the Nyquist frequency.
    coarse = reference_lc.to_periodogram(nyquist_factor = 0.01)
    nyquist = coarse.nyquist.value
    low_zone = int(np.floor(min_frequency/nyquist))
    high_zone = int(np.ceil(max_frequency/nyquist))

    # One candidate peak per Nyquist "zone" inside the requested range.
    candidates = []
    for zone in range(low_zone, high_zone):
        zone_per = reference_lc.to_periodogram(
            minimum_frequency=zone*nyquist, maximum_frequency=(zone+1)*nyquist)
        candidates.append(zone_per.frequency_at_max_power.value)
    candidates = np.array(candidates)
    candidates = candidates[(candidates > min_frequency) & (candidates < max_frequency)]

    # Sample only a narrow window around each candidate peak for efficiency.
    resolution = 1/(time[-1]-time[0])
    n_samples = 41
    window = np.linspace(-.1*resolution, .1*resolution, n_samples)
    sample_grid = np.zeros(n_samples*len(candidates))
    for k in range(len(candidates)):
        sample_grid[k*n_samples:(k+1)*n_samples] = window + candidates[k]

    outcomes = np.zeros(max_runs)
    allowed_failures = (fap*max_runs)
    failures = 0
    percentage = 0
    for run in tq.tqdm(range(max_runs)):
        noisy_flux = np.sin(2*np.pi*(frequency*time + np.random.rand())) + np.random.randn(len(time))*noise
        trial_lc = lk.LightCurve(time, noisy_flux)
        trial_per = trial_lc.to_periodogram(frequency=sample_grid, ls_method="slow")
        deviation = np.abs(trial_per.frequency_at_max_power.value - frequency)
        outcomes[run] = (deviation < resolution)
        if(deviation > resolution):
            failures += 1
        if(failures > allowed_failures):
            # Too many misses to ever reach the requested recovery rate.
            break
        percentage = np.sum(outcomes)/(run+1)
    return percentage
# In[4]:
# Rebuild the cleaned light curve and run the recovery test for one target
# frequency (289.39094) at a fixed noise level, capped at 100 trials.
lc = lcfc.PDCSAP_FLUX.stitch().remove_nans()
time = lc.time  # observation timestamps from the stitched light curve
print(time)
print(noise_threshold(time,60.3, 289.39094, 300, min_frequency = 50, max_runs=100))
# In[ ]:
| [
"lightkurve.search_lightcurvefile",
"numpy.abs",
"numpy.ceil",
"numpy.sum",
"numpy.floor",
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"lightkurve.LightCurve",
"numpy.random.rand"
] | [((741, 777), 'numpy.sin', 'np.sin', (['(2 * np.pi * frequency * time)'], {}), '(2 * np.pi * frequency * time)\n', (747, 777), True, 'import numpy as np\n'), ((781, 806), 'lightkurve.LightCurve', 'lk.LightCurve', (['time', 'flux'], {}), '(time, flux)\n', (794, 806), True, 'import lightkurve as lk\n'), ((1384, 1414), 'numpy.array', 'np.array', (['frequency_candidates'], {}), '(frequency_candidates)\n', (1392, 1414), True, 'import numpy as np\n'), ((1692, 1771), 'numpy.linspace', 'np.linspace', (['(-0.1 * frequency_resolution)', '(0.1 * frequency_resolution)', 'n_samples'], {}), '(-0.1 * frequency_resolution, 0.1 * frequency_resolution, n_samples)\n', (1703, 1771), True, 'import numpy as np\n'), ((2006, 2024), 'numpy.zeros', 'np.zeros', (['max_runs'], {}), '(max_runs)\n', (2014, 2024), True, 'import numpy as np\n'), ((330, 388), 'lightkurve.search_lightcurvefile', 'lk.search_lightcurvefile', (['"""KIC 10685175"""'], {'mission': '"""Kepler"""'}), "('KIC 10685175', mission='Kepler')\n", (354, 388), True, 'import lightkurve as lk\n'), ((996, 1029), 'numpy.floor', 'np.floor', (['(min_frequency / nyquist)'], {}), '(min_frequency / nyquist)\n', (1004, 1029), True, 'import numpy as np\n'), ((1050, 1082), 'numpy.ceil', 'np.ceil', (['(max_frequency / nyquist)'], {}), '(max_frequency / nyquist)\n', (1057, 1082), True, 'import numpy as np\n'), ((2270, 2295), 'lightkurve.LightCurve', 'lk.LightCurve', (['time', 'flux'], {}), '(time, flux)\n', (2283, 2295), True, 'import lightkurve as lk\n'), ((2398, 2450), 'numpy.abs', 'np.abs', (['(per.frequency_at_max_power.value - frequency)'], {}), '(per.frequency_at_max_power.value - frequency)\n', (2404, 2450), True, 'import numpy as np\n'), ((2706, 2721), 'numpy.sum', 'np.sum', (['results'], {}), '(results)\n', (2712, 2721), True, 'import numpy as np\n'), ((2202, 2218), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2216, 2218), True, 'import numpy as np\n')] |
from datetime import timedelta
from datetime import datetime
import pandas as pd
import numpy as np
from optimization.performance import *
# Snapshot of today's date, the reference point for every account-age and
# transaction-recency calculation in this module.
now = datetime.now().date()
# -------------------------------------------------------------------------- #
# Helper Functions #
# -utils- #
# -------------------------------------------------------------------------- #
def build_2D_matrix_by_rule(size, scalar):
    '''
    Build a 2D score matrix whose cells grow logarithmically with their
    (1-indexed) row and column positions.
    Parameters:
        size (tuple): matrix dimensions as (m, n) = (rows, columns)
        scalar (tuple): (row_weight, col_weight) multipliers applied to the
            base-10 log of the row and column numbers
    Returns:
        np.ndarray: m x n matrix where cell [r][c] equals
        round(row_weight*log10(r+1) + col_weight*log10(c+1), 2)
    '''
    row_weight, col_weight = scalar
    grid = np.zeros(size)
    for r in range(grid.shape[0]):
        for c in range(grid.shape[1]):
            grid[r][c] = round(
                row_weight * np.log10(r + 1) + col_weight * np.log10(c + 1), 2)
    return grid
# -------------------------------------------------------------------------- #
#                                Score Matrices                              #
# -------------------------------------------------------------------------- #
# Scoring grids
# naming convention: shape+denominator, m7x7+Scalars+1.3+1.17 -> m7x7_03_17
# naming convention: shape+denominator, m3x7+Scalars+1.2+1.4 -> m3x7_2_4
m7x7_03_17 = build_2D_matrix_by_rule((7, 7), (1/3.03, 1/1.17))
m7x7_85_55 = build_2D_matrix_by_rule((7, 7), (1/1.85, 1/1.55))
m3x7_2_4 = build_2D_matrix_by_rule((3, 7), (1/1.2, 1/1.4))
m3x7_73_17 = build_2D_matrix_by_rule((3, 7), (1/1.73, 1/1.17))
# Normalized FICO band edges in [0, 0.95]; used to derive the bin arrays below.
fico = (np.array([300, 500, 560, 650, 740, 800, 870])-300) / \
    600  # Fico score binning - normalized
fico_medians = [round(fico[i]+(fico[i+1]-fico[i])/2, 2)
                for i in range(len(fico)-1)]  # Medians of Fico scoring bins
fico_medians.append(1)
fico_medians = np.array(fico_medians)
# Categorical bins. Each array below is a set of ascending thresholds fed to
# np.digitize() by the metric functions; the bin index selects a score cell.
duedate = np.array([3, 4, 5])
# bins: 0-90 | 91-120 | 121-150 | 151-180 | 181-270 | >270 days
duration = np.array([90, 120, 150, 180, 210, 270])
count0 = np.array([1, 2])  # bins: 0-1 | 2 | >=3
count_lively = np.array([round(x, 0) for x in fico*25])[1:]
count_txn_month = np.array([round(x, 0) for x in fico*40])[1:]
count_invest_acc = np.array([1, 2, 3, 4, 5, 6])
volume_flow = np.array([round(x, 0) for x in fico*1500])[1:]
volume_cred_limit = np.array([0.5, 1, 5, 8, 13, 18])*1000
volume_withdraw = np.array([round(x, 0) for x in fico*1500])[1:]
volume_deposit = np.array([round(x, 0) for x in fico*7000])[1:]
volume_invest = np.array([0.5, 1, 2, 4, 6, 8])*1000
volume_balance_now = np.array([3, 5, 9, 12, 15, 18])*1000
volume_min_run = np.array([round(x, 0) for x in fico*10000])[1:]
# Reversed bins: higher utilization / interest frequency maps to a lower score.
percent_cred_util = np.array([round(x, 2) for x in reversed(fico*0.9)][:-1])
frequency_interest = np.array([round(x, 2) for x in reversed(fico*0.6)][:-1])
ratio_flows = np.array([0.7, 1, 1.4, 2, 3, 4])
slope_product = np.array([0.5, 0.8, 1, 1.3, 1.6, 2])
slope_linregression = np.array([-0.5, 0, 0.5, 1, 1.5, 2])
# -------------------------------------------------------------------------- #
# Helper Functions #
# -------------------------------------------------------------------------- #
def dynamic_select(data, acc_name, feedback):
    '''
    dynamically pick the best credit account,
    i.e. the account that scores highest on a weighted blend of
    credit limit (x1), transaction count (x10) and account age in days (x3)
    Parameters:
        data (dict): Plaid 'Transactions' product
        acc_name (str): acccepts 'credit' or 'checking'
        feedback (dict): feedback describing the score
    Returns:
        best (dict): {'id': Plaid account_id, 'limit': credit limit} of the
            best matching account, or {'id': 'inexistent', 'limit': 0}
    '''
    try:
        candidates = []
        for account in data['accounts']:
            subtype_tag = '{1}{0}{2}'.format(
                '_', str(account['type']), str(account['subtype'])).lower()
            if acc_name not in subtype_tag:
                continue
            # Read the same metadata field the scorer has always required, so
            # a malformed record still routes through the error handler below.
            str(account['official_name'])
            acc_id = account['account_id']
            limit = int(account['balances']['limit'] or 0)
            own_txn = [t for t in data['transactions']
                       if t['account_id'] == acc_id]
            if own_txn:
                age_days = (now - own_txn[-1]['date']).days
            else:
                age_days = 0
            weighted = limit * 1 + len(own_txn) * 10 + age_days * 3
            candidates.append((weighted, acc_id, limit))
        if candidates:
            # max() keeps the first of any tied accounts, matching the
            # original "first index of the maximum" behavior.
            _, best_id, best_limit = max(candidates, key=lambda c: c[0])
            best = {'id': best_id, 'limit': best_limit}
        else:
            best = {'id': 'inexistent', 'limit': 0}
    except Exception as e:
        feedback['fetch'][dynamic_select.__name__] = str(e)
        best = {'id': 'inexistent', 'limit': 0}
    finally:
        return best
def flows(data, how_many_months, feedback):
    '''
    returns monthly net flow for the user's checking accounts
    Parameters:
        data (dict): Plaid 'Transactions' product
        how_many_months (float): how many months of transaction history are you considering?
        feedback (dict): feedback describing the score
    Returns:
        flow (df): pandas dataframe with amounts for net monthly flow and datetime index;
            implicitly returns None when an exception was recorded in feedback
    '''
    try:
        acc = data['accounts']
        txn = data['transactions']
        dates = list()
        amounts = list()
        deposit_acc = list()
        # Keep only deposit->checking accounts
        for a in acc:
            id = a['account_id']
            type = '{1}{0}{2}'.format(
                '_', str(a['type']), str(a['subtype'])).lower()
            if type == 'depository_checking':
                deposit_acc.append(id)
        # Keep only txn in deposit->checking accounts
        transat = [t for t in txn if t['account_id'] in deposit_acc]
        # Keep only income and expense transactions
        for t in transat:
            if not t['category']:
                pass
            else:
                category = t['category']
                # exclude micro txn (<= $5) and exclude internal transfers
                if abs(t['amount']) > 5 and 'internal account transfer' not in category:
                    date = t['date']
                    dates.append(date)
                    amount = t['amount']
                    amounts.append(amount)
        df = pd.DataFrame(data={'amounts': amounts},
                          index=pd.DatetimeIndex(dates))
        # Bin by month
        flow = df.groupby(pd.Grouper(freq='M')).sum()
        # Exclude current month, since it is still accruing transactions
        if flow.iloc[-1, ].name.strftime('%Y-%m') == datetime.today().date().strftime('%Y-%m'):
            flow = flow[:-1]
        # Keep only past X months. If longer, then crop
        flow.reset_index(drop=True, inplace=True)
        if how_many_months-1 in flow.index:
            flow = flow[-(how_many_months):]
        return flow
    except Exception as e:
        # NOTE(review): on failure the error is recorded here and None is
        # returned implicitly; callers rely on their own try/except to
        # catch the resulting downstream TypeError.
        feedback['fetch'][flows.__name__] = str(e)
def balance_now_checking_only(data, feedback):
    '''
    Sum the current balances across all of the user's checking accounts.
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        balance (float): cumulative current balance in checking accounts;
            None (implicitly) if an error was recorded in feedback
    '''
    try:
        # Missing/None balances count as zero.
        return sum(
            int(account['balances']['current'] or 0)
            for account in data['accounts']
            if '{1}{0}{2}'.format(
                '_', str(account['type']), str(account['subtype'])
            ).lower() == 'depository_checking'
        )
    except Exception as e:
        feedback['fetch'][balance_now_checking_only.__name__] = str(e)
# -------------------------------------------------------------------------- #
# Metric #1 Credit #
# -------------------------------------------------------------------------- #
# @measure_time_and_memory
def credit_mix(data, feedback):
    '''
    Description:
        A score based on user's credit accounts composition and status
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): gained based on number of credit accounts owned and duration
        feedback (dict): score feedback
    '''
    try:
        # NOTE: results are cached on the function object
        # (credit_mix.credit / credit_mix.card_names) for reuse by callers.
        credit_mix.credit = [d for d in data['accounts']
                             if d['type'].lower() == 'credit']
        # Human-readable card names, dropping the word 'credit' and skipping
        # non-string or generic names.
        credit_mix.card_names = [d['name'].lower().replace('credit', '').title().strip() for d in credit_mix.credit if (
            isinstance(d['name'], str) == True) and (d['name'].lower() != 'credit card')]
        if credit_mix.credit:
            size = len(credit_mix.credit)
            credit_ids = [d['account_id'] for d in credit_mix.credit]
            credit_txn = [d for d in data['transactions']
                          if d['account_id'] in credit_ids]
            # The last listed transaction is treated as the oldest one.
            first_txn = credit_txn[-1]['date']
            date_diff = (now - first_txn).days
            m = np.digitize(size, count0, right=True)
            n = np.digitize(date_diff, duration, right=True)
            score = m3x7_2_4[m][n]
            feedback['credit']['credit_cards'] = size
            # card_names can be an empty list if every card name was missing/None
            feedback['credit']['card_names'] = credit_mix.card_names
        else:
            raise Exception('no credit card')
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
# @measure_time_and_memory
def credit_limit(data, feedback):
    '''
    Description:
        A score for the cumulative credit limit of a user across ALL of his credit accounts
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): gained based on the cumulative credit limit across all credit accounts
        feedback (dict): score feedback
    '''
    try:
        credit_accounts = [a for a in data['accounts']
                           if a['type'].lower() == 'credit']
        if not credit_accounts:
            raise Exception('no credit limit')
        # Sum declared limits, treating missing/None limits as zero.
        total_limit = 0
        for account in credit_accounts:
            raw_limit = account['balances']['limit']
            total_limit += int(raw_limit) if raw_limit else 0
        ids = {a['account_id'] for a in credit_accounts}
        history = [t for t in data['transactions'] if t['account_id'] in ids]
        # The last listed transaction is treated as the oldest one.
        age_days = (now - history[-1]['date']).days
        row = np.digitize(age_days, duration, right=True)
        col = np.digitize(total_limit, volume_cred_limit, right=True)
        score = m7x7_03_17[row][col]
        feedback['credit']['credit_limit'] = total_limit
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
# @measure_time_and_memory
def credit_util_ratio(data, feedback):
    '''
    Description:
        A score reflective of the user's credit utilization ratio, that is credit_used/credit_limit
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): score for avg percent of credit limit used
        feedback (dict): score feedback
    '''
    try:
        txn = data['transactions']
        # Dynamically select best credit account
        dynamic = dynamic_select(data, 'credit', feedback)
        if dynamic['id'] == 'inexistent' or dynamic['limit'] == 0:
            score = 0
        else:
            id = dynamic['id']
            limit = dynamic['limit']
            # Keep only transactions in best credit account
            transat = [x for x in txn if x['account_id'] == id]
            if transat:
                dates = list()
                amounts = list()
                for t in transat:
                    date = t['date']
                    dates.append(date)
                    amount = t['amount']
                    amounts.append(amount)
                df = pd.DataFrame(
                    data={'amounts': amounts}, index=pd.DatetimeIndex(dates))
                # Bin by month credit card 'purchases' (positive amounts)
                # and 'paybacks' (negative amounts)
                util = df.groupby(pd.Grouper(freq='M'))['amounts'].agg([
                    ('payback', lambda x: x[x < 0].sum()),
                    ('purchases', lambda x: x[x > 0].sum())
                ])
                # Utilization per month = purchases / credit limit
                util['cred_util'] = [x/limit for x in util['purchases']]
                # Exclude current month, since it is still accruing transactions
                if util.iloc[-1, ].name.strftime('%Y-%m') == datetime.today().date().strftime('%Y-%m'):
                    util = util[:-1]
                avg_util = np.mean(util['cred_util'])
                # len(util)*30 approximates the covered history in days
                m = np.digitize(len(util)*30, duration, right=True)
                n = np.digitize(avg_util, percent_cred_util, right=True)
                score = m7x7_85_55[m][n]
                feedback['credit']['utilization_ratio'] = round(avg_util, 2)
            else:
                raise Exception('no credit history')
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
def credit_interest(data, feedback):
    '''
    returns score based on number of times user was charged credit card interest fees in past 24 months
    (fewer interest charges yield a higher score)
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): gained based on interest charged
        feedback (dict): feedback describing the score
    '''
    try:
        id = dynamic_select(data, 'credit', feedback)['id']
        if id == 'inexistent':
            score = 0
        else:
            txn = data['transactions']
            alltxn = [t for t in txn if t['account_id'] == id]
            interests = list()
            if alltxn:
                # History length in months, capped at 24.
                length = min(24, round((now - alltxn[-1]['date']).days/30, 0))
                for t in alltxn:
                    # keep only txn of type 'interest on credit card'
                    if 'Interest Charged' in t['category']:
                        date = t['date']
                        # keep only txn of last 24 months
                        if date > now - timedelta(days=2*365):
                            interests.append(t)
                # Interest charges per month of history.
                frequency = len(interests)/length
                score = fico_medians[np.digitize(
                    frequency, frequency_interest, right=True)]
                feedback['credit']['count_charged_interest'] = round(
                    frequency, 0)
            else:
                raise Exception('no credit interest')
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
def credit_length(data, feedback):
    '''
    returns score based on length of user's best credit account
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): gained because of credit account duration
        feedback (dict): feedback describing the score
    '''
    try:
        best_id = dynamic_select(data, 'credit', feedback)['id']
        account_txn = [t for t in data['transactions']
                       if t['account_id'] == best_id]
        if not account_txn:
            raise Exception('no credit length')
        # The last listed transaction is treated as the oldest one.
        age_days = (now - account_txn[-1]['date']).days
        score = fico_medians[np.digitize(age_days, duration, right=True)]
        feedback['credit']['credit_duration_(days)'] = age_days
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
def credit_livelihood(data, feedback):
    '''
    returns score quantifying the avg monthly txn count for your best credit account
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): based on avg monthly txn count
        feedback (dict): feedback describing the score
    '''
    try:
        id = dynamic_select(data, 'credit', feedback)['id']
        txn = data['transactions']
        alltxn = [t for t in txn if t['account_id'] == id]
        if alltxn:
            dates = list()
            amounts = list()
            for i in range(len(alltxn)):
                date = alltxn[i]['date']
                dates.append(date)
                amount = alltxn[i]['amount']
                amounts.append(amount)
            df = pd.DataFrame(data={'amounts': amounts},
                              index=pd.DatetimeIndex(dates))
            # Monthly transaction counts.
            d = df.groupby(pd.Grouper(freq='M')).count()
            # NOTE: cached on the function object for downstream consumers.
            credit_livelihood.d = d
            if len(d['amounts']) >= 2:
                if d['amounts'][0] < 5:  # exclude initial and final month with < 5 txn
                    d = d[1:]
                if d['amounts'][-1] < 5:
                    d = d[:-1]
            mean = d['amounts'].mean()
            score = fico_medians[np.digitize(mean, count_lively, right=True)]
            feedback['credit']['avg_count_monthly_txn'] = round(mean, 0)
        else:
            raise Exception('no credit transactions')
    except Exception as e:
        score = 0
        feedback['credit']['error'] = str(e)
    finally:
        return score, feedback
# -------------------------------------------------------------------------- #
# Metric #2 Velocity #
# -------------------------------------------------------------------------- #
# @measure_time_and_memory
def velocity_withdrawals(data, feedback):
    '''
    returns score based on count and volume of monthly automated withdrawals
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): score associated with reccurring monthly withdrawals
        feedback (dict): feedback describing the score
    '''
    try:
        txn = data['transactions']
        # Recurring-withdrawal categories of interest (exact category lists).
        withdraw = [['Service', 'Subscription'], ['Service', 'Financial',
                                                 'Loans and Mortgages'], ['Service', 'Insurance'], ['Payment', 'Rent']]
        dates = list()
        amounts = list()
        for t in txn:
            # Keep only withdrawals above a $15 threshold.
            if t['category'] in withdraw and t['amount'] > 15:
                date = t['date']
                dates.append(date)
                amount = abs(t['amount'])
                amounts.append(amount)
        df = pd.DataFrame(data={'amounts': amounts},
                          index=pd.DatetimeIndex(dates))
        if len(df.index) > 0:
            # Average number of matching withdrawals per month.
            how_many = np.mean(df.groupby(pd.Grouper(
                freq='M')).count().iloc[:, 0].tolist())
            # NOTE(review): if how_many were ever <= 0, `score` would be
            # unbound at the final return; a non-empty df guarantees > 0.
            if how_many > 0:
                volume = np.mean(df.groupby(pd.Grouper(
                    freq='M')).sum().iloc[:, 0].tolist())
                m = np.digitize(how_many, count0, right=True)
                n = np.digitize(volume, volume_withdraw, right=True)
                score = m3x7_73_17[m][n]
                feedback['velocity']['withdrawals'] = round(how_many, 0)
                feedback['velocity']['withdrawals_volume'] = round(volume, 0)
        else:
            raise Exception('no withdrawals')
    except Exception as e:
        score = 0
        feedback['velocity']['error'] = str(e)
    finally:
        return score, feedback
# @measure_time_and_memory
def velocity_deposits(data, feedback):
    '''
    returns score based on count and volume of monthly automated deposits
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): score associated with direct deposits
        feedback (dict): feedback describing the score
    '''
    try:
        txn = data['transactions']
        dates = list()
        amounts = list()
        for t in txn:
            # Deposits are negative amounts in Plaid; keep payroll deposits > $200.
            if t['amount'] < -200 and 'payroll' in [c.lower() for c in t['category']]:
                date = t['date']
                dates.append(date)
                amount = abs(t['amount'])
                amounts.append(amount)
        df = pd.DataFrame(data={'amounts': amounts},
                          index=pd.DatetimeIndex(dates))
        if len(df.index) > 0:
            # Average number of payroll deposits per month.
            how_many = np.mean(df.groupby(pd.Grouper(
                freq='M')).count().iloc[:, 0].tolist())
            # NOTE(review): if how_many were ever <= 0, `score` would be
            # unbound at the final return; a non-empty df guarantees > 0.
            if how_many > 0:
                volume = np.mean(df.groupby(pd.Grouper(
                    freq='M')).sum().iloc[:, 0].tolist())
                m = np.digitize(how_many, count0, right=True)
                n = np.digitize(volume, volume_deposit, right=True)
                score = m3x7_73_17[m][n]
                feedback['velocity']['deposits'] = round(how_many, 0)
                feedback['velocity']['deposits_volume'] = round(volume, 0)
        else:
            raise Exception('no deposits')
    except Exception as e:
        score = 0
        feedback['velocity']['error'] = str(e)
    finally:
        return score, feedback
def velocity_month_net_flow(data, feedback):
    '''
    returns score for monthly net flow
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): score associated with monthly net flow
        feedback (dict): feedback describing the score
    '''
    try:
        monthly = flows(data, 12, feedback)['amounts'].tolist()
        # Magnitude: how much money moves in an average month.
        magnitude = np.mean([abs(v) for v in monthly])
        # Direction: ratio of surplus months to deficit months.
        deficit_months = [v for v in monthly if v < 0]
        surplus_months = [v for v in monthly if v >= 0]
        if deficit_months:
            direction = len(surplus_months)/len(deficit_months)  # range [0, ...)
        else:
            direction = 10  # arbitrarily chosen large positive integer: no deficit months
        # Score from the direction/magnitude grid.
        row = np.digitize(direction, ratio_flows, right=True)
        col = np.digitize(magnitude, volume_flow, right=True)
        score = m7x7_03_17[row][col]
        feedback['velocity']['avg_net_flow'] = round(magnitude, 2)
    except Exception as e:
        score = 0
        feedback['velocity']['error'] = str(e)
    finally:
        return score, feedback
def velocity_month_txn_count(data, feedback):
    '''
    returns score based on count of monthly transactions in checking accounts
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): the larger the monthly count the larger the score
        feedback (dict): feedback describing the score
    '''
    try:
        txn = data['transactions']
        # Keep only deposit->checking accounts
        deposit_acc = list()
        for a in data['accounts']:
            type = '{1}{0}{2}'.format(
                '_', str(a['type']), str(a['subtype'])).lower()
            if type == 'depository_checking':
                deposit_acc.append(a['account_id'])
        mycounts = list()
        for d in deposit_acc:
            # Collect THIS account's transactions only.
            # Bug fix: the date/amount lists previously accumulated across
            # accounts, so each later account double-counted earlier ones.
            dates = list()
            amounts = list()
            for t in txn:
                # exclude micro transactions (<= $5)
                if t['account_id'] == d and abs(t['amount']) > 5:
                    dates.append(t['date'])
                    amounts.append(t['amount'])
            if not dates:
                # Bug fix: previously an empty account left `cnt` unbound
                # (NameError); skip accounts with no usable transactions.
                continue
            df = pd.DataFrame(data={'amounts': amounts},
                              index=pd.DatetimeIndex(dates))
            # Monthly transaction counts for this checking account.
            mycounts.extend(df.groupby(pd.Grouper(freq='M')
                                       ).count().iloc[:, 0].tolist())
        if not mycounts:
            raise Exception('no checking account transactions')
        how_many = np.mean(mycounts)
        score = fico_medians[np.digitize(
            how_many, count_txn_month, right=True)]
        feedback['velocity']['count_monthly_txn'] = round(how_many, 0)
    except Exception as e:
        score = 0
        feedback['velocity']['error'] = str(e)
    finally:
        return score, feedback
def velocity_slope(data, feedback):
    '''
    returns score for the historical behavior of the net monthly flow for past 24 months
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): feedback describing the score
    Returns:
        score (float): score for flow net behavior over past 24 months
        feedback (dict): feedback describing the score
    '''
    try:
        flow = flows(data, 24, feedback)
        # If you have >= 10 data points OR all net flows are positive, then perform linear regression
        if len(flow) >= 10 or len(list(filter(lambda x: (x < 0), flow['amounts'].tolist()))) == 0:
            # Perform Linear Regression using numpy.polyfit(); the slope `a`
            # captures the trend of the monthly net flow.
            x = range(len(flow['amounts']))
            y = flow['amounts']
            a, b = np.polyfit(x, y, 1)
            score = fico_medians[np.digitize(
                a, slope_linregression, right=True)]
            feedback['velocity']['slope'] = round(a, 2)
        # If you have < 10 data points, then calculate the score accounting for two ratios
        else:
            # Multiply two ratios by each other
            neg = list(filter(lambda x: (x < 0), flow['amounts'].tolist()))
            pos = list(filter(lambda x: (x >= 0), flow['amounts'].tolist()))
            direction = len(pos) / len(neg)  # output in range [0, 2+]
            magnitude = abs(sum(pos)/sum(neg))  # output in range [0, 2+]
            # Flip the magnitude's sign when deficit months dominate.
            if direction >= 1:
                pass
            else:
                magnitude = magnitude * -1
            m = np.digitize(direction, slope_product, right=True)
            n = np.digitize(magnitude, slope_product, right=True)
            score = m7x7_03_17.T[m][n]
            feedback['velocity']['monthly_flow'] = round(magnitude, 2)
    except Exception as e:
        score = 0
        feedback['velocity']['error'] = str(e)
    finally:
        return score, feedback
# -------------------------------------------------------------------------- #
# Metric #3 Stability #
# -------------------------------------------------------------------------- #
# @measure_time_and_memory
def stability_tot_balance_now(data, feedback):
    '''
    Description:
        A score based on total balance now across ALL accounts owned by the user.
        Depository accounts contribute their 'current' balance; every other
        account contributes its 'available' balance.
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): cumulative current balance
        feedback (dict): score feedback
    '''
    try:
        balance = 0
        for account in data['accounts']:
            # Pick the balance field by account type; None/missing counts as 0.
            field = 'current' if account['type'].lower() == 'depository' else 'available'
            value = account['balances'][field]
            balance += int(value) if value else 0
        if balance <= 0:
            raise Exception('no balance')
        score = fico_medians[np.digitize(
            balance, volume_balance_now, right=True)]
        feedback['stability']['cumulative_current_balance'] = balance
        # Cache the figure on the function object for downstream consumers.
        stability_tot_balance_now.balance = balance
    except Exception as e:
        score = 0
        feedback['stability']['error'] = str(e)
    finally:
        return score, feedback
# @measure_time_and_memory
def stability_loan_duedate(data, feedback):
    '''
    Description:
        returns how many months it'll take the user to pay back their loan,
        based on how many months of transaction history exist. The result is
        one of [3, 4, 5, 6] months.
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        feedback (dict): score feedback with a new key-value pair 'loan_duedate':float (# of months in range [3,6])
    '''
    try:
        # The last listed transaction is treated as the oldest one.
        oldest_txn_date = data['transactions'][-1]['date']
        months_of_history = int((now - oldest_txn_date).days/30)
        # Map history length onto the duedate bins; 6 months is the cap.
        options = np.append(duedate, 6)
        bucket = np.digitize(months_of_history, duedate, right=True)
        feedback['stability']['loan_duedate'] = options[bucket]
    except Exception as e:
        feedback['stability']['error'] = str(e)
    finally:
        return feedback
# @measure_time_and_memory
def stability_min_running_balance(data, feedback):
    '''
    Description:
        A score based on the average minimum balance maintained for 12 months
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): volume of minimum balance and duration
        feedback (dict): score feedback
    '''
    try:
        # Calculate net flow each month for past 12 months i.e, |income-expenses|
        nets = flows(data, 12, feedback)['amounts'].tolist()
        # Calculate total current balance now
        balance = balance_now_checking_only(data, feedback)
        # Subtract net flow from balancenow to calculate the running balance for the past 12 months
        running_balances = [balance+n for n in reversed(nets)]
        # Calculate volume using a weighted average (recent months weigh more)
        weights = np.linspace(0.01, 1, len(running_balances)
                              ).tolist()  # define your weights
        volume = sum([x*w for x, w in zip(running_balances,
                     reversed(weights))]) / sum(weights)
        length = len(running_balances)*30
        # Compute the score
        m = np.digitize(length, duration, right=True)
        n = np.digitize(volume, volume_min_run, right=True)
        # add 0.025 score penalty for each overdraft (negative running balance)
        score = round(
            m7x7_85_55[m][n] - 0.025*len(list(filter(lambda x: (x < 0), running_balances))), 2)
        feedback['stability']['min_running_balance'] = round(volume, 2)
        feedback['stability']['min_running_timeframe'] = length
    except Exception as e:
        score = 0
        feedback['stability']['error'] = str(e)
    finally:
        return score, feedback
# -------------------------------------------------------------------------- #
# Metric #4 Diversity #
# -------------------------------------------------------------------------- #
# @measure_time_and_memory
def diversity_acc_count(data, feedback):
    '''
    Description:
        A score based on count of accounts owned by the user and account duration
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): score for accounts count
        feedback (dict): score feedback
    '''
    try:
        acc_count = len(data['accounts'])
        # The last listed transaction is treated as the oldest one.
        history_days = (now - data['transactions'][-1]['date']).days
        # Account-count bins shifted up by 2 relative to count0.
        row = np.digitize(acc_count, [c + 2 for c in count0], right=False)
        col = np.digitize(history_days, duration, right=True)
        score = m3x7_73_17[row][col]
        feedback['diversity']['bank_accounts'] = acc_count
    except Exception as e:
        score = 0
        feedback['diversity']['error'] = str(e)
    finally:
        return score, feedback
# @measure_time_and_memory
def diversity_profile(data, feedback):
    '''
    Description:
        A score for number of saving and investment accounts owned
    Parameters:
        data (dict): Plaid 'Transactions' product
        feedback (dict): score feedback
    Returns:
        score (float): points scored for accounts owned
        feedback (dict): score feedback
    '''
    try:
        myacc = list()
        acc = [x for x in data['accounts'] if x['type'] == 'loan' or int(
            x['balances']['current'] or 0) != 0]  # exclude $0 balance accounts
        balance = 0
        for a in acc:
            id = a['account_id']
            type = '{}_{}'.format(a['type'], str(a['subtype']))
            # Consider savings, hda, cd, money mart, paypal, prepaid, cash management, edt accounts
            # i.e. any depository account that is not a checking account
            if (type.split('_')[0] == 'depository') & (type.split('_')[1] != 'checking'):
                balance += int(a['balances']['current'] or 0)
                myacc.append(id)
            # Consider ANY type of investment account
            if type.split('_')[0] == 'investment':
                balance += int(a['balances']['current'] or 0)
                myacc.append(id)
        if balance != 0:
            score = fico_medians[np.digitize(
                balance, volume_invest, right=True)]
            feedback['diversity']['investment_accounts'] = len(myacc)
            feedback['diversity']['investment_total_balance'] = balance
        else:
            raise Exception('no investing nor savings accounts')
    except Exception as e:
        score = 0
        feedback['diversity']['error'] = str(e)
    finally:
        return score, feedback
| [
"datetime.datetime.today",
"numpy.polyfit",
"numpy.zeros",
"pandas.DatetimeIndex",
"numpy.append",
"numpy.mean",
"numpy.array",
"pandas.Grouper",
"datetime.timedelta",
"numpy.log10",
"numpy.digitize",
"datetime.datetime.now"
] | [((2374, 2396), 'numpy.array', 'np.array', (['fico_medians'], {}), '(fico_medians)\n', (2382, 2396), True, 'import numpy as np\n'), ((2427, 2446), 'numpy.array', 'np.array', (['[3, 4, 5]'], {}), '([3, 4, 5])\n', (2435, 2446), True, 'import numpy as np\n'), ((2522, 2561), 'numpy.array', 'np.array', (['[90, 120, 150, 180, 210, 270]'], {}), '([90, 120, 150, 180, 210, 270])\n', (2530, 2561), True, 'import numpy as np\n'), ((2571, 2587), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (2579, 2587), True, 'import numpy as np\n'), ((2753, 2781), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (2761, 2781), True, 'import numpy as np\n'), ((3376, 3408), 'numpy.array', 'np.array', (['[0.7, 1, 1.4, 2, 3, 4]'], {}), '([0.7, 1, 1.4, 2, 3, 4])\n', (3384, 3408), True, 'import numpy as np\n'), ((3425, 3461), 'numpy.array', 'np.array', (['[0.5, 0.8, 1, 1.3, 1.6, 2]'], {}), '([0.5, 0.8, 1, 1.3, 1.6, 2])\n', (3433, 3461), True, 'import numpy as np\n'), ((3484, 3519), 'numpy.array', 'np.array', (['[-0.5, 0, 0.5, 1, 1.5, 2]'], {}), '([-0.5, 0, 0.5, 1, 1.5, 2])\n', (3492, 3519), True, 'import numpy as np\n'), ((1212, 1226), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (1220, 1226), True, 'import numpy as np\n'), ((2864, 2896), 'numpy.array', 'np.array', (['[0.5, 1, 5, 8, 13, 18]'], {}), '([0.5, 1, 5, 8, 13, 18])\n', (2872, 2896), True, 'import numpy as np\n'), ((3047, 3077), 'numpy.array', 'np.array', (['[0.5, 1, 2, 4, 6, 8]'], {}), '([0.5, 1, 2, 4, 6, 8])\n', (3055, 3077), True, 'import numpy as np\n'), ((3104, 3135), 'numpy.array', 'np.array', (['[3, 5, 9, 12, 15, 18]'], {}), '([3, 5, 9, 12, 15, 18])\n', (3112, 3135), True, 'import numpy as np\n'), ((147, 161), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (159, 161), False, 'from datetime import datetime\n'), ((2105, 2150), 'numpy.array', 'np.array', (['[300, 500, 560, 650, 740, 800, 870]'], {}), '([300, 500, 560, 650, 740, 800, 870])\n', (2113, 2150), True, 
'import numpy as np\n'), ((23553, 23570), 'numpy.mean', 'np.mean', (['cum_flow'], {}), '(cum_flow)\n', (23560, 23570), True, 'import numpy as np\n'), ((24012, 24059), 'numpy.digitize', 'np.digitize', (['direction', 'ratio_flows'], {'right': '(True)'}), '(direction, ratio_flows, right=True)\n', (24023, 24059), True, 'import numpy as np\n'), ((24072, 24119), 'numpy.digitize', 'np.digitize', (['magnitude', 'volume_flow'], {'right': '(True)'}), '(magnitude, volume_flow, right=True)\n', (24083, 24119), True, 'import numpy as np\n'), ((26200, 26217), 'numpy.mean', 'np.mean', (['mycounts'], {}), '(mycounts)\n', (26207, 26217), True, 'import numpy as np\n'), ((30782, 30826), 'numpy.digitize', 'np.digitize', (['txn_length', 'duedate'], {'right': '(True)'}), '(txn_length, duedate, right=True)\n', (30793, 30826), True, 'import numpy as np\n'), ((30853, 30874), 'numpy.append', 'np.append', (['duedate', '(6)'], {}), '(duedate, 6)\n', (30862, 30874), True, 'import numpy as np\n'), ((32275, 32316), 'numpy.digitize', 'np.digitize', (['length', 'duration'], {'right': '(True)'}), '(length, duration, right=True)\n', (32286, 32316), True, 'import numpy as np\n'), ((32329, 32376), 'numpy.digitize', 'np.digitize', (['volume', 'volume_min_run'], {'right': '(True)'}), '(volume, volume_min_run, right=True)\n', (32340, 32376), True, 'import numpy as np\n'), ((33614, 33671), 'numpy.digitize', 'np.digitize', (['size', '[(i + 2) for i in count0]'], {'right': '(False)'}), '(size, [(i + 2) for i in count0], right=False)\n', (33625, 33671), True, 'import numpy as np\n'), ((33680, 33724), 'numpy.digitize', 'np.digitize', (['date_diff', 'duration'], {'right': '(True)'}), '(date_diff, duration, right=True)\n', (33691, 33724), True, 'import numpy as np\n'), ((10306, 10343), 'numpy.digitize', 'np.digitize', (['size', 'count0'], {'right': '(True)'}), '(size, count0, right=True)\n', (10317, 10343), True, 'import numpy as np\n'), ((10360, 10404), 'numpy.digitize', 'np.digitize', (['date_diff', 
'duration'], {'right': '(True)'}), '(date_diff, duration, right=True)\n', (10371, 10404), True, 'import numpy as np\n'), ((11811, 11855), 'numpy.digitize', 'np.digitize', (['date_diff', 'duration'], {'right': '(True)'}), '(date_diff, duration, right=True)\n', (11822, 11855), True, 'import numpy as np\n'), ((11872, 11926), 'numpy.digitize', 'np.digitize', (['credit_lim', 'volume_cred_limit'], {'right': '(True)'}), '(credit_lim, volume_cred_limit, right=True)\n', (11883, 11926), True, 'import numpy as np\n'), ((26247, 26297), 'numpy.digitize', 'np.digitize', (['how_many', 'count_txn_month'], {'right': '(True)'}), '(how_many, count_txn_month, right=True)\n', (26258, 26297), True, 'import numpy as np\n'), ((27384, 27403), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (27394, 27403), True, 'import numpy as np\n'), ((28142, 28191), 'numpy.digitize', 'np.digitize', (['direction', 'slope_product'], {'right': '(True)'}), '(direction, slope_product, right=True)\n', (28153, 28191), True, 'import numpy as np\n'), ((28208, 28257), 'numpy.digitize', 'np.digitize', (['magnitude', 'slope_product'], {'right': '(True)'}), '(magnitude, slope_product, right=True)\n', (28219, 28257), True, 'import numpy as np\n'), ((5344, 5360), 'numpy.array', 'np.array', (['matrix'], {}), '(matrix)\n', (5352, 5360), True, 'import numpy as np\n'), ((7583, 7606), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (7599, 7606), True, 'import pandas as pd\n'), ((14063, 14089), 'numpy.mean', 'np.mean', (["util['cred_util']"], {}), "(util['cred_util'])\n", (14070, 14089), True, 'import numpy as np\n'), ((14178, 14230), 'numpy.digitize', 'np.digitize', (['avg_util', 'percent_cred_util'], {'right': '(True)'}), '(avg_util, percent_cred_util, right=True)\n', (14189, 14230), True, 'import numpy as np\n'), ((17034, 17077), 'numpy.digitize', 'np.digitize', (['how_long', 'duration'], {'right': '(True)'}), '(how_long, duration, right=True)\n', (17045, 17077), True, 
'import numpy as np\n'), ((18740, 18783), 'numpy.digitize', 'np.digitize', (['mean', 'count_lively'], {'right': '(True)'}), '(mean, count_lively, right=True)\n', (18751, 18783), True, 'import numpy as np\n'), ((20392, 20415), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (20408, 20415), True, 'import pandas as pd\n'), ((20722, 20763), 'numpy.digitize', 'np.digitize', (['how_many', 'count0'], {'right': '(True)'}), '(how_many, count0, right=True)\n', (20733, 20763), True, 'import numpy as np\n'), ((20784, 20832), 'numpy.digitize', 'np.digitize', (['volume', 'volume_withdraw'], {'right': '(True)'}), '(volume, volume_withdraw, right=True)\n', (20795, 20832), True, 'import numpy as np\n'), ((22123, 22146), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (22139, 22146), True, 'import pandas as pd\n'), ((22453, 22494), 'numpy.digitize', 'np.digitize', (['how_many', 'count0'], {'right': '(True)'}), '(how_many, count0, right=True)\n', (22464, 22494), True, 'import numpy as np\n'), ((22515, 22562), 'numpy.digitize', 'np.digitize', (['volume', 'volume_deposit'], {'right': '(True)'}), '(volume, volume_deposit, right=True)\n', (22526, 22562), True, 'import numpy as np\n'), ((27438, 27485), 'numpy.digitize', 'np.digitize', (['a', 'slope_linregression'], {'right': '(True)'}), '(a, slope_linregression, right=True)\n', (27449, 27485), True, 'import numpy as np\n'), ((29712, 29764), 'numpy.digitize', 'np.digitize', (['balance', 'volume_balance_now'], {'right': '(True)'}), '(balance, volume_balance_now, right=True)\n', (29723, 29764), True, 'import numpy as np\n'), ((35212, 35259), 'numpy.digitize', 'np.digitize', (['balance', 'volume_invest'], {'right': '(True)'}), '(balance, volume_invest, right=True)\n', (35223, 35259), True, 'import numpy as np\n'), ((7658, 7678), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (7668, 7678), True, 'import pandas as pd\n'), ((15852, 15906), 'numpy.digitize', 
'np.digitize', (['frequency', 'frequency_interest'], {'right': '(True)'}), '(frequency, frequency_interest, right=True)\n', (15863, 15906), True, 'import numpy as np\n'), ((18319, 18342), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (18335, 18342), True, 'import pandas as pd\n'), ((25780, 25803), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (25796, 25803), True, 'import pandas as pd\n'), ((1348, 1363), 'numpy.log10', 'np.log10', (['(m + 1)'], {}), '(m + 1)\n', (1356, 1363), True, 'import numpy as np\n'), ((1407, 1422), 'numpy.log10', 'np.log10', (['(n + 1)'], {}), '(n + 1)\n', (1415, 1422), True, 'import numpy as np\n'), ((13473, 13496), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (['dates'], {}), '(dates)\n', (13489, 13496), True, 'import pandas as pd\n'), ((18371, 18391), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (18381, 18391), True, 'import pandas as pd\n'), ((7772, 7788), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (7786, 7788), False, 'from datetime import datetime\n'), ((13603, 13623), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (13613, 13623), True, 'import pandas as pd\n'), ((15693, 15716), 'datetime.timedelta', 'timedelta', ([], {'days': '(2 * 365)'}), '(days=2 * 365)\n', (15702, 15716), False, 'from datetime import timedelta\n'), ((13955, 13971), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (13969, 13971), False, 'from datetime import datetime\n'), ((20490, 20510), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (20500, 20510), True, 'import pandas as pd\n'), ((22221, 22241), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (22231, 22241), True, 'import pandas as pd\n'), ((25966, 25986), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (25976, 25986), True, 'import pandas as pd\n'), ((20631, 20651), 'pandas.Grouper', 
'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (20641, 20651), True, 'import pandas as pd\n'), ((22362, 22382), 'pandas.Grouper', 'pd.Grouper', ([], {'freq': '"""M"""'}), "(freq='M')\n", (22372, 22382), True, 'import pandas as pd\n')] |
"""
<NAME>
University of Manitoba
August 06th, 2020
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_auc_score
from tensorflow.keras.backend import clear_session
from tensorflow.keras.utils import to_categorical
from umbms import get_proj_path, get_script_logger, verify_path
from umbms.loadsave import load_pickle, save_pickle
from umbms.ai.augment import full_aug
from umbms.ai.gencompare import correct_g1_ini_ant_ang
from umbms.ai.models import get_sino_cnn
from umbms.ai.makesets import get_class_labels
from umbms.ai.preproc import resize_features_for_keras, to_td
from umbms.ai.metrics import (get_acc, get_sens, get_spec, get_opt_thresh,
report_metrics)
###############################################################################
__DATA_DIR = os.path.join(get_proj_path(), 'data/umbmid/')
###############################################################################
# Number of epochs to train over
__N_EPOCHS = 496
###############################################################################
def plt_roc_curve(preds, labels, save_str='', save=False):
    """Plots the ROC curve of the classifier

    Parameters
    ----------
    preds : array_like
        Classifier predictions
    labels : array_like
        True class labels
    save_str : str
        String to use to save fig and data, if save. Should not have
        file extension, should not be full path - just name of .pickle
        and .png files that will be saved
    save : bool
        If True, will save the fig and data
    """
    # Thresholds to use for plt
    thresholds = np.linspace(0, 1, 1000)
    # Init arrays for storing FPR and TPR
    fprs = np.zeros_like(thresholds)
    tprs = np.zeros_like(thresholds)
    # Sweep the decision threshold to trace out the ROC curve
    for ii in range(np.size(thresholds)):
        # TPR (sensitivity) at this threshold
        tprs[ii] = get_sens(preds=preds, labels=labels,
                            threshold=thresholds[ii])
        # FPR = 1 - specificity at this threshold
        fprs[ii] = 1 - get_spec(preds=preds, labels=labels,
                                threshold=thresholds[ii])
    # Make fig
    plt.figure(figsize=(12, 6))
    plt.rc("font", family="Times New Roman")
    plt.tick_params(labelsize=20)
    plt.plot(fprs, tprs, 'k-')
    # Chance-level diagonal for reference
    plt.plot(np.linspace(0, 1, 1000), np.linspace(0, 1, 1000), 'b--')
    plt.xlabel('False Positive Rate', fontsize=24)
    plt.ylabel('True Positive Rate', fontsize=24)
    plt.tight_layout()
    if save:  # If saving
        # Build the output dir once and ensure it exists (the original
        # computed the same get_proj_path()-based path twice)
        out_path = os.path.join(get_proj_path(), 'output/roc-figs/')
        verify_path(out_path)
        plt.savefig(os.path.join(out_path, '%s.png' % save_str), dpi=150)
        plt.close()
        save_pickle(np.array([fprs, tprs]),
                    os.path.join(out_path, '%s.pickle' % save_str))
    # NOTE(review): when save is False the figure is left open; repeated
    # calls will accumulate open figures -- confirm whether that is intended
###############################################################################
if __name__ == "__main__":
    # Train a CNN on the (augmented) Gen-2 data and evaluate it on the
    # Gen-1 test set, repeated over n_runs independent trainings
    logger = get_script_logger(__file__)
    # Load the training data and metadata from Gen-2
    g2_d = load_pickle(os.path.join(__DATA_DIR, 'g2/g2_fd.pickle'))
    g2_md = load_pickle(os.path.join(__DATA_DIR, 'g2/g2_metadata.pickle'))
    # Load the training data and metadata from Gen-1
    g1_d = load_pickle(os.path.join(__DATA_DIR,
                                    'g1-train-test/test_fd.pickle'))
    g1_md = load_pickle(os.path.join(__DATA_DIR,
                                     'g1-train-test/test_md.pickle'))
    # Convert data to time domain, take magnitude, apply window
    g1_d = correct_g1_ini_ant_ang(g1_d)
    g1_d = np.abs(to_td(g1_d))
    g2_d = np.abs(to_td(g2_d))
    # Perform data augmentation (training set only)
    g2_d, g2_md = full_aug(g2_d, g2_md)
    # Reshape sinograms into the (samples, ..., channels) layout Keras expects
    g2_d = resize_features_for_keras(g2_d)
    g1_d = resize_features_for_keras(g1_d)
    # One-hot encode the binary class labels
    g2_labels = to_categorical(get_class_labels(g2_md))
    g1_labels = to_categorical(get_class_labels(g1_md))
    n_runs = 20
    # Init arrays for storing performance metrics
    auc_scores = np.zeros([n_runs, ])
    accs = np.zeros([n_runs, ])
    sens = np.zeros([n_runs, ])
    spec = np.zeros([n_runs, ])
    # Init list for storing the metadata of samples which are
    # incorrectly / correctly classified
    incorrect_preds = []
    correct_preds = []
    for run_idx in range(n_runs):
        logger.info('\tWorking on run [%d / %d]...' % (run_idx + 1, n_runs))
        # Get model (input shape taken from the training data, minus the
        # leading samples axis)
        cnn = get_sino_cnn(input_shape=np.shape(g2_d)[1:], lr=0.001)
        # Train the model
        cnn.fit(x=g2_d, y=g2_labels,
                epochs=__N_EPOCHS,
                shuffle=True,
                batch_size=2048,
                verbose=False)
        # Calculate the predictions
        g1_preds = cnn.predict(x=g1_d)
        # Get and store ROC AUC (as a percentage)
        g1_auc = 100 * roc_auc_score(y_true=g1_labels, y_score=g1_preds)
        auc_scores[run_idx] = g1_auc
        # Plot ROC curve
        plt_roc_curve(preds=g1_preds[:, 1], labels=g1_labels[:, 1],
                      save_str='cnn_run_%d_roc' % run_idx, save=True)
        # Get optimal decision threshold
        # NOTE(review): column 1 appears to be the positive class here --
        # confirm against get_class_labels()
        opt_thresh = get_opt_thresh(preds=g1_preds[:, 1],
                                     labels=g1_labels[:, 1])
        # Store performance metrics (all as percentages, at opt_thresh)
        accs[run_idx] = 100 * get_acc(preds=g1_preds[:, 1],
                                      labels=g1_labels[:, 1],
                                      threshold=opt_thresh)
        sens[run_idx] = 100 * get_sens(preds=g1_preds[:, 1],
                                       labels=g1_labels[:, 1],
                                       threshold=opt_thresh)
        spec[run_idx] = 100 * get_spec(preds=g1_preds[:, 1],
                                       labels=g1_labels[:, 1],
                                       threshold=opt_thresh)
        # Report AUC at this run
        logger.info('\t\tAUC:\t%.2f' % g1_auc)
        # Get the class predictions
        # (multiplying by zeros_like just yields a zeros array of the same
        # shape; entries at/above opt_thresh are then set to 1)
        class_preds = g1_preds * np.zeros_like(g1_preds)
        class_preds[g1_preds >= opt_thresh] = 1
        # Store correct and incorrect predictions
        for s_idx in range(np.size(g1_preds, axis=0)):
            if class_preds[s_idx, 1] != g1_labels[s_idx, 1]:
                incorrect_preds.append(g1_md[s_idx])
            else:
                correct_preds.append(g1_md[s_idx])
        # Reset the model
        # (clears the Keras session so each run trains a fresh model)
        clear_session()
    # Report performance metrics to logger
    logger.info('Average performance metrics')
    logger.info('')
    report_metrics(aucs=auc_scores, accs=accs, sens=sens, spec=spec,
                   logger=logger)
    # Save the incorrect predictions
    out_dir = os.path.join(get_proj_path(), 'output/incorrect-preds/')
    verify_path(out_dir)
    save_pickle(incorrect_preds,
                os.path.join(out_dir, 'incorrect_preds.pickle'))
    save_pickle(correct_preds,
                os.path.join(out_dir, 'correct_preds.pickle'))
| [
"numpy.shape",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"umbms.ai.metrics.report_metrics",
"umbms.ai.metrics.get_spec",
"numpy.zeros_like",
"umbms.ai.augment.full_aug",
"matplotlib.pyplot.close",
"umbms.verify_path",
"matplo... | [((883, 898), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (896, 898), False, 'from umbms import get_proj_path, get_script_logger, verify_path\n'), ((1717, 1740), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (1728, 1740), True, 'import numpy as np\n'), ((1798, 1823), 'numpy.zeros_like', 'np.zeros_like', (['thresholds'], {}), '(thresholds)\n', (1811, 1823), True, 'import numpy as np\n'), ((1836, 1861), 'numpy.zeros_like', 'np.zeros_like', (['thresholds'], {}), '(thresholds)\n', (1849, 1861), True, 'import numpy as np\n'), ((2214, 2241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2224, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2287), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""Times New Roman"""'}), "('font', family='Times New Roman')\n", (2253, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2293, 2322), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'labelsize': '(20)'}), '(labelsize=20)\n', (2308, 2322), True, 'import matplotlib.pyplot as plt\n'), ((2328, 2354), 'matplotlib.pyplot.plot', 'plt.plot', (['fprs', 'tprs', '"""k-"""'], {}), "(fprs, tprs, 'k-')\n", (2336, 2354), True, 'import matplotlib.pyplot as plt\n'), ((2431, 2477), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {'fontsize': '(24)'}), "('False Positive Rate', fontsize=24)\n", (2441, 2477), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2528), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {'fontsize': '(24)'}), "('True Positive Rate', fontsize=24)\n", (2493, 2528), True, 'import matplotlib.pyplot as plt\n'), ((2534, 2552), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2550, 2552), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3098), 'umbms.get_script_logger', 'get_script_logger', (['__file__'], {}), '(__file__)\n', (3088, 3098), False, 'from 
umbms import get_proj_path, get_script_logger, verify_path\n'), ((3675, 3703), 'umbms.ai.gencompare.correct_g1_ini_ant_ang', 'correct_g1_ini_ant_ang', (['g1_d'], {}), '(g1_d)\n', (3697, 3703), False, 'from umbms.ai.gencompare import correct_g1_ini_ant_ang\n'), ((3822, 3843), 'umbms.ai.augment.full_aug', 'full_aug', (['g2_d', 'g2_md'], {}), '(g2_d, g2_md)\n', (3830, 3843), False, 'from umbms.ai.augment import full_aug\n'), ((3858, 3889), 'umbms.ai.preproc.resize_features_for_keras', 'resize_features_for_keras', (['g2_d'], {}), '(g2_d)\n', (3883, 3889), False, 'from umbms.ai.preproc import resize_features_for_keras, to_td\n'), ((3902, 3933), 'umbms.ai.preproc.resize_features_for_keras', 'resize_features_for_keras', (['g1_d'], {}), '(g1_d)\n', (3927, 3933), False, 'from umbms.ai.preproc import resize_features_for_keras, to_td\n'), ((4138, 4156), 'numpy.zeros', 'np.zeros', (['[n_runs]'], {}), '([n_runs])\n', (4146, 4156), True, 'import numpy as np\n'), ((4171, 4189), 'numpy.zeros', 'np.zeros', (['[n_runs]'], {}), '([n_runs])\n', (4179, 4189), True, 'import numpy as np\n'), ((4204, 4222), 'numpy.zeros', 'np.zeros', (['[n_runs]'], {}), '([n_runs])\n', (4212, 4222), True, 'import numpy as np\n'), ((4237, 4255), 'numpy.zeros', 'np.zeros', (['[n_runs]'], {}), '([n_runs])\n', (4245, 4255), True, 'import numpy as np\n'), ((6683, 6762), 'umbms.ai.metrics.report_metrics', 'report_metrics', ([], {'aucs': 'auc_scores', 'accs': 'accs', 'sens': 'sens', 'spec': 'spec', 'logger': 'logger'}), '(aucs=auc_scores, accs=accs, sens=sens, spec=spec, logger=logger)\n', (6697, 6762), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((6900, 6920), 'umbms.verify_path', 'verify_path', (['out_dir'], {}), '(out_dir)\n', (6911, 6920), False, 'from umbms import get_proj_path, get_script_logger, verify_path\n'), ((1885, 1904), 'numpy.size', 'np.size', (['thresholds'], {}), '(thresholds)\n', (1892, 1904), True, 'import numpy as np\n'), ((1953, 
2015), 'umbms.ai.metrics.get_sens', 'get_sens', ([], {'preds': 'preds', 'labels': 'labels', 'threshold': 'thresholds[ii]'}), '(preds=preds, labels=labels, threshold=thresholds[ii])\n', (1961, 2015), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((2369, 2392), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (2380, 2392), True, 'import numpy as np\n'), ((2394, 2417), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (2405, 2417), True, 'import numpy as np\n'), ((2812, 2823), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2821, 2823), True, 'import matplotlib.pyplot as plt\n'), ((3179, 3222), 'os.path.join', 'os.path.join', (['__DATA_DIR', '"""g2/g2_fd.pickle"""'], {}), "(__DATA_DIR, 'g2/g2_fd.pickle')\n", (3191, 3222), False, 'import os\n'), ((3249, 3298), 'os.path.join', 'os.path.join', (['__DATA_DIR', '"""g2/g2_metadata.pickle"""'], {}), "(__DATA_DIR, 'g2/g2_metadata.pickle')\n", (3261, 3298), False, 'import os\n'), ((3380, 3436), 'os.path.join', 'os.path.join', (['__DATA_DIR', '"""g1-train-test/test_fd.pickle"""'], {}), "(__DATA_DIR, 'g1-train-test/test_fd.pickle')\n", (3392, 3436), False, 'import os\n'), ((3500, 3556), 'os.path.join', 'os.path.join', (['__DATA_DIR', '"""g1-train-test/test_md.pickle"""'], {}), "(__DATA_DIR, 'g1-train-test/test_md.pickle')\n", (3512, 3556), False, 'import os\n'), ((3723, 3734), 'umbms.ai.preproc.to_td', 'to_td', (['g1_d'], {}), '(g1_d)\n', (3728, 3734), False, 'from umbms.ai.preproc import resize_features_for_keras, to_td\n'), ((3755, 3766), 'umbms.ai.preproc.to_td', 'to_td', (['g2_d'], {}), '(g2_d)\n', (3760, 3766), False, 'from umbms.ai.preproc import resize_features_for_keras, to_td\n'), ((3966, 3989), 'umbms.ai.makesets.get_class_labels', 'get_class_labels', (['g2_md'], {}), '(g2_md)\n', (3982, 3989), False, 'from umbms.ai.makesets import get_class_labels\n'), ((4023, 4046), 
'umbms.ai.makesets.get_class_labels', 'get_class_labels', (['g1_md'], {}), '(g1_md)\n', (4039, 4046), False, 'from umbms.ai.makesets import get_class_labels\n'), ((5285, 5345), 'umbms.ai.metrics.get_opt_thresh', 'get_opt_thresh', ([], {'preds': 'g1_preds[:, 1]', 'labels': 'g1_labels[:, 1]'}), '(preds=g1_preds[:, 1], labels=g1_labels[:, 1])\n', (5299, 5345), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((6547, 6562), 'tensorflow.keras.backend.clear_session', 'clear_session', ([], {}), '()\n', (6560, 6562), False, 'from tensorflow.keras.backend import clear_session\n'), ((6851, 6866), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (6864, 6866), False, 'from umbms import get_proj_path, get_script_logger, verify_path\n'), ((6974, 7021), 'os.path.join', 'os.path.join', (['out_dir', '"""incorrect_preds.pickle"""'], {}), "(out_dir, 'incorrect_preds.pickle')\n", (6986, 7021), False, 'import os\n'), ((7072, 7117), 'os.path.join', 'os.path.join', (['out_dir', '"""correct_preds.pickle"""'], {}), "(out_dir, 'correct_preds.pickle')\n", (7084, 7117), False, 'import os\n'), ((2095, 2157), 'umbms.ai.metrics.get_spec', 'get_spec', ([], {'preds': 'preds', 'labels': 'labels', 'threshold': 'thresholds[ii]'}), '(preds=preds, labels=labels, threshold=thresholds[ii])\n', (2103, 2157), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((2689, 2704), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (2702, 2704), False, 'from umbms import get_proj_path, get_script_logger, verify_path\n'), ((2749, 2792), 'os.path.join', 'os.path.join', (['out_path', "('%s.png' % save_str)"], {}), "(out_path, '%s.png' % save_str)\n", (2761, 2792), False, 'import os\n'), ((2845, 2867), 'numpy.array', 'np.array', (['[fprs, tprs]'], {}), '([fprs, tprs])\n', (2853, 2867), True, 'import numpy as np\n'), ((2890, 2936), 'os.path.join', 'os.path.join', (['out_path', "('%s.pickle' % 
save_str)"], {}), "(out_path, '%s.pickle' % save_str)\n", (2902, 2936), False, 'import os\n'), ((4963, 5012), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'g1_labels', 'y_score': 'g1_preds'}), '(y_true=g1_labels, y_score=g1_preds)\n', (4976, 5012), False, 'from sklearn.metrics import roc_auc_score\n'), ((5453, 5528), 'umbms.ai.metrics.get_acc', 'get_acc', ([], {'preds': 'g1_preds[:, 1]', 'labels': 'g1_labels[:, 1]', 'threshold': 'opt_thresh'}), '(preds=g1_preds[:, 1], labels=g1_labels[:, 1], threshold=opt_thresh)\n', (5460, 5528), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((5638, 5714), 'umbms.ai.metrics.get_sens', 'get_sens', ([], {'preds': 'g1_preds[:, 1]', 'labels': 'g1_labels[:, 1]', 'threshold': 'opt_thresh'}), '(preds=g1_preds[:, 1], labels=g1_labels[:, 1], threshold=opt_thresh)\n', (5646, 5714), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((5826, 5902), 'umbms.ai.metrics.get_spec', 'get_spec', ([], {'preds': 'g1_preds[:, 1]', 'labels': 'g1_labels[:, 1]', 'threshold': 'opt_thresh'}), '(preds=g1_preds[:, 1], labels=g1_labels[:, 1], threshold=opt_thresh)\n', (5834, 5902), False, 'from umbms.ai.metrics import get_acc, get_sens, get_spec, get_opt_thresh, report_metrics\n'), ((6140, 6163), 'numpy.zeros_like', 'np.zeros_like', (['g1_preds'], {}), '(g1_preds)\n', (6153, 6163), True, 'import numpy as np\n'), ((6294, 6319), 'numpy.size', 'np.size', (['g1_preds'], {'axis': '(0)'}), '(g1_preds, axis=0)\n', (6301, 6319), True, 'import numpy as np\n'), ((2618, 2633), 'umbms.get_proj_path', 'get_proj_path', ([], {}), '()\n', (2631, 2633), False, 'from umbms import get_proj_path, get_script_logger, verify_path\n'), ((4595, 4609), 'numpy.shape', 'np.shape', (['g2_d'], {}), '(g2_d)\n', (4603, 4609), True, 'import numpy as np\n')] |
####################################################################
# Interfaces with the GENESIS version of the auditory cortex model of Beeman,
# BMC Neuroscience (Suppl. 1), 2013 (i.e. a slightly modified version
# as used in Metzner et al., Front Comp Neu, 2016)
#
# @author: <NAME>, 02/11/2017
####################################################################
import os
import subprocess
import sys
import matplotlib.mlab as mlab
import numpy as np
import sciunit
from assrunit.capabilities import ProduceXY
from assrunit.constants import ACNET2_GENESIS_PATH
class GenesisModel(object):
    """Wrapper around one GENESIS run of the Beeman (2013) auditory cortex
    model (slightly modified version, as used in Metzner et al., 2016).

    Builds the command line for a single simulation, executes it in a shell
    and post-processes the simulated EPSC sum into power in a frequency band.
    """

    def __init__(self, params):
        """
        Parameters:
            params (dict): model parameters; must contain the keys
                'Filename', 'Random Seed', 'E-E Weight', 'I-E Weight',
                'E-I Weight', 'I-I Weight', 'Background Noise Weight',
                'E-Drive Weight', 'I-Drive Weight' and
                'Background Noise Frequency'
        """
        # extract the model parameters from the params dictionary
        self.filename = params["Filename"]
        self.random_seed = params["Random Seed"]
        self.ee_weight = params["E-E Weight"]
        self.ie_weight = params["I-E Weight"]
        self.ei_weight = params["E-I Weight"]
        self.ii_weight = params["I-I Weight"]
        self.bg_weight = params["Background Noise Weight"]
        self.edrive_weight = params["E-Drive Weight"]
        self.idrive_weight = params["I-Drive Weight"]
        self.bg_noise_frequency = params["Background Noise Frequency"]

    def genesisModelRun(self, stimfrequency, power_band):
        """
        Runs the simulation, calculates the power spectrum
        and returns the power in the specified frequency band (Note: so far
        only 3 frequency bands are possible; 20, 30 and 40 Hz).

        Parameters:
            stimfrequency: the frequency at which the network is driven
            power_band (str): the frequency band of interest; one of
                'twenty', 'thirty' or 'forty'

        Returns:
            power (float): summed PSD over the requested band
        """
        # put the execution string together
        execstring = "genesis {} {} {} {} {} {} {} {} {} {} {} {}".format(
            os.path.join(ACNET2_GENESIS_PATH, "ACnet2-batch-new-standard.g"),
            self.filename,
            str(stimfrequency),
            str(self.random_seed),
            str(self.ee_weight),
            str(self.ie_weight),
            str(self.ei_weight),
            str(self.ii_weight),
            str(self.bg_weight),
            str(self.edrive_weight),
            str(self.idrive_weight),
            str(self.bg_noise_frequency),
        )
        print("running simulation")
        # execute the simulation
        self._runProcess(execstring)
        print("finished simulation")
        # load and analyse the data written by the GENESIS script
        datafile = "EPSC_sum_" + self.filename + ".txt"
        pxx, freqs = self._calculate_psd(datafile)
        # NOTE(review): hard-coded relative path; assumes the process was
        # started from a sibling directory of 'notebooks/' -- confirm
        os.chdir("../notebooks/")
        # extract power at the frequency band of interest
        # (lb/ub are index bounds into the PSD bins for each band)
        lbounds = {"forty": 93, "thirty": 69, "twenty": 44}
        ubounds = {"forty": 103, "thirty": 78, "twenty": 54}
        lb = lbounds[power_band]
        ub = ubounds[power_band]
        power = np.sum(pxx[lb:ub])
        return power

    def _runProcess(self, execstring):
        """
        This function executes the Genesis simulation in a shell.

        Parameters:
            execstring : the command which executes the Genesis model with
                all its parameters
        """
        return subprocess.call(execstring, shell=True)

    @staticmethod
    def _read_timeseries(datafile):
        """Reads a two-column (time, value) whitespace-separated text file.

        Parameters:
            datafile (str): path of the file to read

        Returns:
            tn, yn: numpy arrays of time points and signal values
        """
        with open(datafile, "r") as fp:
            lines = fp.readlines()
        count = len(lines)
        tn = np.zeros(count)
        yn = np.zeros(count)
        for i, line in enumerate(lines):
            # BUG FIX: the original read line[0] / line[1], i.e. the first
            # two *characters* of each line, instead of the two
            # whitespace-separated columns (which made all time points
            # identical and broke the sample-rate computation downstream)
            fields = line.split()
            tn[i] = float(fields[0])
            yn[i] = float(fields[1])
        return tn, yn

    def _calculate_psd(self, datafile):
        """
        Calculates the power spectral density of the simulated EEG/MEG signal

        Parameters:
            datafile (str): path of the two-column (time, value) text file

        Returns:
            pxx, freqs: the PSD values and the corresponding frequencies
        """
        if not datafile:
            print("No files were specified for plotting!")
            sys.exit()
        tn, yn = self._read_timeseries(datafile)
        dt = tn[1] - tn[0]
        # fourier sample rate
        fs = 1.0 / dt
        npts = len(yn)
        # drop the initial transient (first 0.2 s), keeping an even length
        startpt = int(0.2 * fs)
        if (npts - startpt) % 2 != 0:
            startpt = startpt + 1
        yn = yn[startpt:]
        tn = tn[startpt:]
        nfft = len(tn) // 4
        overlap = nfft // 2
        pxx, freqs = mlab.psd(
            yn, NFFT=nfft, Fs=fs, noverlap=overlap, window=mlab.window_none
        )
        # zero the DC bin
        pxx[0] = 0.0
        return pxx, freqs
class BeemanGenesisModel(sciunit.Model, ProduceXY):
    """The auditory cortex model from Beeman (2013) [using a slightly modified
    version of the original Genesis model; For more details see Metzner et al.
    (2016)]"""

    def __init__(self, controlparams, schizparams):
        """
        Constructor method. Both parameter sets, for the control and the
        schizophrenia-like network, have to be a dictionary containing the
        following parameters (Filename, Stimulation Frequency, Random Seed,
        E-E Weight, I-E Weight, E-I Weight, I-I Weight, Background Noise
        Weight, E-Drive Weight, I-Drive Weight, Background Noise Frequency)

        Parameters:
            controlparams: Parameters for the control network
            schizparams: Parameters for the schizophrenia-like network
        """
        self.controlparams = controlparams
        self.schizparams = schizparams
        super(BeemanGenesisModel, self).__init__(
            controlparams=controlparams, schizparams=schizparams
        )

    def produce_XY(self, stimfrequency=40.0, powerfrequency=40.0):
        """
        Simulates Y Hz drive to the control and the schizophrenia-like network
        for all random seeds, calculates a Fourier transform of the simulated
        MEG and extracts the power in the X Hz frequency band for each
        simulation. Returns the power for the control and the
        schizophrenia-like network, respectively.

        Note: So far, only three power bands [20,30,40] are possible.

        Returns:
            [controlXY, schizXY]: band power for the two networks
        """
        powerband = "forty"  # default frequency band
        if powerfrequency == 30.0:
            powerband = "thirty"
        elif powerfrequency == 20.0:
            powerband = "twenty"
        # generate the control network and run simulation
        print("Generating control model")
        ctrl_model = GenesisModel(self.controlparams)
        print("Running control model")
        controlXY = ctrl_model.genesisModelRun(stimfrequency, powerband)
        # generate the schizophrenia-like network and run simulation
        print("Generating schizophrenia model")
        # BUG FIX: the original instantiated BeemanGenesisModel(self.schizparams)
        # here, which raises a TypeError (its constructor takes two parameter
        # sets) and has no genesisModelRun() method; the schizophrenia-like
        # run must use the GenesisModel wrapper, just like the control run.
        schiz_model = GenesisModel(self.schizparams)
        print("Running schizophrenia model")
        schizXY = schiz_model.genesisModelRun(stimfrequency, powerband)
        return [controlXY, schizXY]
| [
"numpy.sum",
"numpy.zeros",
"subprocess.call",
"matplotlib.mlab.psd",
"os.path.join",
"os.chdir",
"sys.exit"
] | [((2515, 2540), 'os.chdir', 'os.chdir', (['"""../notebooks/"""'], {}), "('../notebooks/')\n", (2523, 2540), False, 'import os\n'), ((2806, 2824), 'numpy.sum', 'np.sum', (['pxx[lb:ub]'], {}), '(pxx[lb:ub])\n', (2812, 2824), True, 'import numpy as np\n'), ((3103, 3142), 'subprocess.call', 'subprocess.call', (['execstring'], {'shell': '(True)'}), '(execstring, shell=True)\n', (3118, 3142), False, 'import subprocess\n'), ((4191, 4264), 'matplotlib.mlab.psd', 'mlab.psd', (['yn'], {'NFFT': 'nfft', 'Fs': 'fs', 'noverlap': 'overlap', 'window': 'mlab.window_none'}), '(yn, NFFT=nfft, Fs=fs, noverlap=overlap, window=mlab.window_none)\n', (4199, 4264), True, 'import matplotlib.mlab as mlab\n'), ((1768, 1832), 'os.path.join', 'os.path.join', (['ACNET2_GENESIS_PATH', '"""ACnet2-batch-new-standard.g"""'], {}), "(ACNET2_GENESIS_PATH, 'ACnet2-batch-new-standard.g')\n", (1780, 1832), False, 'import os\n'), ((3396, 3406), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3404, 3406), False, 'import sys\n'), ((3532, 3547), 'numpy.zeros', 'np.zeros', (['count'], {}), '(count)\n', (3540, 3547), True, 'import numpy as np\n'), ((3565, 3580), 'numpy.zeros', 'np.zeros', (['count'], {}), '(count)\n', (3573, 3580), True, 'import numpy as np\n')] |
""" General helper functions """
import logging
import math
import os
from collections import Counter
from queue import Full, Queue
from typing import TYPE_CHECKING, Any, Callable, Dict, Hashable, List, Tuple
import cv2
import numpy as np
import slugify as unicode_slug
import tornado.queues as tq
import voluptuous as vol
import viseron.mqtt
from viseron.const import FONT, FONT_SIZE, FONT_THICKNESS
if TYPE_CHECKING:
from viseron.camera.frame import Frame
from viseron.config.config_object_detection import LabelConfig
from viseron.detector.detected_object import DetectedObject
from viseron.zones import Zone
LOGGER = logging.getLogger(__name__)
def calculate_relative_contours(contours, resolution: Tuple[int, int]):
    """Convert a list of contours from absolute pixel coords to relative ones.

    Each contour is divided element-wise by ``resolution``.
    """
    return [np.divide(contour, resolution) for contour in contours]
def calculate_relative_coords(
    bounding_box: Tuple[int, int, int, int], resolution: Tuple[int, int]
) -> Tuple[float, float, float, float]:
    """Convert an absolute (x1, y1, x2, y2) box to relative coords.

    Each coordinate is divided by the matching resolution axis and rounded
    to three decimals.
    """
    x1, y1, x2, y2 = bounding_box
    width, height = resolution
    return (
        round(x1 / width, 3),
        round(y1 / height, 3),
        round(x2 / width, 3),
        round(y2 / height, 3),
    )
def calculate_absolute_coords(
    bounding_box: Tuple[int, int, int, int], frame_res: Tuple[int, int]
) -> Tuple[int, int, int, int]:
    """Convert a relative (x1, y1, x2, y2) box to absolute pixel coords.

    Even indices scale by the x-resolution, odd ones by the y-resolution;
    results are floored to integers.
    """
    return tuple(
        math.floor(coord * frame_res[idx % 2])
        for idx, coord in enumerate(bounding_box)
    )
def scale_bounding_box(
    image_size: Tuple[int, int, int, int],
    bounding_box: Tuple[int, int, int, int],
    target_size,
) -> Tuple[float, float, float, float]:
    """Scale a bounding box from one image size to another.

    Coordinates are first normalized by ``image_size`` and then multiplied
    by the matching axis of ``target_size``.
    """
    relative = [
        coord / image_size[idx % 2] for idx, coord in enumerate(bounding_box)
    ]
    return (
        relative[0] * target_size[0],
        relative[1] * target_size[1],
        relative[2] * target_size[0],
        relative[3] * target_size[1],
    )
def draw_bounding_box_relative(
    frame, bounding_box, frame_res, color=(255, 0, 0), thickness=1
) -> Any:
    """Draw a bounding box given in relative coordinates onto *frame*.

    Corners are converted to absolute pixel positions (floored) before
    delegating to ``cv2.rectangle``.
    """
    x1, y1, x2, y2 = bounding_box
    top_left = (
        math.floor(x1 * frame_res[0]),
        math.floor(y1 * frame_res[1]),
    )
    bottom_right = (
        math.floor(x2 * frame_res[0]),
        math.floor(y2 * frame_res[1]),
    )
    return cv2.rectangle(frame, top_left, bottom_right, color, thickness)
def put_object_label_relative(frame, obj, frame_res, color=(255, 0, 0)) -> None:
    """Draw a label using relative coordinates.

    Renders "<label> <confidence>%" on a filled background rectangle just
    above the object's bounding box (or below it if the box touches the top
    of the frame). *obj* must expose ``rel_x1``/``rel_y1``/``rel_y2``,
    ``label`` and ``confidence``.
    """
    # default anchor: just above the top-left corner of the bounding box
    coordinates = (
        math.floor(obj.rel_x1 * frame_res[0]),
        (math.floor(obj.rel_y1 * frame_res[1])) - 5,
    )
    # If label is outside the top of the frame, put it below the bounding box
    if coordinates[1] < 10:
        coordinates = (
            math.floor(obj.rel_x1 * frame_res[0]),
            (math.floor(obj.rel_y2 * frame_res[1])) + 5,
        )
    text = f"{obj.label} {int(obj.confidence * 100)}%"
    # getTextSize returns ((width, height), baseline); only the size is used
    (text_width, text_height) = cv2.getTextSize(
        text=text,
        fontFace=FONT,
        fontScale=FONT_SIZE,
        thickness=FONT_THICKNESS,
    )[0]
    # filled background rectangle sized to the rendered text (small margins)
    box_coords = (
        (coordinates[0], coordinates[1] + 5),
        (coordinates[0] + text_width + 2, coordinates[1] - text_height - 3),
    )
    cv2.rectangle(frame, box_coords[0], box_coords[1], color, cv2.FILLED)
    cv2.putText(
        img=frame,
        text=text,
        org=coordinates,
        fontFace=FONT,
        fontScale=FONT_SIZE,
        color=(255, 255, 255),
        thickness=FONT_THICKNESS,
    )
def draw_object(
    frame, obj, camera_resolution: Tuple[int, int], color=(150, 0, 0), thickness=1
):
    """Draw a single object (bounding box plus label) on the supplied frame.

    Relevant objects are drawn in green, all others in the given color.
    """
    if obj.relevant:
        color = (0, 150, 0)
    bounding_box = (obj.rel_x1, obj.rel_y1, obj.rel_x2, obj.rel_y2)
    frame = draw_bounding_box_relative(
        frame,
        bounding_box,
        camera_resolution,
        color=color,
        thickness=thickness,
    )
    put_object_label_relative(frame, obj, camera_resolution, color=color)
def draw_objects(frame, objects, camera_resolution) -> None:
    """Draw every detected object in *objects* on the supplied frame."""
    for detected in objects:
        draw_object(frame, detected, camera_resolution)
def draw_zones(frame, zones) -> None:
    """Draw zone polygons and their names on the supplied frame.

    A zone currently containing objects is outlined in green, otherwise red.
    """
    for zone in zones:
        outline = (0, 255, 0) if zone.objects_in_zone else (0, 0, 255)
        cv2.polylines(frame, [zone.coordinates], True, outline, 2)
        # place the zone name slightly inside the first polygon vertex
        label_position = (
            zone.coordinates[0][0] + 5,
            zone.coordinates[0][1] + 15,
        )
        cv2.putText(
            frame,
            zone.name,
            label_position,
            FONT,
            FONT_SIZE,
            outline,
            FONT_THICKNESS,
        )
def draw_contours(frame, contours, resolution, threshold) -> None:
    """Draw motion contours on the supplied frame.

    Contours whose area exceeds *threshold* are drawn thick in magenta,
    the filtered (too small) ones thin in dark purple.
    """
    filtered = []
    relevant = []
    for rel_contour, area in zip(contours.rel_contours, contours.contour_areas):
        absolute = np.multiply(rel_contour, resolution).astype("int32")
        bucket = relevant if area > threshold else filtered
        bucket.append(absolute)
    cv2.drawContours(frame, relevant, -1, (255, 0, 255), thickness=2)
    cv2.drawContours(frame, filtered, -1, (130, 0, 75), thickness=1)
def draw_mask(frame, mask_points) -> None:
    """Draw mask polygons on the supplied frame.

    Masked areas are darkened (70% opacity black overlay), outlined in
    orange, and labelled "Mask" at the polygon centroid.
    """
    mask_overlay = frame.copy()
    # Draw polygon filled with black color
    cv2.fillPoly(
        mask_overlay,
        pts=mask_points,
        color=(0),
    )
    # Apply overlay on frame with 70% opacity
    cv2.addWeighted(
        mask_overlay,
        0.7,
        frame,
        1 - 0.7,
        0,
        frame,
    )
    # Draw polygon outline in orange
    cv2.polylines(frame, mask_points, True, (0, 140, 255), 2)
    for mask in mask_points:
        image_moment = cv2.moments(mask)
        # BUGFIX: a degenerate (zero-area) polygon has m00 == 0, which
        # previously raised ZeroDivisionError; skip its label instead.
        if not image_moment["m00"]:
            continue
        # centroid of the polygon from its spatial moments
        center_x = int(image_moment["m10"] / image_moment["m00"])
        center_y = int(image_moment["m01"] / image_moment["m00"])
        cv2.putText(
            frame,
            "Mask",
            (center_x - 20, center_y + 5),
            FONT,
            FONT_SIZE,
            (255, 255, 255),
            FONT_THICKNESS,
        )
def pop_if_full(queue: Queue, item: Any, logger=LOGGER, name="unknown", warn=False):
    """Put *item* on *queue*; if the queue is full, drop the oldest entry first.

    Optionally logs a warning (with the queue's *name*) when an entry is
    dropped.
    """
    try:
        queue.put_nowait(item)
        return
    except (Full, tq.QueueFull):
        pass
    if warn:
        logger.warning(f"{name} queue is full. Removing oldest entry")
    queue.get()
    queue.put_nowait(item)
def slugify(text: str) -> str:
    """Slugify a given text.

    Delegates to python-slugify with ``_`` as the word separator, so the
    result is safe for identifiers such as MQTT topics and entity names.
    """
    return unicode_slug.slugify(text, separator="_")
def print_slugs(config: dict):
    """Print the name and slug of every camera in *config*.

    Expects ``config["cameras"]`` to be a list of dicts with a ``name`` key.
    """
    for camera in config["cameras"]:
        slug = unicode_slug.slugify(camera["name"], separator="_")
        print(f"Name: {camera['name']}, slug: {slug}")
def report_labels(
    labels,
    labels_in_fov: List[str],
    reported_label_count: Dict[str, int],
    mqtt_devices,
) -> Tuple[List[str], Dict[str, int]]:
    """Send on/off to MQTT for labels.
    Only if state has changed since last report.

    *labels* is the freshly detected set, *labels_in_fov* the previously
    reported (sorted) set, and *reported_label_count* the per-label counts
    published last time. Returns the new (labels_in_fov, reported_label_count)
    pair to store for the next call. *reported_label_count* is updated in
    place.
    """
    labels = sorted(labels)
    # unchanged since last report: nothing to publish
    if labels == labels_in_fov:
        return labels_in_fov, reported_label_count
    labels_added = list(set(labels) - set(labels_in_fov))
    labels_removed = list(set(labels_in_fov) - set(labels))
    # Count occurrences of each label
    counter: Counter = Counter(labels)
    # only publish when an MQTT client is connected
    if viseron.mqtt.MQTT.client:
        # newly appeared labels: publish "on" with the current count
        for label in labels_added:
            attributes = {}
            attributes["count"] = counter[label]
            mqtt_devices[label].publish(True, attributes)
            reported_label_count[label] = counter[label]  # Save reported count
        # labels no longer in view: publish "off"
        for label in labels_removed:
            mqtt_devices[label].publish(False)
        # still-present labels whose count changed: re-publish with new count
        for label, count in counter.items():
            if reported_label_count.get(label, 0) != count:
                attributes = {}
                attributes["count"] = count
                mqtt_devices[label].publish(True, attributes)
                reported_label_count[label] = count
    return labels, reported_label_count
def combined_objects(
    objects_in_fov: List["DetectedObject"], zones: List["Zone"]
) -> List["DetectedObject"]:
    """Combine the object lists of a frame and all zones.

    Returns a new list; *objects_in_fov* is left unmodified.
    (BUGFIX: the previous implementation aliased the input list and extended
    it with ``+=``, mutating the caller's field-of-view list as a side
    effect.)
    """
    all_objects = list(objects_in_fov)
    for zone in zones:
        all_objects += zone.objects_in_zone
    return all_objects
def key_dependency(
    key: Hashable, dependency: Hashable
) -> Callable[[Dict[Hashable, Any]], Dict[Hashable, Any]]:
    """Return a voluptuous validator that requires *dependency* to be present
    whenever *key* is present in the validated dict."""

    def validator(value: Dict[Hashable, Any]) -> Dict[Hashable, Any]:
        """Test dependencies."""
        if not isinstance(value, dict):
            raise vol.Invalid("key dependencies require a dict")
        # valid when the dependency exists or the key is absent
        if dependency in value or key not in value:
            return value
        raise vol.Invalid(
            f'dependency violation - key "{key}" requires '
            f'key "{dependency}" to exist'
        )

    return validator
def create_directory(path):
    """Create *path* (including parents) if it does not already exist.

    A concurrent creation between the check and ``makedirs`` is tolerated
    via the FileExistsError guard.
    """
    try:
        if os.path.isdir(path):
            return
        LOGGER.debug(f"Creating folder {path}")
        os.makedirs(path)
    except FileExistsError:
        pass
| [
"numpy.divide",
"cv2.putText",
"cv2.polylines",
"slugify.slugify",
"os.makedirs",
"os.path.isdir",
"numpy.multiply",
"collections.Counter",
"math.floor",
"cv2.getTextSize",
"cv2.moments",
"cv2.fillPoly",
"cv2.addWeighted",
"voluptuous.Invalid",
"cv2.rectangle",
"cv2.drawContours",
"l... | [((641, 668), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (658, 668), False, 'import logging\n'), ((2828, 2888), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'topleft', 'bottomright', 'color', 'thickness'], {}), '(frame, topleft, bottomright, color, thickness)\n', (2841, 2888), False, 'import cv2\n'), ((3770, 3839), 'cv2.rectangle', 'cv2.rectangle', (['frame', 'box_coords[0]', 'box_coords[1]', 'color', 'cv2.FILLED'], {}), '(frame, box_coords[0], box_coords[1], color, cv2.FILLED)\n', (3783, 3839), False, 'import cv2\n'), ((3844, 3984), 'cv2.putText', 'cv2.putText', ([], {'img': 'frame', 'text': 'text', 'org': 'coordinates', 'fontFace': 'FONT', 'fontScale': 'FONT_SIZE', 'color': '(255, 255, 255)', 'thickness': 'FONT_THICKNESS'}), '(img=frame, text=text, org=coordinates, fontFace=FONT, fontScale\n =FONT_SIZE, color=(255, 255, 255), thickness=FONT_THICKNESS)\n', (3855, 3984), False, 'import cv2\n'), ((5751, 5825), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'relevant_contours', '(-1)', '(255, 0, 255)'], {'thickness': '(2)'}), '(frame, relevant_contours, -1, (255, 0, 255), thickness=2)\n', (5767, 5825), False, 'import cv2\n'), ((5830, 5903), 'cv2.drawContours', 'cv2.drawContours', (['frame', 'filtered_contours', '(-1)', '(130, 0, 75)'], {'thickness': '(1)'}), '(frame, filtered_contours, -1, (130, 0, 75), thickness=1)\n', (5846, 5903), False, 'import cv2\n'), ((6067, 6119), 'cv2.fillPoly', 'cv2.fillPoly', (['mask_overlay'], {'pts': 'mask_points', 'color': '(0)'}), '(mask_overlay, pts=mask_points, color=0)\n', (6079, 6119), False, 'import cv2\n'), ((6203, 6263), 'cv2.addWeighted', 'cv2.addWeighted', (['mask_overlay', '(0.7)', 'frame', '(1 - 0.7)', '(0)', 'frame'], {}), '(mask_overlay, 0.7, frame, 1 - 0.7, 0, frame)\n', (6218, 6263), False, 'import cv2\n'), ((6360, 6417), 'cv2.polylines', 'cv2.polylines', (['frame', 'mask_points', '(True)', '(0, 140, 255)', '(2)'], {}), '(frame, mask_points, True, (0, 140, 255), 2)\n', 
(6373, 6417), False, 'import cv2\n'), ((7276, 7317), 'slugify.slugify', 'unicode_slug.slugify', (['text'], {'separator': '"""_"""'}), "(text, separator='_')\n", (7296, 7317), True, 'import slugify as unicode_slug\n'), ((8139, 8154), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (8146, 8154), False, 'from collections import Counter\n'), ((1655, 1697), 'math.floor', 'math.floor', (['(bounding_box[0] * frame_res[0])'], {}), '(bounding_box[0] * frame_res[0])\n', (1665, 1697), False, 'import math\n'), ((1707, 1749), 'math.floor', 'math.floor', (['(bounding_box[1] * frame_res[1])'], {}), '(bounding_box[1] * frame_res[1])\n', (1717, 1749), False, 'import math\n'), ((1759, 1801), 'math.floor', 'math.floor', (['(bounding_box[2] * frame_res[0])'], {}), '(bounding_box[2] * frame_res[0])\n', (1769, 1801), False, 'import math\n'), ((1811, 1853), 'math.floor', 'math.floor', (['(bounding_box[3] * frame_res[1])'], {}), '(bounding_box[3] * frame_res[1])\n', (1821, 1853), False, 'import math\n'), ((2585, 2627), 'math.floor', 'math.floor', (['(bounding_box[0] * frame_res[0])'], {}), '(bounding_box[0] * frame_res[0])\n', (2595, 2627), False, 'import math\n'), ((2637, 2679), 'math.floor', 'math.floor', (['(bounding_box[1] * frame_res[1])'], {}), '(bounding_box[1] * frame_res[1])\n', (2647, 2679), False, 'import math\n'), ((2715, 2757), 'math.floor', 'math.floor', (['(bounding_box[2] * frame_res[0])'], {}), '(bounding_box[2] * frame_res[0])\n', (2725, 2757), False, 'import math\n'), ((2767, 2809), 'math.floor', 'math.floor', (['(bounding_box[3] * frame_res[1])'], {}), '(bounding_box[3] * frame_res[1])\n', (2777, 2809), False, 'import math\n'), ((3051, 3088), 'math.floor', 'math.floor', (['(obj.rel_x1 * frame_res[0])'], {}), '(obj.rel_x1 * frame_res[0])\n', (3061, 3088), False, 'import math\n'), ((3487, 3580), 'cv2.getTextSize', 'cv2.getTextSize', ([], {'text': 'text', 'fontFace': 'FONT', 'fontScale': 'FONT_SIZE', 'thickness': 'FONT_THICKNESS'}), '(text=text, 
fontFace=FONT, fontScale=FONT_SIZE, thickness=\n FONT_THICKNESS)\n', (3502, 3580), False, 'import cv2\n'), ((4978, 5034), 'cv2.polylines', 'cv2.polylines', (['frame', '[zone.coordinates]', '(True)', 'color', '(2)'], {}), '(frame, [zone.coordinates], True, color, 2)\n', (4991, 5034), False, 'import cv2\n'), ((5044, 5177), 'cv2.putText', 'cv2.putText', (['frame', 'zone.name', '(zone.coordinates[0][0] + 5, zone.coordinates[0][1] + 15)', 'FONT', 'FONT_SIZE', 'color', 'FONT_THICKNESS'], {}), '(frame, zone.name, (zone.coordinates[0][0] + 5, zone.coordinates\n [0][1] + 15), FONT, FONT_SIZE, color, FONT_THICKNESS)\n', (5055, 5177), False, 'import cv2\n'), ((6470, 6487), 'cv2.moments', 'cv2.moments', (['mask'], {}), '(mask)\n', (6481, 6487), False, 'import cv2\n'), ((6628, 6739), 'cv2.putText', 'cv2.putText', (['frame', '"""Mask"""', '(center_x - 20, center_y + 5)', 'FONT', 'FONT_SIZE', '(255, 255, 255)', 'FONT_THICKNESS'], {}), "(frame, 'Mask', (center_x - 20, center_y + 5), FONT, FONT_SIZE,\n (255, 255, 255), FONT_THICKNESS)\n", (6639, 6739), False, 'import cv2\n'), ((893, 923), 'numpy.divide', 'np.divide', (['contour', 'resolution'], {}), '(contour, resolution)\n', (902, 923), True, 'import numpy as np\n'), ((3099, 3136), 'math.floor', 'math.floor', (['(obj.rel_y1 * frame_res[1])'], {}), '(obj.rel_y1 * frame_res[1])\n', (3109, 3136), False, 'import math\n'), ((3292, 3329), 'math.floor', 'math.floor', (['(obj.rel_x1 * frame_res[0])'], {}), '(obj.rel_x1 * frame_res[0])\n', (3302, 3329), False, 'import math\n'), ((9501, 9547), 'voluptuous.Invalid', 'vol.Invalid', (['"""key dependencies require a dict"""'], {}), "('key dependencies require a dict')\n", (9512, 9547), True, 'import voluptuous as vol\n'), ((9619, 9711), 'voluptuous.Invalid', 'vol.Invalid', (['f"""dependency violation - key "{key}" requires key "{dependency}" to exist"""'], {}), '(\n f\'dependency violation - key "{key}" requires key "{dependency}" to exist\')\n', (9630, 9711), True, 'import voluptuous as 
vol\n'), ((9885, 9904), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (9898, 9904), False, 'import os\n'), ((9970, 9987), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (9981, 9987), False, 'import os\n'), ((3344, 3381), 'math.floor', 'math.floor', (['(obj.rel_y2 * frame_res[1])'], {}), '(obj.rel_y2 * frame_res[1])\n', (3354, 3381), False, 'import math\n'), ((5542, 5583), 'numpy.multiply', 'np.multiply', (['relative_contour', 'resolution'], {}), '(relative_contour, resolution)\n', (5553, 5583), True, 'import numpy as np\n'), ((7530, 7581), 'slugify.slugify', 'unicode_slug.slugify', (["camera['name']"], {'separator': '"""_"""'}), "(camera['name'], separator='_')\n", (7550, 7581), True, 'import slugify as unicode_slug\n')] |
# ~~~
# This file is part of the paper:
#
# "An adaptive projected Newton non-conforming dual approach
# for trust-region reduced basis approximation of PDE-constrained
# parameter optimization"
#
# https://github.com/TiKeil/Proj-Newton-NCD-corrected-TR-RB-for-pde-opt
#
# Copyright 2019-2020 all developers. All rights reserved.
# License: Licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# Authors:
# <NAME> (2020)
# <NAME> (2019 - 2020)
# ~~~
import numpy as np
def plot_functional(opt_fom, steps, ranges):
    """Sample the output functional J_hat on a 2d parameter grid and plot it.

    Parameters:
    opt_fom: model providing ``parameters.parse`` and ``output_functional_hat``
    steps: number of grid points per parameter direction
    ranges: ((min_0, max_0), (min_1, max_1)) bounds of the two components

    Returns the meshgrid arrays ``x1``, ``y1`` and the sampled values
    ``func_``. As a side effect, creates a 3d surface figure and a contour
    figure (matplotlib is imported locally inside this function).
    """
    first_component_steps = steps
    second_component_steps = steps
    from mpl_toolkits.mplot3d import Axes3D
    import matplotlib.pyplot as plt
    import numpy as np
    mu_first_component = np.linspace(ranges[0][0],ranges[0][1],first_component_steps)
    mu_second_component = np.linspace(ranges[1][0],ranges[1][1],second_component_steps)
    x1,y1 = np.meshgrid(mu_first_component,mu_second_component)
    func_ = np.zeros([second_component_steps,first_component_steps]) #meshgrid shape the first component as column index
    # evaluate the (potentially expensive) functional at every grid node
    for i in range(first_component_steps):
        for j in range(second_component_steps):
            mu_ = opt_fom.parameters.parse([x1[j][i],y1[j][i]])
            func_[j][i] = opt_fom.output_functional_hat(mu_)
    # 3d surface plot
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    surf = ax.plot_surface(x1, y1, func_, rstride=1, cstride=1, cmap='hot', linewidth=0, antialiased=False)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    #fig.savefig('3d', format='pdf', bbox_inches="tight")
    # 2d contour plot of the same data
    fig2 = plt.figure()
    number_of_contour_levels= 100
    cont = plt.contour(x1,y1,func_,number_of_contour_levels)
    fig2.colorbar(cont, shrink=0.5, aspect=5)
    # fig2.savefig('2d', format='pdf', bbox_inches="tight")
    return x1, y1, func_
def compute_errors(opt_fom, parameter_space, J_start, J_opt,
                   mu_start, mu_opt, mus, Js, times, tictoc, FOC):
    """Assemble error/time/FOC histories of an optimization run.

    Builds, per iterate: the parameter error ||mu_i - mu_opt||, the
    functional error |J_i - J_opt|, cumulative wall-clock times (offset by
    the setup time *tictoc*), and first-order criticality values.

    Returns (times_full, J_error, mu_error, FOCs).
    """
    mu_error = [np.linalg.norm(mu_start.to_numpy() - mu_opt)]
    J_error = [J_start - J_opt]
    for mu_i in mus[1:]: # the first entry is mu_start
        # iterates may be stored either as pymor Mu objects (dict-like) or
        # as plain arrays
        if isinstance(mu_i,dict):
            mu_error.append(np.linalg.norm(mu_i.to_numpy() - mu_opt))
        else:
            mu_error.append(np.linalg.norm(mu_i - mu_opt))
    # when Js already contains J_start as its first entry, skip it
    i = 1 if (len(Js) >= len(mus)) else 0
    for Ji in Js[i:]: # the first entry is J_start
        J_error.append(np.abs(Ji - J_opt))
    # shift all timings by the setup time tictoc
    times_full = [tictoc]
    for tim in times:
        times_full.append(tim + tictoc)
    if len(FOC)!= len(times_full):
        # the FOC history misses the initial value: compute the first-order
        # criticality at mu_start (projected-gradient norm) and prepend it
        print("Computing only the initial FOC")
        gradient = opt_fom.output_functional_hat_gradient(mu_start)
        mu_box = opt_fom.parameters.parse(mu_start.to_numpy()-gradient)
        from pdeopt.TR import projection_onto_range
        first_order_criticity = mu_start.to_numpy() - projection_onto_range(parameter_space, mu_box).to_numpy()
        normgrad = np.linalg.norm(first_order_criticity)
        FOCs= [normgrad]
        FOCs.extend(FOC)
    else:
        FOCs = FOC
    if len(J_error) > len(times_full):
        # this happens sometimes in the optional enrichment. For this we need to compute the last J error
        # the last entry is zero and only there to detect this case
        assert not Js[-1]
        J_error.pop(-1)
        J_error.pop(-1)
        J_error.append(np.abs(J_opt-Js[-2]))
    return times_full, J_error, mu_error, FOCs
import scipy
def compute_eigvals(A, B):
    """Return eigenvalues of the generalized problem A x = lambda B x.

    Delegates to ``scipy.sparse.linalg.eigsh`` (eigenvalues only, default
    number and ordering). Can be very costly for large systems.
    """
    print('WARNING: THIS MIGHT BE VERY EXPENSIVE')
    eigenvalues = scipy.sparse.linalg.eigsh(A, M=B, return_eigenvectors=False)
    return eigenvalues
import csv
def save_data(directory, times, J_error, n, mu_error=None, FOC=None, additional_data=None):
    """Persist optimization histories as single-column csv files.

    Writes ``<directory>/<quantity>_<n>.txt`` for the mandatory J_error and
    times lists, the optional mu_error and FOC lists, and every entry of
    *additional_data* except the "opt_rom" key (which is not serializable).
    """
    def _dump(prefix, values):
        # one value per row, csv-formatted
        with open('{}/{}_{}.txt'.format(directory, prefix, n), 'w') as csvfile:
            writer = csv.writer(csvfile)
            for val in values:
                writer.writerow([val])

    _dump('error', J_error)
    _dump('times', times)
    if mu_error is not None:
        _dump('mu_error', mu_error)
    if FOC is not None:
        _dump('FOC', FOC)
    if additional_data:
        for key in additional_data.keys():
            if not key == "opt_rom":
                _dump(key, additional_data[key])
def get_data(directory, n, mu_error_=False, mu_est_=False, FOC=False, j_list=True):
    """Load optimization histories written by :func:`save_data`.

    Parameters:
    directory: directory prefix the files live in; must end with "/" since
        file names are built by plain string concatenation
    n: run index used as the file-name suffix
    mu_error_, mu_est_, FOC, j_list: flags selecting which optional
        histories to read; they also determine the shape of the returned
        tuple (kept exactly as before for backward compatibility)

    Returns a tuple starting with (times, J_error, ...) followed by the
    selected optional lists.
    """
    def _read_column(prefix):
        # each file holds one float per row (single-column csv);
        # use a context manager so file handles are closed (the previous
        # implementation leaked them)
        with open('{}{}_{}.txt'.format(directory, prefix, n), 'r') as f:
            return [float(row[0]) for row in csv.reader(f)]

    mu_error = []
    mu_time = []
    mu_est = []
    FOC_ = []
    j_list_ = []
    if mu_error_ is True:
        mu_error = _read_column('mu_error')
    if FOC is True:
        FOC_ = _read_column('FOC')
    if j_list is True:
        j_list_ = _read_column('j_list')
    if mu_est_ is True:
        mu_est = _read_column('mu_est')
        mu_time = _read_column('mu_time')
    # J errors are stored signed; report absolute values
    J_error = [abs(v) for v in _read_column('error')]
    times = _read_column('times')
    if mu_error_:
        if mu_est_:
            if FOC:
                return times, J_error, mu_error, mu_time, mu_est, FOC_, j_list_
            return times, J_error, mu_error, mu_time, mu_est, j_list_
        if FOC:
            return times, J_error, mu_error, FOC_, j_list_
        return times, J_error, mu_error, j_list_
    if FOC:
        return times, J_error, FOC_, j_list_
    # BUGFIX: this branch previously returned the *flag* ``j_list`` instead
    # of the loaded list ``j_list_``.
    return times, J_error, j_list_
def truncated_conj_grad(A_func,b,x_0=None,tol=10e-6, maxiter = None, atol = None):
    """Truncated conjugate-gradient iteration for A x = b.

    *A_func* implements the matrix-vector product x -> A x; the iteration
    stops early when the search direction is (numerically) not a descent
    direction.

    Returns (x, iterations, residual_norm, failure_flag): failure_flag is 1
    when the method stalled on the very first step or hit ``maxiter``,
    otherwise 0.
    """
    if x_0 is None:
        x_0 = np.zeros(b.size)
    if atol is None:
        atol = tol
    if maxiter is None:
        maxiter = 10*b.size
    probe = A_func(x_0)
    # sanity check: A_func must preserve the vector length
    if len(probe) != len(b):
        print('wrong input for A in the CG method')
        return
    def action(x):
        return A_func(x)
    residual = b - probe          # r_0 (probe already equals A x_0)
    direction = residual          # p_0
    iterate = x_0                 # x_0
    iteration = 0
    # cache the residual norm; it is reused several times per sweep
    res_norm = np.linalg.norm(residual)
    rhs_norm = np.linalg.norm(b)
    while iteration < maxiter and res_norm > max(tol*rhs_norm, atol):
        A_direction = action(direction)
        curvature = np.dot(direction, A_direction)
        # truncate when the direction is (numerically) not a descent direction
        if curvature <= 1.e-10*(np.linalg.norm(direction))**2:
            print("CG truncated at iteration: {} with residual: {:.5f}, because p_k is not a descent direction".format(iteration,res_norm))
            if iteration > 0:
                return iterate, iteration, res_norm, 0
            return iterate, iteration, res_norm, 1
        step = (res_norm**2)/(curvature)                  # alpha_k
        iterate = iterate + step*direction                # x_{k+1}
        residual = residual - step*A_direction            # r_{k+1}
        new_res_norm = np.linalg.norm(residual)
        beta = (new_res_norm)**2/(res_norm)**2            # beta_k
        res_norm = new_res_norm
        direction = residual + beta*direction             # p_{k+1}
        iteration += 1
    if iteration >= maxiter:
        print("Maximum number of iteration for CG reached, residual= {}".format(res_norm))
        return iterate, iteration, res_norm, 1
    return iterate, iteration, res_norm, 0
def truncated_stabilzed_biconj_grad(A_func,b,x_0=None,tol=1e-12, maxiter = None, atol = None):
    """Truncated stabilized bi-conjugate gradient (BiCGstab) for A x = b.

    Parameters:
    A_func: callable implementing the matrix-vector product x -> A x
    b: right-hand side (1d numpy array)
    x_0: initial guess, defaults to the zero vector
    tol: relative residual tolerance (w.r.t. ||b||)
    maxiter: iteration cap, defaults to 10 * len(b)
    atol: absolute residual tolerance, defaults to ``tol``

    Returns (x, iterations, residual_norm, failure_flag); failure_flag is 0
    on success, 1 when the method stalls immediately or hits ``maxiter``.
    """
    if x_0 is None:
        x_0 = np.zeros(b.size)
    if atol is None:
        atol = tol
    if maxiter is None:
        maxiter = b.size*10
    test = A_func(x_0)
    # sanity check: A_func must preserve the vector length
    if len(test) == len(b):
        def action(x):
            return A_func(x)
    else:
        print('wrong input for A in the BICGstab method')
        return
    #define r_0, note that test= action(x_0)
    r_k = b-test
    #for r_0_hat we can choose any random vector which is not orthogonal to r_0, for simplicity we take r_0 itself
    r_0_hat = r_k
    #define p_0
    p_k = r_k
    count = 0
    #define x_0
    x_k = x_0
    #cause we need the norm more often than one time, we save it
    tmp_r_k_norm = np.linalg.norm(r_k)
    norm_b = np.linalg.norm(b)
    tmp_r_k_r_0_hat = np.dot(r_k,r_0_hat)
    while count < maxiter and tmp_r_k_norm > max(tol*norm_b,atol):
        #save the matrix vector product
        Ap_k = action(p_k)
        #check if p_k is a descent direction, otherwise terminate
        if np.dot(p_k,Ap_k)<= 1.e-10*(np.linalg.norm(p_k))**2:
            print("BICGstab truncated at iteration: {} with residual: {:.5f}, because (p_k)'Ap_k <= 0.0".format(count,tmp_r_k_norm))
            if count>0:
                return x_k, count, tmp_r_k_norm, 0
            else:
                # stalled on the very first sweep: flag failure
                return x_k, count, tmp_r_k_norm, 1
        else:
            #calculate alpha_k
            alpha_k = tmp_r_k_r_0_hat/(np.dot(r_0_hat,Ap_k))
            #calculate s_k (intermediate residual)
            s_k = r_k-alpha_k*Ap_k
            As_k = action(s_k)
            # analogous curvature check for the stabilization direction s_k
            if np.dot(s_k,As_k)<=1.e-10*(np.linalg.norm(s_k))**2:
                print("BICGstab truncated at iteration: {} with residual: {:.5f}, because (s_k)'As_k <= 0.0".format(count,tmp_r_k_norm))
                if count>0:
                    return x_k, count, tmp_r_k_norm, 0
                else:
                    return x_k, count, tmp_r_k_norm, 1
            else:
                #calculate omega_k (stabilization step length)
                omega_k = np.dot(As_k,s_k)/np.dot(As_k,As_k)
                #calculate x_k+1
                x_k = x_k + alpha_k*p_k + omega_k*s_k
                #calculate r_k+1
                r_k = s_k - omega_k*As_k
                #save the new norm of r_k+1
                tmp_r_k1 = np.linalg.norm(r_k)
                #save the product r_k+1, r_0_hat
                tmp_r_k1_r_0_hat = np.dot(r_k,r_0_hat)
                #calculate beta_k
                beta_k = (alpha_k/omega_k)*(tmp_r_k1_r_0_hat)/(tmp_r_k_r_0_hat)
                #update the quantities need in the next loop
                tmp_r_k_norm = tmp_r_k1
                tmp_r_k_r_0_hat = tmp_r_k1_r_0_hat
                #calculate p_k+1
                p_k = r_k + beta_k*(p_k-omega_k*Ap_k)
                count += 1
    if count >= maxiter:
        # NOTE(review): the message says "CG" but this is BICGstab
        # (copy-paste from truncated_conj_grad)
        print("Maximum number of iteration for CG reached, residual= {}".format(tmp_r_k_norm))
        return x_k,count, tmp_r_k_norm, 1
    return x_k, count, tmp_r_k_norm, 0
def truncated_conj_grad_Steihaug(A_func, b, TR_radius, x_0=None, tol=10e-6, maxiter=None, atol=None):
    """CG-Steihaug: conjugate gradients truncated at a trust-region boundary.

    Like :func:`truncated_conj_grad`, but additionally stops as soon as the
    iterate leaves the trust region of radius *TR_radius* (measured here in
    the max-norm) or a direction of non-positive curvature is met.

    Returns (x, iterations, residual_norm, failure_flag); failure_flag is 1
    only when ``maxiter`` is reached.
    """
    if x_0 is None:
        x_0 = np.zeros(b.size)
    if atol is None:
        atol = tol
    if maxiter is None:
        maxiter = 10 * b.size
    test = A_func(x_0)
    # sanity check: A_func must preserve the vector length
    if len(test) == len(b):
        def action(x):
            return A_func(x)
    else:
        print('wrong input for A in the CG method')
        return
    # define r_0, note that test= action(x_0)
    r_k = b - test
    # defin p_0
    p_k = r_k
    count = 0
    # define x_0
    x_k = x_0
    # cause we need the norm more often than one time, we save it
    tmp_r_k_norm = np.linalg.norm(r_k)
    norm_b = np.linalg.norm(b)
    while count < maxiter and tmp_r_k_norm > max(tol * norm_b, atol):
        # save the matrix vector product
        tmp = action(p_k)
        p_kxtmp = np.dot(p_k, tmp)
        # check if p_k is a descent direction, otherwise terminate
        if p_kxtmp <= 1.e-10 * (np.linalg.norm(p_k)) ** 2:
            print(
                "CG-Steihaug truncated at iteration: {} with residual: {:.5f}, because H_k is not pos def".format(count,
                                                                                                                 tmp_r_k_norm))
            # NOTE(review): the commented-out code below would step exactly
            # onto the trust-region boundary (textbook Steihaug); the active
            # code returns x_k + p_k instead -- confirm this is intentional.
            #b = x_k.dot(p_k)
            #a = p_k.dot(p_k)
            #c = x_k.dot(x_k)-TR_radius**2
            #alpha_k = (b+np.sqrt(b**2-a*c))/a
            ### CHECK
            #if np.abs(np.linalg.norm(x_k+alpha_k*p_k) - TR_radius)/TR_radius <= 1e-6:
            #    print("check steig ok")
            #else:
            #    print("error!!!! in STEIG {}".format(np.linalg.norm(x_k+alpha_k*p_k)))
            #return x_k+alpha_k*p_k, count, tmp_r_k_norm, 0
            return x_k+p_k, count, tmp_r_k_norm, 0
        else:
            # calculate alpha_k
            alpha_k = ((tmp_r_k_norm) ** 2) / (p_kxtmp)
            # calculate x_k+1
            x_k = x_k + alpha_k * p_k
            # truncate once the iterate leaves the trust region
            # (max-norm test; the commented-out code would project onto
            # the euclidean boundary instead)
            if np.linalg.norm(x_k,np.inf)>= TR_radius:
                #b = x_k.dot(p_k)
                #a = p_k.dot(p_k)
                #c = x_k.dot(x_k) - TR_radius ** 2
                #alpha_k = (b + np.sqrt(b ** 2 - a * c)) / a
                #return x_k+alpha_k*p_k, count, tmp_r_k_norm, 0
                return x_k, count, tmp_r_k_norm, 0
            # calculate r_k+1
            r_k = r_k - alpha_k * tmp
            # save the new norm of r_k+1
            tmp_r_k1 = np.linalg.norm(r_k)
            # calculate beta_k
            beta_k = (tmp_r_k1) ** 2 / (tmp_r_k_norm) ** 2
            tmp_r_k_norm = tmp_r_k1
            # calculate p_k+1
            p_k = r_k + beta_k * p_k
            count += 1
    if count >= maxiter:
        print("Maximum number of iteration for CG reached, residual= {}".format(tmp_r_k_norm))
        return x_k, count, tmp_r_k_norm, 1
    return x_k, count, tmp_r_k_norm, 0
| [
"numpy.meshgrid",
"csv.reader",
"csv.writer",
"numpy.abs",
"numpy.zeros",
"scipy.sparse.linalg.eigsh",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.contour",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.dot",
"pdeopt.TR.projection_onto_range"
] | [((796, 858), 'numpy.linspace', 'np.linspace', (['ranges[0][0]', 'ranges[0][1]', 'first_component_steps'], {}), '(ranges[0][0], ranges[0][1], first_component_steps)\n', (807, 858), True, 'import numpy as np\n'), ((883, 946), 'numpy.linspace', 'np.linspace', (['ranges[1][0]', 'ranges[1][1]', 'second_component_steps'], {}), '(ranges[1][0], ranges[1][1], second_component_steps)\n', (894, 946), True, 'import numpy as np\n'), ((958, 1010), 'numpy.meshgrid', 'np.meshgrid', (['mu_first_component', 'mu_second_component'], {}), '(mu_first_component, mu_second_component)\n', (969, 1010), True, 'import numpy as np\n'), ((1022, 1079), 'numpy.zeros', 'np.zeros', (['[second_component_steps, first_component_steps]'], {}), '([second_component_steps, first_component_steps])\n', (1030, 1079), True, 'import numpy as np\n'), ((1359, 1371), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1369, 1371), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1642), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1640, 1642), True, 'import matplotlib.pyplot as plt\n'), ((1689, 1741), 'matplotlib.pyplot.contour', 'plt.contour', (['x1', 'y1', 'func_', 'number_of_contour_levels'], {}), '(x1, y1, func_, number_of_contour_levels)\n', (1700, 1741), True, 'import matplotlib.pyplot as plt\n'), ((3559, 3619), 'scipy.sparse.linalg.eigsh', 'scipy.sparse.linalg.eigsh', (['A'], {'M': 'B', 'return_eigenvectors': '(False)'}), '(A, M=B, return_eigenvectors=False)\n', (3584, 3619), False, 'import scipy\n'), ((5997, 6010), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6007, 6010), False, 'import csv\n'), ((6147, 6160), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (6157, 6160), False, 'import csv\n'), ((7385, 7404), 'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (7399, 7404), True, 'import numpy as np\n'), ((7418, 7435), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (7432, 7435), True, 'import numpy as np\n'), ((9538, 9557), 
'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (9552, 9557), True, 'import numpy as np\n'), ((9571, 9588), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (9585, 9588), True, 'import numpy as np\n'), ((9611, 9631), 'numpy.dot', 'np.dot', (['r_k', 'r_0_hat'], {}), '(r_k, r_0_hat)\n', (9617, 9631), True, 'import numpy as np\n'), ((12474, 12493), 'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (12488, 12493), True, 'import numpy as np\n'), ((12507, 12524), 'numpy.linalg.norm', 'np.linalg.norm', (['b'], {}), '(b)\n', (12521, 12524), True, 'import numpy as np\n'), ((2959, 2996), 'numpy.linalg.norm', 'np.linalg.norm', (['first_order_criticity'], {}), '(first_order_criticity)\n', (2973, 2996), True, 'import numpy as np\n'), ((3812, 3831), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3822, 3831), False, 'import csv\n'), ((3983, 4002), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (3993, 4002), False, 'import csv\n'), ((5134, 5147), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5144, 5147), False, 'import csv\n'), ((5314, 5327), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5324, 5327), False, 'import csv\n'), ((5496, 5509), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5506, 5509), False, 'import csv\n'), ((5682, 5695), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5692, 5695), False, 'import csv\n'), ((5844, 5857), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (5854, 5857), False, 'import csv\n'), ((6871, 6887), 'numpy.zeros', 'np.zeros', (['b.size'], {}), '(b.size)\n', (6879, 6887), True, 'import numpy as np\n'), ((7616, 7632), 'numpy.dot', 'np.dot', (['p_k', 'tmp'], {}), '(p_k, tmp)\n', (7622, 7632), True, 'import numpy as np\n'), ((8884, 8900), 'numpy.zeros', 'np.zeros', (['b.size'], {}), '(b.size)\n', (8892, 8900), True, 'import numpy as np\n'), ((11956, 11972), 'numpy.zeros', 'np.zeros', (['b.size'], {}), '(b.size)\n', (11964, 11972), True, 'import numpy as 
np\n'), ((12710, 12726), 'numpy.dot', 'np.dot', (['p_k', 'tmp'], {}), '(p_k, tmp)\n', (12716, 12726), True, 'import numpy as np\n'), ((2444, 2462), 'numpy.abs', 'np.abs', (['(Ji - J_opt)'], {}), '(Ji - J_opt)\n', (2450, 2462), True, 'import numpy as np\n'), ((3387, 3409), 'numpy.abs', 'np.abs', (['(J_opt - Js[-2])'], {}), '(J_opt - Js[-2])\n', (3393, 3409), True, 'import numpy as np\n'), ((4192, 4211), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4202, 4211), False, 'import csv\n'), ((4402, 4421), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4412, 4421), False, 'import csv\n'), ((8326, 8345), 'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (8340, 8345), True, 'import numpy as np\n'), ((9871, 9888), 'numpy.dot', 'np.dot', (['p_k', 'Ap_k'], {}), '(p_k, Ap_k)\n', (9877, 9888), True, 'import numpy as np\n'), ((14297, 14316), 'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (14311, 14316), True, 'import numpy as np\n'), ((2296, 2325), 'numpy.linalg.norm', 'np.linalg.norm', (['(mu_i - mu_opt)'], {}), '(mu_i - mu_opt)\n', (2310, 2325), True, 'import numpy as np\n'), ((10288, 10309), 'numpy.dot', 'np.dot', (['r_0_hat', 'Ap_k'], {}), '(r_0_hat, Ap_k)\n', (10294, 10309), True, 'import numpy as np\n'), ((10418, 10435), 'numpy.dot', 'np.dot', (['s_k', 'As_k'], {}), '(s_k, As_k)\n', (10424, 10435), True, 'import numpy as np\n'), ((11112, 11131), 'numpy.linalg.norm', 'np.linalg.norm', (['r_k'], {}), '(r_k)\n', (11126, 11131), True, 'import numpy as np\n'), ((11216, 11236), 'numpy.dot', 'np.dot', (['r_k', 'r_0_hat'], {}), '(r_k, r_0_hat)\n', (11222, 11236), True, 'import numpy as np\n'), ((13828, 13855), 'numpy.linalg.norm', 'np.linalg.norm', (['x_k', 'np.inf'], {}), '(x_k, np.inf)\n', (13842, 13855), True, 'import numpy as np\n'), ((2882, 2928), 'pdeopt.TR.projection_onto_range', 'projection_onto_range', (['parameter_space', 'mu_box'], {}), '(parameter_space, mu_box)\n', (2903, 2928), False, 'from pdeopt.TR 
import projection_onto_range\n'), ((4707, 4726), 'csv.writer', 'csv.writer', (['csvfile'], {}), '(csvfile)\n', (4717, 4726), False, 'import csv\n'), ((7727, 7746), 'numpy.linalg.norm', 'np.linalg.norm', (['p_k'], {}), '(p_k)\n', (7741, 7746), True, 'import numpy as np\n'), ((9898, 9917), 'numpy.linalg.norm', 'np.linalg.norm', (['p_k'], {}), '(p_k)\n', (9912, 9917), True, 'import numpy as np\n'), ((10845, 10862), 'numpy.dot', 'np.dot', (['As_k', 's_k'], {}), '(As_k, s_k)\n', (10851, 10862), True, 'import numpy as np\n'), ((10862, 10880), 'numpy.dot', 'np.dot', (['As_k', 'As_k'], {}), '(As_k, As_k)\n', (10868, 10880), True, 'import numpy as np\n'), ((12826, 12845), 'numpy.linalg.norm', 'np.linalg.norm', (['p_k'], {}), '(p_k)\n', (12840, 12845), True, 'import numpy as np\n'), ((10444, 10463), 'numpy.linalg.norm', 'np.linalg.norm', (['s_k'], {}), '(s_k)\n', (10458, 10463), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 1 12:34:54 2019
@author: Warmachine
"""
# Training driver for self-supervised key-step summarization on CrossTask.
# CLI (positional, via sys.argv): [1] = results subfolder name,
# [2] = target category index, [3] = GPU id, [4] = repNum (K).
# (Inferred from the int()/torch.device/format uses below — confirm with callers.)
from __future__ import print_function, division
import os,sys
pwd = os.getcwd()
sys.path.insert(0,pwd)
#%%
print('-'*30)
print(os.getcwd())
print('-'*30)
#%%
import scipy.io as sio
import os
import torch
from torch import nn
from torch.nn import functional as F
import pandas as pd
from PIL import Image
from skimage import io, transform
import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from torchvision import transforms, utils, models
import h5py
import time
import pdb
import pickle
from core.FeatureVGGDataset_CrossTask import FeatureVGGDataset_CrossTask
from core.attention_based_summarization import AttentionSummarization
from core.self_supervision_summarization import SelfSupervisionSummarization
from core.helper import aggregated_keysteps,fcsn_preprocess_keystep,\
                    get_parameters,get_weight_decay,evaluation_align,\
                    visualize_attention,Logger
from global_setting import NFS_path,data_path_tr_CrossTask,data_path_tst_CrossTask
# Ignore warnings
#import warnings
#warnings.filterwarnings("ignore")
#%%
print('Folder {}'.format(sys.argv[1]))
#%%
plt.ion() # interactive mode
#%% use GPU
# M: maximum number of key-steps the model/subset-selection can represent.
M = 30
repNum = int(sys.argv[4])
target_cat_idx = int(sys.argv[2])
#%%
idx_GPU = sys.argv[3]
device = torch.device("cuda:{}".format(idx_GPU) if torch.cuda.is_available() else "cpu")
#%% hyper-params
batch_size = 1
target_fps = 2
verbose = False
n_video_iters = 1
n_class = M
num_worker = 5
#number_eval = 5
is_save = True
n_epoches = 5
is_balance = True
#%%
if is_save:
    print("Save")
    print("!"*30)
#%%
# The target category is picked by index from the training-data directory listing.
list_cat = os.listdir(data_path_tr_CrossTask)
cat_name = list_cat[target_cat_idx]
#%%
feature_dataset_all = FeatureVGGDataset_CrossTask(data_path_tr_CrossTask,verbose = verbose,is_visualize=False,target_cat=cat_name, is_all = True)
dataset_loader_all = DataLoader(feature_dataset_all,
                             batch_size = batch_size,
                             shuffle = False,
                             num_workers = num_worker)
feature_dataset_vis = FeatureVGGDataset_CrossTask(data_path_tr_CrossTask, verbose = verbose,is_visualize=True,target_cat=cat_name, is_all = True)
dataset_loader_vis = DataLoader(feature_dataset_vis,
                             batch_size = batch_size,
                             shuffle = False,
                             num_workers = 0)
n_category = len(feature_dataset_all.cat2idx)
n_train = feature_dataset_all.n_video
print('Training set size: {}'.format(n_train))
#%%
# n_class = key-steps of the target category plus one extra class.
n_keysteps = feature_dataset_all.dict_n_keystep[cat_name]
n_class = n_keysteps+1
#%%
# Experiment directory name encodes category, K, GPU and a timestamp
# ('.' replaced by 'd' to keep the path clean).
experiment_dir = NFS_path+'results/'+sys.argv[1]+'/rank_key_same_cat_ss_target_cat_{}_K_{}_GPU_{}_time_{}/'.format(cat_name,repNum,idx_GPU,str(time.time()).replace('.','d'))
try:
    # Duplicate-experiment guard: abort when a folder for the same
    # (category, K) pair already exists under the results subfolder.
    similar_folder = [f for f in os.listdir(NFS_path+'results/'+sys.argv[1]) if "rank_key_same_cat_ss_target_cat_{}_K_{}".format(cat_name,repNum) in f]
except:
    # NOTE(review): bare except silently maps any listdir failure
    # (e.g. missing results dir) to "no duplicates found".
    similar_folder = []
if len(similar_folder) > 0:
    print("Experiment existed {}".format(similar_folder))
    sys.exit()
if is_save:
    os.makedirs(experiment_dir)
    # Redirect stdout so the hyper-parameter prints below land in specs.txt;
    # restored further down after the prints.
    orig_stdout = sys.stdout
    f = open(experiment_dir+'specs.txt', 'w')
    sys.stdout = f
# Model capacity M must cover the category's class count (n_keysteps + 1).
assert M >= n_class
if is_save:
    with open(experiment_dir+'log.txt', 'a') as file:
        file.write("n_keystep: {}\n".format(n_keysteps))
#%%
lambda_1 = 0.0
model = AttentionSummarization(M,n_category,lambda_1,dim_input = 512,verbose=verbose,temporal_att=True,is_balance=is_balance)
assert model.lambda_1 == 0
model.to(device)
print('fcsn_params')
# Attention and FCSN parameter groups are optimized with separate entries below.
att_params = model.att_params
fcsn_params = model.fcsn_params
#%%
ss_model = SelfSupervisionSummarization(M = M,repNum=repNum)
#%%
lr = 0.001
weight_decay = 0.0001
momentum = 0.0
params = [{'params':att_params,'lr':lr,'weight_decay':weight_decay},
          {'params':fcsn_params,'lr':lr,'weight_decay':weight_decay}]
#optimizer = optim.Adam(params)
optimizer = optim.RMSprop( params ,lr=lr,weight_decay=weight_decay, momentum=momentum)
#%%
# Hyper-parameter dump (goes to specs.txt while stdout is redirected).
print('-'*30)
print('rank loss for keystep')
print('pos_weight')
print('-'*30)
print('GPU {}'.format(idx_GPU))
print('lambda_1 {}'.format(lambda_1))
print('lr {} weight_decay {} momentum {}'.format(lr,weight_decay,momentum))
print('target_fps {}'.format(target_fps))
print('n_video_iters {}'.format(n_video_iters))
print('num_worker {}'.format(num_worker))
print('n_epoches {}'.format(n_epoches))
print('repNum {}'.format(repNum))
print('is_balance {}'.format(is_balance))
#input('confirm?')
#%%
if is_save:
    train_logger=Logger(experiment_dir+'train.csv',['loss','loss_cat','loss_key','pos_weight'])
    all_logger=Logger(experiment_dir+'all.csv',['cat_F1_pred_or','cat_F1_pseudo_ks'])
#%%
if is_save:
    # Restore stdout now that the spec prints are captured.
    sys.stdout = orig_stdout
    f.close()
#%%
list_F1_pseudo = []
list_F1_pred = []
#%%
def measurement():
    """Run alignment evaluation on the full loader and log the metrics.

    Uses the module-level ``model``, ``ss_model``, ``dataset_loader_all`` and
    ``device``. When ``is_save`` is set, appends recall/precision for both the
    predicted and the pseudo key-steps to ``<experiment_dir>/all_log.txt``.
    """
    tag = 'all_'
    scores = evaluation_align(model, ss_model, dataset_loader_all, device)
    r_pred, p_pred = scores['R_pred'], scores['P_pred']
    r_pseudo, p_pseudo = scores['R_pseudo'], scores['P_pseudo']
    if not is_save:
        return
    with open(experiment_dir + tag + 'log.txt', 'a') as log_file:
        log_file.write("R_pred {} P_pred {}\n".format(r_pred, p_pred))
        log_file.write("R_pseudo {} P_pseudo {}\n".format(r_pseudo, p_pseudo))
        log_file.write("-" * 30)
        log_file.write("\n")
#%%
# Each epoch has two passes over the training loader:
#   (1) a frozen forward pass that collects per-segment features and runs
#       subset selection to produce pseudo key-step labels;
#   (2) a training pass that optimizes against those pseudo-labels.
for i_epoch in range(n_epoches):
    #1st pass
    counter = 0
    with torch.no_grad():
        for _,data_package in enumerate(dataset_loader_all):
            counter += 1
            model.eval()
            cat_labels, cat_names, video, subsampled_feature, subsampled_segment_list, key_step_list, n_og_keysteps \
            = data_package['cat_labels'],data_package['cat_names'],data_package['video'],data_package['subsampled_feature'],data_package['subsampled_segment_list'],data_package['key_step_list'],data_package['n_og_keysteps']
            # flatten the feature vector: [512,7,7] -> [512,49]
            flatten_feature = subsampled_feature.view(batch_size,-1,512,7*7).to(device)
            # print("Flatten tensor shape:", flatten_feature.shape)
            #Transposing the flattened features
            flatten_feature = torch.transpose(flatten_feature, dim0 = 2, dim1 = 3)
            # print("Transposed Flatten tensor shape:", flatten_feature.shape)
            print(counter,cat_names, video)
            keystep_labels = aggregated_keysteps(subsampled_segment_list, key_step_list)
            keystep_labels = fcsn_preprocess_keystep(keystep_labels,verbose = verbose)
            fbar_seg = model.forward_middle(flatten_feature,subsampled_segment_list) #[1,512,T]
            ss_model.add_video(fbar_seg,video) #[T,512]
    print('-'*30)
    print('subset selection')
    # NOTE(review): 'foward' is presumably the method's actual (misspelled)
    # name in SelfSupervisionSummarization — verify before renaming.
    ss_model.foward()
    print('-'*30)
    measurement()
    # Checkpoint model + subset-selection state, indexed by the logger row count.
    torch.save(model.state_dict(), experiment_dir+'model_ES_pred_or_{}'.format(all_logger.get_len()-1))
    pickle.dump(ss_model,open(experiment_dir+'SS_model_ES_pred_or_{}'.format(all_logger.get_len()-1),'wb'))
    print('unique assignment {} number of represent {} number cluster {}'.format(np.unique(ss_model.reps).shape,np.unique(ss_model.assignments).shape,ss_model.kmeans.cluster_centers_.shape))
    if is_save:
        with open(experiment_dir+'log.txt', 'a') as file:
            file.write('unique assignment {} number of represent {} number cluster {}\n'.format(np.unique(ss_model.reps).shape,np.unique(ss_model.assignments).shape,ss_model.kmeans.cluster_centers_.shape))
    #2nd pass
    counter = 0
    for data_package in dataset_loader_all:
        counter += 1
        model.train()
        optimizer.zero_grad()
        cat_labels, cat_names, video, subsampled_feature, subsampled_segment_list, key_step_list, n_og_keysteps \
        = data_package['cat_labels'],data_package['cat_names'],data_package['video'],data_package['subsampled_feature'],data_package['subsampled_segment_list'],data_package['key_step_list'],data_package['n_og_keysteps']
        # flatten the feature vector: [1,T,512,7,7] -> [1,T,512,49]
        flatten_feature = subsampled_feature.view(batch_size,-1,512,7*7).to(device)
        # print("Flatten tensor shape:", flatten_feature.shape)
        #Transposing the flattened features
        flatten_feature = torch.transpose(flatten_feature, dim0 = 2, dim1 = 3) #[1,T,49,512] <== [1,T,512,49]
        # print("Transposed Flatten tensor shape:", flatten_feature.shape)
        print(counter,cat_names, video)
        keystep_labels = aggregated_keysteps(subsampled_segment_list, key_step_list)
        keystep_labels = fcsn_preprocess_keystep(keystep_labels,verbose = verbose)
        keysteps,cats,_,_ = model(flatten_feature,subsampled_segment_list)
        # Train against the subset-selection pseudo-labels, not the ground truth.
        keystep_pseudo_labels = ss_model.get_key_step_label(video)
        # package = model.compute_loss_rank_keystep_cat(keysteps,cats,keystep_labels,cat_labels)
        package = model.compute_loss_rank_keystep_cat(keysteps,cats,keystep_pseudo_labels,cat_labels)
        loss,loss_cat,loss_key,class_weights = package['loss'],package['loss_cat'],package['loss_keystep'],package['class_weights']
        train_stats = [loss.item(),loss_cat.item(),loss_key.item(),class_weights.cpu().numpy()]
        print('loss {} loss_cat {} loss_key {} pos_weight {}'.format(*train_stats))
        print('weight_decay {}'.format(get_weight_decay(optimizer)))
        if is_save:
            train_logger.add(train_stats)
            train_logger.save()
        loss.backward()
        optimizer.step()
    print('-'*30)
    print("train Pseudo {} Pred {}".format(np.mean(list_F1_pseudo),np.mean(list_F1_pred)))
    # Subset-selection state is rebuilt from scratch each epoch.
    ss_model.flush()
    # Keep best checkpoints according to the two tracked F1 metrics.
    if is_save and all_logger.is_max('cat_F1_pred_or'):
        torch.save(model.state_dict(), experiment_dir+'model_ES_pred_or')
        pickle.dump(ss_model,open(experiment_dir+'SS_model_ES_pred_or','wb'))
    if is_save and all_logger.is_max('cat_F1_pseudo_ks'):
        torch.save(model.state_dict(), experiment_dir+'model_ES_pseudo_ks')
        pickle.dump(ss_model,open(experiment_dir+'SS_model_ES_pred_ks','wb'))
#%%
# Final evaluation and checkpoint after all epochs.
measurement()
#%%
torch.save(model.state_dict(), experiment_dir+'model_final')
pickle.dump(ss_model,open(experiment_dir+'SS_model_final','wb'))
#%% | [
"numpy.mean",
"core.helper.aggregated_keysteps",
"torch.no_grad",
"numpy.unique",
"core.helper.evaluation_align",
"torch.utils.data.DataLoader",
"core.helper.Logger",
"core.attention_based_summarization.AttentionSummarization",
"matplotlib.pyplot.ion",
"core.FeatureVGGDataset_CrossTask.FeatureVGGD... | [((167, 178), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (176, 178), False, 'import os\n'), ((180, 203), 'sys.path.insert', 'sys.path.insert', (['(0)', 'pwd'], {}), '(0, pwd)\n', (195, 203), False, 'import os, sys\n'), ((1383, 1392), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1390, 1392), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1909), 'os.listdir', 'os.listdir', (['data_path_tr_CrossTask'], {}), '(data_path_tr_CrossTask)\n', (1885, 1909), False, 'import os\n'), ((1975, 2101), 'core.FeatureVGGDataset_CrossTask.FeatureVGGDataset_CrossTask', 'FeatureVGGDataset_CrossTask', (['data_path_tr_CrossTask'], {'verbose': 'verbose', 'is_visualize': '(False)', 'target_cat': 'cat_name', 'is_all': '(True)'}), '(data_path_tr_CrossTask, verbose=verbose,\n is_visualize=False, target_cat=cat_name, is_all=True)\n', (2002, 2101), False, 'from core.FeatureVGGDataset_CrossTask import FeatureVGGDataset_CrossTask\n'), ((2121, 2218), 'torch.utils.data.DataLoader', 'DataLoader', (['feature_dataset_all'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': 'num_worker'}), '(feature_dataset_all, batch_size=batch_size, shuffle=False,\n num_workers=num_worker)\n', (2131, 2218), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2333, 2458), 'core.FeatureVGGDataset_CrossTask.FeatureVGGDataset_CrossTask', 'FeatureVGGDataset_CrossTask', (['data_path_tr_CrossTask'], {'verbose': 'verbose', 'is_visualize': '(True)', 'target_cat': 'cat_name', 'is_all': '(True)'}), '(data_path_tr_CrossTask, verbose=verbose,\n is_visualize=True, target_cat=cat_name, is_all=True)\n', (2360, 2458), False, 'from core.FeatureVGGDataset_CrossTask import FeatureVGGDataset_CrossTask\n'), ((2479, 2567), 'torch.utils.data.DataLoader', 'DataLoader', (['feature_dataset_vis'], {'batch_size': 'batch_size', 'shuffle': '(False)', 'num_workers': '(0)'}), '(feature_dataset_vis, batch_size=batch_size, shuffle=False,\n num_workers=0)\n', 
(2489, 2567), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((3694, 3820), 'core.attention_based_summarization.AttentionSummarization', 'AttentionSummarization', (['M', 'n_category', 'lambda_1'], {'dim_input': '(512)', 'verbose': 'verbose', 'temporal_att': '(True)', 'is_balance': 'is_balance'}), '(M, n_category, lambda_1, dim_input=512, verbose=\n verbose, temporal_att=True, is_balance=is_balance)\n', (3716, 3820), False, 'from core.attention_based_summarization import AttentionSummarization\n'), ((3965, 4013), 'core.self_supervision_summarization.SelfSupervisionSummarization', 'SelfSupervisionSummarization', ([], {'M': 'M', 'repNum': 'repNum'}), '(M=M, repNum=repNum)\n', (3993, 4013), False, 'from core.self_supervision_summarization import SelfSupervisionSummarization\n'), ((4259, 4333), 'torch.optim.RMSprop', 'optim.RMSprop', (['params'], {'lr': 'lr', 'weight_decay': 'weight_decay', 'momentum': 'momentum'}), '(params, lr=lr, weight_decay=weight_decay, momentum=momentum)\n', (4272, 4333), True, 'import torch.optim as optim\n'), ((230, 241), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (239, 241), False, 'import os\n'), ((3351, 3361), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3359, 3361), False, 'import os, sys\n'), ((3382, 3409), 'os.makedirs', 'os.makedirs', (['experiment_dir'], {}), '(experiment_dir)\n', (3393, 3409), False, 'import os\n'), ((4881, 4969), 'core.helper.Logger', 'Logger', (["(experiment_dir + 'train.csv')", "['loss', 'loss_cat', 'loss_key', 'pos_weight']"], {}), "(experiment_dir + 'train.csv', ['loss', 'loss_cat', 'loss_key',\n 'pos_weight'])\n", (4887, 4969), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((4976, 5050), 'core.helper.Logger', 'Logger', (["(experiment_dir + 'all.csv')", "['cat_F1_pred_or', 'cat_F1_pseudo_ks']"], {}), "(experiment_dir + 'all.csv', ['cat_F1_pred_or', 'cat_F1_pseudo_ks'])\n", (4982, 
5050), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((5236, 5297), 'core.helper.evaluation_align', 'evaluation_align', (['model', 'ss_model', 'dataset_loader_all', 'device'], {}), '(model, ss_model, dataset_loader_all, device)\n', (5252, 5297), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((1577, 1602), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1600, 1602), False, 'import torch\n'), ((5837, 5852), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5850, 5852), False, 'import torch\n'), ((8899, 8947), 'torch.transpose', 'torch.transpose', (['flatten_feature'], {'dim0': '(2)', 'dim1': '(3)'}), '(flatten_feature, dim0=2, dim1=3)\n', (8914, 8947), False, 'import torch\n'), ((9164, 9223), 'core.helper.aggregated_keysteps', 'aggregated_keysteps', (['subsampled_segment_list', 'key_step_list'], {}), '(subsampled_segment_list, key_step_list)\n', (9183, 9223), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((9250, 9306), 'core.helper.fcsn_preprocess_keystep', 'fcsn_preprocess_keystep', (['keystep_labels'], {'verbose': 'verbose'}), '(keystep_labels, verbose=verbose)\n', (9273, 9306), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((3103, 3150), 'os.listdir', 'os.listdir', (["(NFS_path + 'results/' + sys.argv[1])"], {}), "(NFS_path + 'results/' + sys.argv[1])\n", (3113, 3150), False, 'import os\n'), ((6652, 6700), 'torch.transpose', 'torch.transpose', (['flatten_feature'], {'dim0': '(2)', 'dim1': '(3)'}), '(flatten_feature, dim0=2, dim1=3)\n', (6667, 6700), False, 'import torch\n'), 
((6873, 6932), 'core.helper.aggregated_keysteps', 'aggregated_keysteps', (['subsampled_segment_list', 'key_step_list'], {}), '(subsampled_segment_list, key_step_list)\n', (6892, 6932), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((6963, 7019), 'core.helper.fcsn_preprocess_keystep', 'fcsn_preprocess_keystep', (['keystep_labels'], {'verbose': 'verbose'}), '(keystep_labels, verbose=verbose)\n', (6986, 7019), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((10344, 10367), 'numpy.mean', 'np.mean', (['list_F1_pseudo'], {}), '(list_F1_pseudo)\n', (10351, 10367), True, 'import numpy as np\n'), ((10368, 10389), 'numpy.mean', 'np.mean', (['list_F1_pred'], {}), '(list_F1_pred)\n', (10375, 10389), True, 'import numpy as np\n'), ((7680, 7704), 'numpy.unique', 'np.unique', (['ss_model.reps'], {}), '(ss_model.reps)\n', (7689, 7704), True, 'import numpy as np\n'), ((7711, 7742), 'numpy.unique', 'np.unique', (['ss_model.assignments'], {}), '(ss_model.assignments)\n', (7720, 7742), True, 'import numpy as np\n'), ((10083, 10110), 'core.helper.get_weight_decay', 'get_weight_decay', (['optimizer'], {}), '(optimizer)\n', (10099, 10110), False, 'from core.helper import aggregated_keysteps, fcsn_preprocess_keystep, get_parameters, get_weight_decay, evaluation_align, visualize_attention, Logger\n'), ((3030, 3041), 'time.time', 'time.time', ([], {}), '()\n', (3039, 3041), False, 'import time\n'), ((7967, 7991), 'numpy.unique', 'np.unique', (['ss_model.reps'], {}), '(ss_model.reps)\n', (7976, 7991), True, 'import numpy as np\n'), ((7998, 8029), 'numpy.unique', 'np.unique', (['ss_model.assignments'], {}), '(ss_model.assignments)\n', (8007, 8029), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
class MLP:
    """Skeleton of a multilayer perceptron.

    Only weight initialization is implemented; ``train`` is a placeholder
    that fills ``errors`` with random values (the real loop is stubbed out).
    """

    def __init__(self, classes_count, inputs, architecture, learning_rate, min_error, max_ephocs):
        """Store the hyper-parameters and prepend a random bias input.

        Note: ``inputs`` is mutated in place (a random bias term is inserted
        at position 0 of the caller's list).
        """
        self.classes_count = classes_count
        self.inputs = inputs
        self.inputs.insert(0, np.random.rand())
        self.architecture = architecture
        self.learning_rate = learning_rate
        self.min_error = min_error
        self.max_ephocs = max_ephocs
        self.errors = []

    def init_weights(self):
        """Randomly initialize one weight per (layer, neuron, input) triple.

        ``weights[m][i][j]`` is the weight of input ``j`` into neuron ``i``
        of layer ``m``; each value is drawn uniformly from [0, 1).
        """
        n_inputs = len(self.inputs)
        self.weights = [
            [[np.random.rand() for _ in range(n_inputs)] for _ in range(layer_size)]
            for layer_size in self.architecture
        ]

    def train(self, progressBar):
        """Placeholder training loop.

        The actual forward/backward pass is not implemented yet; this only
        populates ``errors`` with ``max_ephocs`` random values scaled to
        [0, 15). ``progressBar`` is currently unused.
        """
        self.errors = np.random.rand(self.max_ephocs) * 15
| [
"numpy.random.rand"
] | [((263, 279), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (277, 279), True, 'import numpy as np\n'), ((1530, 1561), 'numpy.random.rand', 'np.random.rand', (['self.max_ephocs'], {}), '(self.max_ephocs)\n', (1544, 1561), True, 'import numpy as np\n'), ((870, 886), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (884, 886), True, 'import numpy as np\n')] |
# File: diffusion.py
# Author: <NAME>
# Creation Date: 5/Nov/2018
# Description: Modeling of diffusion in 2D and 3D
import numpy as np
from random import randint
import matplotlib.pyplot as plt
class Substance:
    """Random-walk diffusion of a block of unit masses on a 2D or 3D grid.

    Each occupied cell holds one unit of mass. On every time step each mass
    attempts a single-cell move in a uniformly random direction; when the
    chosen destination (and, in 2D, every fallback neighbour) is occupied,
    the mass stays put, so total mass is conserved.
    """

    def __init__(self, i_size):
        """
        :param i_size: size of diffusion volume (grid edge length)
        """
        self.dim = i_size
        self.D_2d = None      # 2D occupancy grid, created by add_mass_2d
        self.D_3d = None      # 3D occupancy grid, created by add_mass_3d
        self.z_switch = False  # True when the last diffusion run was 3D

    def add_mass_2d(self, mass_size):
        """
        Adds mass to be diffused to the 2D grid (a centred square block).
        :param mass_size: size of the mass in the grid
        """
        self.D_2d = np.zeros((self.dim, self.dim))
        m = int(mass_size/2)
        center = int(self.dim/2)
        self.D_2d[center - m: center + m, center - m: center + m] = 1
        self.z_switch = False

    def add_mass_3d(self, mass_size):
        """
        Adds mass to be diffused to the 3D grid (a centred cube block).
        :param mass_size: size of the mass in the grid
        """
        self.D_3d = np.zeros((self.dim, self.dim, self.dim))
        m = int(mass_size / 2)
        center = int(self.dim / 2)
        self.D_3d[center - m: center + m, center - m: center + m, center - m: center + m] = 1

    def diffuse_2d(self, num_iter):
        """
        Begin diffusion in 2D. Each occupied interior cell picks one of the
        four directions with equal probability; if the target is occupied it
        tries the remaining neighbours and finally stays in place, so mass
        is conserved.
        :param num_iter: number of the time steps
        :return: numpy array of final position of the masses
        """
        D = self.D_2d
        for n in range(num_iter):
            for i in range(1, self.dim - 1):
                for j in range(1, self.dim - 1):
                    if D[i][j] == 1:
                        D[i][j] = 0
                        r = randint(0, 100)
                        # --------------- +x --------------- #
                        if r <= 25:
                            if D[i + 1][j] != 1:
                                D[i + 1][j] = 1
                            else:
                                if D[i - 1][j] != 1:
                                    D[i - 1][j] = 1
                                elif D[i][j + 1] != 1:
                                    D[i][j + 1] = 1
                                elif D[i][j - 1] != 1:
                                    D[i][j - 1] = 1
                                else:
                                    D[i][j] = 1
                        # --------------- -x --------------- #
                        elif 25 < r <= 50:
                            if D[i - 1][j] != 1:
                                D[i - 1][j] = 1
                            else:
                                if D[i + 1][j] != 1:
                                    D[i + 1][j] = 1
                                elif D[i][j + 1] != 1:
                                    D[i][j + 1] = 1
                                elif D[i][j - 1] != 1:
                                    D[i][j - 1] = 1
                                else:
                                    D[i][j] = 1
                        # --------------- +y --------------- #
                        elif 50 < r <= 75:
                            if D[i][j + 1] != 1:
                                D[i][j + 1] = 1
                            else:
                                if D[i + 1][j] != 1:
                                    D[i + 1][j] = 1
                                elif D[i - 1][j] != 1:
                                    D[i - 1][j] = 1
                                elif D[i][j - 1] != 1:
                                    D[i][j - 1] = 1
                                else:
                                    D[i][j] = 1
                        # --------------- -y --------------- #
                        else:
                            if D[i][j - 1] != 1:
                                D[i][j - 1] = 1
                            else:
                                if D[i + 1][j] != 1:
                                    D[i + 1][j] = 1
                                elif D[i - 1][j] != 1:
                                    D[i - 1][j] = 1
                                elif D[i][j + 1] != 1:
                                    D[i][j + 1] = 1
                                else:
                                    D[i][j] = 1
        self.z_switch = False
        self.D_2d = D
        return self.D_2d

    def diffuse_3d(self, num_iter):
        """
        Begin diffusion in 3D.

        Fixes over the previous version:
          * only interior cells are swept (range(1, dim-1)), so D[i+1] can no
            longer raise IndexError at the upper boundary and D[i-1] no longer
            wraps around at index -1;
          * the num_iter argument is honoured (the old code performed exactly
            one sweep regardless of its value);
          * a mass whose randomly chosen target is occupied stays in place
            instead of being deleted, so total mass is conserved as in 2D.

        :param num_iter: number of the time steps
        :return: numpy array of final position of the masses
        """
        self.temp_N = num_iter  # kept for backward compatibility
        D = self.D_3d
        for _ in range(num_iter):
            for i in range(1, self.dim - 1):
                for j in range(1, self.dim - 1):
                    for k in range(1, self.dim - 1):
                        if D[i][j][k] == 1:
                            r = randint(0, 100)
                            # Six directions, ~1/6 probability each
                            # (thresholds kept from the original code).
                            if r <= 16.66:
                                ni, nj, nk = i + 1, j, k
                            elif 16.66 < r <= 33.32:
                                ni, nj, nk = i - 1, j, k
                            elif 33.32 < r <= 49.98:
                                ni, nj, nk = i, j + 1, k
                            elif 49.98 < r <= 66.64:
                                ni, nj, nk = i, j - 1, k
                            elif 66.64 < r <= 83.33:
                                ni, nj, nk = i, j, k + 1
                            else:
                                ni, nj, nk = i, j, k - 1
                            # Move only into a free cell; otherwise stay put.
                            if D[ni][nj][nk] != 1:
                                D[i][j][k] = 0
                                D[ni][nj][nk] = 1
        self.D_3d = D
        self.z_switch = True
        return self.D_3d

    def plot_potential_3d_slice(self, slice_pos):
        """
        Plots a slice of the 3d potential matrix
        :param slice_pos: slice position between two plates
        """
        # TODO: support 3d plotting

    def plot(self):
        """Render the 2D grid as a colour mesh and show it."""
        plt.pcolormesh(self.D_2d)
        plt.show()
| [
"numpy.zeros",
"random.randint",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.show"
] | [((617, 647), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim)'], {}), '((self.dim, self.dim))\n', (625, 647), True, 'import numpy as np\n'), ((996, 1036), 'numpy.zeros', 'np.zeros', (['(self.dim, self.dim, self.dim)'], {}), '((self.dim, self.dim, self.dim))\n', (1004, 1036), True, 'import numpy as np\n'), ((6417, 6442), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['self.D_2d'], {}), '(self.D_2d)\n', (6431, 6442), True, 'import matplotlib.pyplot as plt\n'), ((6451, 6461), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6459, 6461), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1665), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (1657, 1665), False, 'from random import randint\n'), ((4767, 4782), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4774, 4782), False, 'from random import randint\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 10 20:14:09 2020
@author: <EMAIL>
"""
# Batch-evaluates a range of saved FC-DenseNet (Tiramisu) checkpoints on a
# fixed image list and writes per-epoch precision/recall/F1 to a CSV.
import time
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
#from models import tiramisu_focal as tiramisu
from models import tiramisu as tiramisu
from datasets import camvid
from datasets import joint_transforms
import utils.imgs
import utils.training_crack as train_utils
import pandas as pd
import argparse
import json
import os
pid = os.getpid()
import subprocess
# Lower this process's scheduling priority. NOTE(review): shell=True with
# string interpolation is acceptable only because pid comes from os.getpid().
subprocess.Popen("renice -n 10 -p {}".format(pid),shell=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
# !!! check if there is normalization or not !!!
# !!! check in_channels
"""
python3 weights_evaluate2.py --path_folder .weights/alpha015_save_weights \
                            --start_epoch 94 \
                            --end_epoch 96 \
                            --step 2 \
                            --predict_list SegNet-Tutorial/CamVid/CFD/list/shallow_crack_no_aug_val.txt \
                            --filename val.csv
"""
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument("--epochs", type=int, default=100, help="number of epochs")
    parser.add_argument("--batch_size", type=int, default=4, help="size of each image batch")
    parser.add_argument("--n_class", type=int, default=2, help="the number of the class")
    # parser.add_argument("--pretrained_weights", type=str, default=None, help="if specified starts from checkpoint model")
    # parser.add_argument("--weights_name", type=str, default='weights-100.pth', help="path to save the weights")
    parser.add_argument("--path_folder", type=str, default='.weights/MDT_rutting/test', help="path to dataset")
    parser.add_argument("--gamma", type=float, default=2.0, help="gamma value for focal loss")
    # parser.add_argument("--train_list", type=str, default='SegNet-Tutorial/CamVid/CFD_/_train.txt', help="path to save the weights")
    parser.add_argument("--predict_list", type=str, default='SegNet-Tutorial/CamVid/MDT_rutting/file_list/MDT_no_aug_predict.txt',
                        help="path to dataset")
    parser.add_argument("--start_epoch", type=int, default=60, help="start epoch")
    parser.add_argument("--end_epoch", type=int, default=181, help="end epoch")
    parser.add_argument("--step", type=int, default=2, help="epoch step")
    parser.add_argument("--filename", type=str, default='predict_set.csv', help="csv name")
    parser.add_argument("--tolerance", type=int, default=5, help="tolerance margin")
    opt = parser.parse_args()
    print(opt)
    n_classes = opt.n_class
    batch_size = opt.batch_size
    gamma = opt.gamma
    #weights_name = opt.weights_name
    WEIGHTS_PATH = opt.path_folder
    CAMVID_PATH = Path(opt.path_folder)
    # Per-channel normalization statistics; the commented alternatives suggest
    # these were re-derived per dataset — confirm they match the training set.
    mean = [0.50898083, 0.52446532, 0.54404199]
    std = [0.08326811, 0.07471673, 0.07621879]
    # mean = [0.50932450,0.50932450,0.50932450]
    # std = [0.147114998,0.147114998,0.147114998]
    # mean = [0.50932450]
    # std = [0.147114998]
    normalize = transforms.Normalize(mean=mean, std=std)
    val_joint_transformer = transforms.Compose([
        # joint_transforms.JointRandomCrop(640), # commented for fine-tuning
        # joint_transforms.JointRandomHorizontalFlip()
    ])
    val_dset = camvid.CamVid2(
        CAMVID_PATH, path=opt.predict_list,
        joint_transform=val_joint_transformer,
        transform=transforms.Compose([
            transforms.ToTensor(),
            normalize
        ]))
    val_loader = torch.utils.data.DataLoader(
        val_dset, batch_size=batch_size, shuffle=True)
    model = tiramisu.FCDenseNet67(n_classes=n_classes,in_channels=3).cuda()
    metric = []
    #criterion = train_utils.FocalLoss(gamma)
    # NLLLoss expects log-probabilities from the model's output layer.
    criterion = nn.NLLLoss().cuda()
    # Evaluate every step-th checkpoint in [start_epoch, end_epoch).
    for epoch in range(opt.start_epoch,opt.end_epoch,opt.step):
        weights_name = f"weights-{epoch}.pth"
        train_utils.load_weights(model, os.path.join(WEIGHTS_PATH,weights_name))
        val_loss, result2, result5 = train_utils.test1(model,
                                                         val_loader,
                                                         criterion,
                                                         cls_num=n_classes,
                                                         tolerance=opt.tolerance)
        result = result5
        # result[4..6][1] hold precision / recall / F1 for the crack class
        # (presumably — verify against train_utils.test1).
        metric.append([epoch,result[4][1],result[5][1],result[6][1]])
        print(epoch,np.round(np.array([result[4][1],result[5][1],result[6][1]]),5))
        # precisioin recall f1
        # print(os.path.join(WEIGHTS_PATH,weights_name))
        # break
    # for continuous epoch:
    metric = np.round(np.array(metric),5)
    file_name = opt.filename
    dict_loss = {'epoch':metric[:,0],
                 'precision':metric[:,1],
                 'recall':metric[:,2],
                 'f1':metric[:,3]}
    df = pd.DataFrame(dict_loss)
    df.to_csv(os.path.join(WEIGHTS_PATH, file_name))
'''
record:
.weights/CFD/whole_image_norm/with_aug/with_norm/focal_train/gamma20/test2_save_weights/weights-100.pth
[0.97633 0.80983 0.88532]
'''
| [
"pandas.DataFrame",
"models.tiramisu.FCDenseNet67",
"os.getpid",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"utils.training_crack.test1",
"torch.nn.NLLLoss",
"pathlib.Path",
"torchvision.transforms.Compose",
"numpy.array",
"torchvision.transforms.Normalize",
"os.path.join",
"t... | [((605, 616), 'os.getpid', 'os.getpid', ([], {}), '()\n', (614, 616), False, 'import os\n'), ((1082, 1107), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1105, 1107), False, 'import argparse\n'), ((2785, 2806), 'pathlib.Path', 'Path', (['opt.path_folder'], {}), '(opt.path_folder)\n', (2789, 2806), False, 'from pathlib import Path\n'), ((3081, 3121), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (3101, 3121), True, 'import torchvision.transforms as transforms\n'), ((3152, 3174), 'torchvision.transforms.Compose', 'transforms.Compose', (['[]'], {}), '([])\n', (3170, 3174), True, 'import torchvision.transforms as transforms\n'), ((3576, 3650), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(val_dset, batch_size=batch_size, shuffle=True)\n', (3603, 3650), False, 'import torch\n'), ((4934, 4957), 'pandas.DataFrame', 'pd.DataFrame', (['dict_loss'], {}), '(dict_loss)\n', (4946, 4957), True, 'import pandas as pd\n'), ((4087, 4182), 'utils.training_crack.test1', 'train_utils.test1', (['model', 'val_loader', 'criterion'], {'cls_num': 'n_classes', 'tolerance': 'opt.tolerance'}), '(model, val_loader, criterion, cls_num=n_classes,\n tolerance=opt.tolerance)\n', (4104, 4182), True, 'import utils.training_crack as train_utils\n'), ((4723, 4739), 'numpy.array', 'np.array', (['metric'], {}), '(metric)\n', (4731, 4739), True, 'import numpy as np\n'), ((4972, 5009), 'os.path.join', 'os.path.join', (['WEIGHTS_PATH', 'file_name'], {}), '(WEIGHTS_PATH, file_name)\n', (4984, 5009), False, 'import os\n'), ((3681, 3738), 'models.tiramisu.FCDenseNet67', 'tiramisu.FCDenseNet67', ([], {'n_classes': 'n_classes', 'in_channels': '(3)'}), '(n_classes=n_classes, in_channels=3)\n', (3702, 3738), True, 'from models import tiramisu as tiramisu\n'), ((3833, 3845), 'torch.nn.NLLLoss', 'nn.NLLLoss', ([], {}), 
'()\n', (3843, 3845), True, 'import torch.nn as nn\n'), ((4009, 4049), 'os.path.join', 'os.path.join', (['WEIGHTS_PATH', 'weights_name'], {}), '(WEIGHTS_PATH, weights_name)\n', (4021, 4049), False, 'import os\n'), ((4492, 4544), 'numpy.array', 'np.array', (['[result[4][1], result[5][1], result[6][1]]'], {}), '([result[4][1], result[5][1], result[6][1]])\n', (4500, 4544), True, 'import numpy as np\n'), ((3493, 3514), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3512, 3514), True, 'import torchvision.transforms as transforms\n')] |
from fractions import Fraction
from functools import lru_cache
from typing import Callable, Union

import matplotlib.pyplot as plt
import numpy as np

from sympy import Rational
def P(adapts: int, num_uniques: int) -> "Union[Rational, int]":
    """Probability of holding exactly ``num_uniques`` distinct adapts after
    ``adapts`` total adaptation rolls.

    The recurrence: with ``k`` uniques already held, the next roll yields a
    new unique with probability (5 - k) / (9 - k) - 1, i.e. the transition
    weights visible below, or repeats with probability 4 / (8 - k).

    Args:
        adapts: Number of adapts rolled so far.
        num_uniques: Number of wanted unique adapts.
    Returns:
        Exact probability as a sympy ``Rational`` (or int 0/1 for the
        degenerate base cases).
    """
    # Impossible states: negative counts, or more uniques than the 4 that exist.
    # (The original fell through and returned None for num_uniques > 4.)
    if adapts < 0 or num_uniques < 0 or num_uniques > 4:
        return 0
    if adapts == 0:
        # No rolls yet: only zero uniques is attainable.
        return 1 if num_uniques == 0 else 0
    # Transition into this state either by gaining a new unique on the last
    # roll (coefficient (5-k)/(9-k)) or by repeating one already held (4/(8-k)).
    gain = (Rational(5 - num_uniques, 9 - num_uniques)
            * P(adapts - 1, num_uniques - 1)) if num_uniques else 0
    stay = Rational(4, 8 - num_uniques) * P(adapts - 1, num_uniques)
    return gain + stay
def specific(adapts: int) -> float:
    """Probability that one particular unique adapt is among those rolled.

    Args:
        adapts: Number of adapts.
    Returns:
        Probability of getting a specific unique adapt.
    """
    # With k uniques held, a given adapt is one of them with chance 6k/24.
    weights = (Rational(6, 24), Rational(12, 24),
               Rational(18, 24), Rational(24, 24))
    return sum(w * P(adapts, k) for k, w in enumerate(weights, start=1))
def either(adapts: int) -> float:
    """Probability that at least one of two chosen unique adapts was rolled.

    Args:
        adapts: Number of adapts.
    Returns:
        Probability of getting one or both of two unique adapts.
    """
    # Conditional weight of "hit at least one of the pair" given k uniques.
    weights = (Rational(12, 24), Rational(20, 24),
               Rational(24, 24), Rational(24, 24))
    return sum(w * P(adapts, k) for k, w in enumerate(weights, start=1))
def both(adapts: int) -> float:
    """Probability that two specific unique adapts were both rolled.

    Args:
        adapts: Number of adapts.
    Returns:
        Probability of getting two specific adapts.
    """
    # Conditional weight of "hit both of the pair" given k uniques held.
    weights = (Rational(0, 24), Rational(4, 24),
               Rational(12, 24), Rational(24, 24))
    return sum(w * P(adapts, k) for k, w in enumerate(weights, start=1))
# Evaluate each probability curve for 0..15 total adaptations.
DSorP = np.array([float(either(k)) for k in range(16)], dtype=float)
Poison = np.array([float(specific(k)) for k in range(16)], dtype=float)
DSandP = np.array([float(both(k)) for k in range(16)], dtype=float)
# Uniform 12 pt typography across every figure element.
for group, option in (
    ("font", "size"),
    ("axes", "titlesize"),
    ("axes", "labelsize"),
    ("xtick", "labelsize"),
    ("ytick", "labelsize"),
    ("legend", "fontsize"),
    ("figure", "titlesize"),
):
    plt.rc(group, **{option: 12})
fig, ax1 = plt.subplots(1, 1, figsize=(6, 4))
xs = np.linspace(0, 15, 16)
ax1.plot(xs, DSorP, "go-", label="DS or P", markersize=7)
ax1.plot(xs, Poison, "bo-", label="P", markersize=7)
ax1.plot(xs, DSandP, "ro-", label="DS and P", markersize=7)
ax1.set_xlim(0, 15.5)
ax1.set_ylim(0, 1.05)
ax1.set_xticks(np.linspace(0, 15, 16, dtype=int))
ax1.set_yticks([0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
ax1.grid(which="minor")
ax1.grid(which="major")
ax1.set_ylabel("Probability")
ax1.set_xlabel("Number of adaptations")
plt.legend(frameon=False)
plt.tight_layout()
plt.savefig("amalgadon_chances.svg")
| [
"sympy.Rational",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.rc",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] | [((2397, 2409), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2405, 2409), True, 'import numpy as np\n'), ((2419, 2431), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2427, 2431), True, 'import numpy as np\n'), ((2441, 2453), 'numpy.zeros', 'np.zeros', (['(16)'], {}), '(16)\n', (2449, 2453), True, 'import numpy as np\n'), ((2555, 2578), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': '(12)'}), "('font', size=12)\n", (2561, 2578), True, 'import matplotlib.pyplot as plt\n'), ((2579, 2607), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(12)'}), "('axes', titlesize=12)\n", (2585, 2607), True, 'import matplotlib.pyplot as plt\n'), ((2608, 2636), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': '(12)'}), "('axes', labelsize=12)\n", (2614, 2636), True, 'import matplotlib.pyplot as plt\n'), ((2637, 2666), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': '(12)'}), "('xtick', labelsize=12)\n", (2643, 2666), True, 'import matplotlib.pyplot as plt\n'), ((2667, 2696), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': '(12)'}), "('ytick', labelsize=12)\n", (2673, 2696), True, 'import matplotlib.pyplot as plt\n'), ((2697, 2726), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': '(12)'}), "('legend', fontsize=12)\n", (2703, 2726), True, 'import matplotlib.pyplot as plt\n'), ((2727, 2757), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': '(12)'}), "('figure', titlesize=12)\n", (2733, 2757), True, 'import matplotlib.pyplot as plt\n'), ((2770, 2804), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(6, 4)'}), '(1, 1, figsize=(6, 4))\n', (2782, 2804), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3347), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'frameon': '(False)'}), '(frameon=False)\n', (3332, 3347), True, 'import matplotlib.pyplot as plt\n'), ((3348, 3366), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', 
([], {}), '()\n', (3364, 3366), True, 'import matplotlib.pyplot as plt\n'), ((3367, 3403), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""amalgadon_chances.svg"""'], {}), "('amalgadon_chances.svg')\n", (3378, 3403), True, 'import matplotlib.pyplot as plt\n'), ((2815, 2837), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(16)'], {}), '(0, 15, 16)\n', (2826, 2837), True, 'import numpy as np\n'), ((2893, 2915), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(16)'], {}), '(0, 15, 16)\n', (2904, 2915), True, 'import numpy as np\n'), ((2966, 2988), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(16)'], {}), '(0, 15, 16)\n', (2977, 2988), True, 'import numpy as np\n'), ((3097, 3130), 'numpy.linspace', 'np.linspace', (['(0)', '(15)', '(16)'], {'dtype': 'int'}), '(0, 15, 16, dtype=int)\n', (3108, 3130), True, 'import numpy as np\n'), ((1521, 1537), 'sympy.Rational', 'Rational', (['(24)', '(24)'], {}), '(24, 24)\n', (1529, 1537), False, 'from sympy import Rational\n'), ((1952, 1968), 'sympy.Rational', 'Rational', (['(24)', '(24)'], {}), '(24, 24)\n', (1960, 1968), False, 'from sympy import Rational\n'), ((2349, 2365), 'sympy.Rational', 'Rational', (['(24)', '(24)'], {}), '(24, 24)\n', (2357, 2365), False, 'from sympy import Rational\n'), ((1479, 1495), 'sympy.Rational', 'Rational', (['(18)', '(24)'], {}), '(18, 24)\n', (1487, 1495), False, 'from sympy import Rational\n'), ((1910, 1926), 'sympy.Rational', 'Rational', (['(24)', '(24)'], {}), '(24, 24)\n', (1918, 1926), False, 'from sympy import Rational\n'), ((2307, 2323), 'sympy.Rational', 'Rational', (['(12)', '(24)'], {}), '(12, 24)\n', (2315, 2323), False, 'from sympy import Rational\n'), ((667, 681), 'sympy.Rational', 'Rational', (['(4)', '(8)'], {}), '(4, 8)\n', (675, 681), False, 'from sympy import Rational\n'), ((1396, 1411), 'sympy.Rational', 'Rational', (['(6)', '(24)'], {}), '(6, 24)\n', (1404, 1411), False, 'from sympy import Rational\n'), ((1437, 1453), 'sympy.Rational', 'Rational', (['(12)', 
'(24)'], {}), '(12, 24)\n', (1445, 1453), False, 'from sympy import Rational\n'), ((1826, 1842), 'sympy.Rational', 'Rational', (['(12)', '(24)'], {}), '(12, 24)\n', (1834, 1842), False, 'from sympy import Rational\n'), ((1868, 1884), 'sympy.Rational', 'Rational', (['(20)', '(24)'], {}), '(20, 24)\n', (1876, 1884), False, 'from sympy import Rational\n'), ((2225, 2240), 'sympy.Rational', 'Rational', (['(0)', '(24)'], {}), '(0, 24)\n', (2233, 2240), False, 'from sympy import Rational\n'), ((2266, 2281), 'sympy.Rational', 'Rational', (['(4)', '(24)'], {}), '(4, 24)\n', (2274, 2281), False, 'from sympy import Rational\n'), ((743, 757), 'sympy.Rational', 'Rational', (['(4)', '(8)'], {}), '(4, 8)\n', (751, 757), False, 'from sympy import Rational\n'), ((779, 793), 'sympy.Rational', 'Rational', (['(4)', '(7)'], {}), '(4, 7)\n', (787, 793), False, 'from sympy import Rational\n'), ((855, 869), 'sympy.Rational', 'Rational', (['(3)', '(7)'], {}), '(3, 7)\n', (863, 869), False, 'from sympy import Rational\n'), ((891, 905), 'sympy.Rational', 'Rational', (['(4)', '(6)'], {}), '(4, 6)\n', (899, 905), False, 'from sympy import Rational\n'), ((967, 981), 'sympy.Rational', 'Rational', (['(2)', '(6)'], {}), '(2, 6)\n', (975, 981), False, 'from sympy import Rational\n'), ((1003, 1017), 'sympy.Rational', 'Rational', (['(4)', '(5)'], {}), '(4, 5)\n', (1011, 1017), False, 'from sympy import Rational\n'), ((1079, 1093), 'sympy.Rational', 'Rational', (['(1)', '(5)'], {}), '(1, 5)\n', (1087, 1093), False, 'from sympy import Rational\n'), ((1115, 1129), 'sympy.Rational', 'Rational', (['(4)', '(4)'], {}), '(4, 4)\n', (1123, 1129), False, 'from sympy import Rational\n')] |
# coding: utf-8
from __future__ import division, unicode_literals
"""
Created on March 25, 2013
@author: geoffroy
"""
import numpy as np
from math import ceil
from math import cos
from math import sin
from math import tan
from math import pi
from warnings import warn
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
class HighSymmKpath(object):
"""
This class looks for path along high symmetry lines in
the Brillouin Zone.
It is based on <NAME>., & <NAME>. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
The symmetry is determined by spglib through the
SpacegroupAnalyzer class
Args:
structure (Structure): Structure object
symprec (float): Tolerance for symmetry finding
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5):
self._structure = structure
self._sym = SpacegroupAnalyzer(structure, symprec=symprec,
angle_tolerance=angle_tolerance)
self._prim = self._sym\
.get_primitive_standard_structure(international_monoclinic=False)
self._conv = self._sym.get_conventional_standard_structure(international_monoclinic=False)
self._prim_rec = self._prim.lattice.reciprocal_lattice
self._kpath = None
lattice_type = self._sym.get_lattice_type()
spg_symbol = self._sym.get_spacegroup_symbol()
if lattice_type == "cubic":
if "P" in spg_symbol:
self._kpath = self.cubic()
elif "F" in spg_symbol:
self._kpath = self.fcc()
elif "I" in spg_symbol:
self._kpath = self.bcc()
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "tetragonal":
if "P" in spg_symbol:
self._kpath = self.tet()
elif "I" in spg_symbol:
a = self._conv.lattice.abc[0]
c = self._conv.lattice.abc[2]
if c < a:
self._kpath = self.bctet1(c, a)
else:
self._kpath = self.bctet2(c, a)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "orthorhombic":
a = self._conv.lattice.abc[0]
b = self._conv.lattice.abc[1]
c = self._conv.lattice.abc[2]
if "P" in spg_symbol:
self._kpath = self.orc()
elif "F" in spg_symbol:
if 1 / a ** 2 > 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf1(a, b, c)
elif 1 / a ** 2 < 1 / b ** 2 + 1 / c ** 2:
self._kpath = self.orcf2(a, b, c)
else:
self._kpath = self.orcf3(a, b, c)
elif "I" in spg_symbol:
self._kpath = self.orci(a, b, c)
elif "C" in spg_symbol:
self._kpath = self.orcc(a, b, c)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "hexagonal":
self._kpath = self.hex()
elif lattice_type == "rhombohedral":
alpha = self._prim.lattice.lengths_and_angles[1][0]
if alpha < 90:
self._kpath = self.rhl1(alpha * pi / 180)
else:
self._kpath = self.rhl2(alpha * pi / 180)
elif lattice_type == "monoclinic":
a, b, c = self._conv.lattice.abc
alpha = self._conv.lattice.lengths_and_angles[1][0]
#beta = self._conv.lattice.lengths_and_angles[1][1]
if "P" in spg_symbol:
self._kpath = self.mcl(b, c, alpha * pi / 180)
elif "C" in spg_symbol:
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kgamma > 90:
self._kpath = self.mclc1(a, b, c, alpha * pi / 180)
if kgamma == 90:
self._kpath = self.mclc2(a, b, c, alpha * pi / 180)
if kgamma < 90:
if b * cos(alpha * pi / 180) / c\
+ b ** 2 * sin(alpha) ** 2 / a ** 2 < 1:
self._kpath = self.mclc3(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha) ** 2 / a ** 2 == 1:
self._kpath = self.mclc4(a, b, c, alpha * pi / 180)
if b * cos(alpha * pi / 180) / c \
+ b ** 2 * sin(alpha) ** 2 / a ** 2 > 1:
self._kpath = self.mclc5(a, b, c, alpha * pi / 180)
else:
warn("Unexpected value for spg_symbol: %s" % spg_symbol)
elif lattice_type == "triclinic":
kalpha = self._prim_rec.lengths_and_angles[1][0]
kbeta = self._prim_rec.lengths_and_angles[1][1]
kgamma = self._prim_rec.lengths_and_angles[1][2]
if kalpha > 90 and kbeta > 90 and kgamma > 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma < 90:
self._kpath = self.trib()
if kalpha > 90 and kbeta > 90 and kgamma == 90:
self._kpath = self.tria()
if kalpha < 90 and kbeta < 90 and kgamma == 90:
self._kpath = self.trib()
else:
warn("Unknown lattice type %s" % lattice_type)
@property
def structure(self):
"""
Returns:
The standardized primitive structure
"""
return self._prim
@property
def kpath(self):
"""
Returns:
The symmetry line path in reciprocal space
"""
return self._kpath
def get_kpoints(self, line_density=20):
"""
Returns:
the kpoints along the paths in cartesian coordinates
together with the labels for symmetry points -Wei
"""
list_k_points = []
sym_point_labels = []
for b in self.kpath['path']:
for i in range(1, len(b)):
start = np.array(self.kpath['kpoints'][b[i - 1]])
end = np.array(self.kpath['kpoints'][b[i]])
distance = np.linalg.norm(
self._prim_rec.get_cartesian_coords(start) -
self._prim_rec.get_cartesian_coords(end))
nb = int(ceil(distance * line_density))
sym_point_labels.extend([b[i - 1]] + [''] * (nb - 1) + [b[i]])
list_k_points.extend(
[self._prim_rec.get_cartesian_coords(start)
+ float(i) / float(nb) *
(self._prim_rec.get_cartesian_coords(end)
- self._prim_rec.get_cartesian_coords(start))
for i in range(0, nb + 1)])
return list_k_points, sym_point_labels
    def get_kpath_plot(self, **kwargs):
        """
        Gives the plot (as a matplotlib object) of the symmetry line path in
        the Brillouin Zone.
        Returns:
            `matplotlib` figure.
        ================ ==============================================================
        kwargs           Meaning
        ================ ==============================================================
        show             True to show the figure (Default).
        savefig          'abc.png' or 'abc.eps'* to save the figure to a file.
        ================ ==============================================================
        """
        # Matplotlib is imported lazily so the class is usable without it.
        import itertools
        import matplotlib.pyplot as plt
        from mpl_toolkits.mplot3d import axes3d
        # Draw every edge shared by two faces of the given polyhedron
        # (used below for the Wigner-Seitz cell outline).
        def _plot_shape_skeleton(bz, style):
            for iface in range(len(bz)):
                for line in itertools.combinations(bz[iface], 2):
                    for jface in range(len(bz)):
                        if iface < jface and line[0] in bz[jface]\
                                and line[1] in bz[jface]:
                            ax.plot([line[0][0], line[1][0]],
                                    [line[0][1], line[1][1]],
                                    [line[0][2], line[1][2]], style)
        # Draw the three lattice vectors from the origin in green.
        def _plot_lattice(lattice):
            vertex1 = lattice.get_cartesian_coords([0.0, 0.0, 0.0])
            vertex2 = lattice.get_cartesian_coords([1.0, 0.0, 0.0])
            ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                    [vertex1[2], vertex2[2]], color='g', linewidth=3)
            vertex2 = lattice.get_cartesian_coords([0.0, 1.0, 0.0])
            ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                    [vertex1[2], vertex2[2]], color='g', linewidth=3)
            vertex2 = lattice.get_cartesian_coords([0.0, 0.0, 1.0])
            ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                    [vertex1[2], vertex2[2]], color='g', linewidth=3)
        # Draw each branch of the high-symmetry path in red, segment by segment.
        def _plot_kpath(kpath, lattice):
            for line in kpath['path']:
                for k in range(len(line) - 1):
                    vertex1 = lattice.get_cartesian_coords(kpath['kpoints']
                                                           [line[k]])
                    vertex2 = lattice.get_cartesian_coords(kpath['kpoints']
                                                           [line[k + 1]])
                    ax.plot([vertex1[0], vertex2[0]], [vertex1[1], vertex2[1]],
                            [vertex1[2], vertex2[2]], color='r', linewidth=3)
        # Annotate every special point with its (LaTeX-formatted) label in blue.
        def _plot_labels(kpath, lattice):
            for k in kpath['kpoints']:
                label = k
                # Names containing '\' or '_' (e.g. \Gamma, X_1) render as math.
                if k.startswith("\\") or k.find("_") != -1:
                    label = "$" + k + "$"
                off = 0.01
                ax.text(lattice.get_cartesian_coords(kpath['kpoints'][k])[0]
                        + off,
                        lattice.get_cartesian_coords(kpath['kpoints'][k])[1]
                        + off,
                        lattice.get_cartesian_coords(kpath['kpoints'][k])[2]
                        + off,
                        label, color='b', size='25')
                ax.scatter([lattice.get_cartesian_coords(
                            kpath['kpoints'][k])[0]],
                           [lattice.get_cartesian_coords(
                            kpath['kpoints'][k])[1]],
                           [lattice.get_cartesian_coords(
                            kpath['kpoints'][k])[2]], color='b')
        fig = plt.figure()
        ax = axes3d.Axes3D(fig)
        _plot_lattice(self._prim_rec)
        _plot_shape_skeleton(self._prim_rec.get_wigner_seitz_cell(), '-k')
        _plot_kpath(self.kpath, self._prim_rec)
        _plot_labels(self.kpath, self._prim_rec)
        ax.axis("off")
        show = kwargs.pop("show", True)
        if show:
            plt.show()
        savefig = kwargs.pop("savefig", None)
        if savefig:
            fig.savefig(savefig)
        return fig
def cubic(self):
self.name = "CUB"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'X': np.array([0.0, 0.5, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0])}
path = [["\Gamma", "X", "M", "\Gamma", "R", "X"], ["M", "R"]]
return {'kpoints': kpoints, 'path': path}
def fcc(self):
self.name = "FCC"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'K': np.array([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]),
'L': np.array([0.5, 0.5, 0.5]),
'U': np.array([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]),
'W': np.array([0.5, 1.0 / 4.0, 3.0 / 4.0]),
'X': np.array([0.5, 0.0, 0.5])}
path = [["\Gamma", "X", "W", "K",
"\Gamma", "L", "U", "W", "L", "K"], ["U", "X"]]
return {'kpoints': kpoints, 'path': path}
def bcc(self):
self.name = "BCC"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'H': np.array([0.5, -0.5, 0.5]),
'P': np.array([0.25, 0.25, 0.25]),
'N': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "H", "N", "\Gamma", "P", "H"], ["P", "N"]]
return {'kpoints': kpoints, 'path': path}
def tet(self):
self.name = "TET"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.5, 0.0]),
'R': np.array([0.0, 0.5, 0.5]),
'X': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "X", "M", "\Gamma", "Z", "R", "A", "Z"], ["X", "R"],
["M", "A"]]
return {'kpoints': kpoints, 'path': path}
def bctet1(self, c, a):
self.name = "BCT1"
eta = (1 + c ** 2 / a ** 2) / 4.0
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'M': np.array([-0.5, 0.5, 0.5]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'X': np.array([0.0, 0.0, 0.5]),
'Z': np.array([eta, eta, -eta]),
'Z_1': np.array([-eta, 1 - eta, eta])}
path = [["\Gamma", "X", "M", "\Gamma", "Z", "P", "N", "Z_1", "M"],
["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def bctet2(self, c, a):
self.name = "BCT2"
eta = (1 + a ** 2 / c ** 2) / 4.0
zeta = a ** 2 / (2 * c ** 2)
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.0, 0.5, 0.0]),
'P': np.array([0.25, 0.25, 0.25]),
'\Sigma': np.array([-eta, eta, eta]),
'\Sigma_1': np.array([eta, 1 - eta, -eta]),
'X': np.array([0.0, 0.0, 0.5]),
'Y': np.array([-zeta, zeta, 0.5]),
'Y_1': np.array([0.5, 0.5, -zeta]),
'Z': np.array([0.5, 0.5, -0.5])}
path = [["\Gamma", "X", "Y", "\Sigma", "\Gamma", "Z",
"\Sigma_1", "N", "P", "Y_1", "Z"], ["X", "P"]]
return {'kpoints': kpoints, 'path': path}
def orc(self):
self.name = "ORC"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'R': np.array([0.5, 0.5, 0.5]),
'S': np.array([0.5, 0.5, 0.0]),
'T': np.array([0.0, 0.5, 0.5]),
'U': np.array([0.5, 0.0, 0.5]),
'X': np.array([0.5, 0.0, 0.0]),
'Y': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "X", "S", "Y", "\Gamma",
"Z", "U", "R", "T", "Z"], ["Y", "T"], ["U", "X"], ["S", "R"]]
return {'kpoints': kpoints, 'path': path}
def orcf1(self, a, b, c):
self.name = "ORCF1"
zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5 + zeta, zeta]),
'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
'L': np.array([0.5, 0.5, 0.5]),
'T': np.array([1, 0.5, 0.5]),
'X': np.array([0.0, eta, eta]),
'X_1': np.array([1, 1 - eta, 1 - eta]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\Gamma", "Y", "T", "Z", "\Gamma", "X", "A_1", "Y"],
["T", "X_1"], ["X", "A", "Z"], ["L", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orcf2(self, a, b, c):
self.name = "ORCF2"
phi = (1 + c ** 2 / b ** 2 - c ** 2 / a ** 2) / 4
eta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
delta = (1 + b ** 2 / a ** 2 - b ** 2 / c ** 2) / 4
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'C': np.array([0.5, 0.5 - eta, 1 - eta]),
'C_1': np.array([0.5, 0.5 + eta, eta]),
'D': np.array([0.5 - delta, 0.5, 1 - delta]),
'D_1': np.array([0.5 + delta, 0.5, delta]),
'L': np.array([0.5, 0.5, 0.5]),
'H': np.array([1 - phi, 0.5 - phi, 0.5]),
'H_1': np.array([phi, 0.5 + phi, 0.5]),
'X': np.array([0.0, 0.5, 0.5]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\Gamma", "Y", "C", "D", "X", "\Gamma",
"Z", "D_1", "H", "C"], ["C_1", "Z"], ["X", "H_1"], ["H", "Y"],
["L", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orcf3(self, a, b, c):
self.name = "ORCF3"
zeta = (1 + a ** 2 / b ** 2 - a ** 2 / c ** 2) / 4
eta = (1 + a ** 2 / b ** 2 + a ** 2 / c ** 2) / 4
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5 + zeta, zeta]),
'A_1': np.array([0.5, 0.5 - zeta, 1 - zeta]),
'L': np.array([0.5, 0.5, 0.5]),
'T': np.array([1, 0.5, 0.5]),
'X': np.array([0.0, eta, eta]),
'X_1': np.array([1, 1 - eta, 1 - eta]),
'Y': np.array([0.5, 0.0, 0.5]),
'Z': np.array([0.5, 0.5, 0.0])}
path = [["\Gamma", "Y", "T", "Z", "\Gamma", "X", "A_1", "Y"],
["X", "A", "Z"], ["L", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def orci(self, a, b, c):
self.name = "ORCI"
zeta = (1 + a ** 2 / c ** 2) / 4
eta = (1 + b ** 2 / c ** 2) / 4
delta = (b ** 2 - a ** 2) / (4 * c ** 2)
mu = (a ** 2 + b ** 2) / (4 * c ** 2)
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'L': np.array([-mu, mu, 0.5 - delta]),
'L_1': np.array([mu, -mu, 0.5 + delta]),
'L_2': np.array([0.5 - delta, 0.5 + delta, -mu]),
'R': np.array([0.0, 0.5, 0.0]),
'S': np.array([0.5, 0.0, 0.0]),
'T': np.array([0.0, 0.0, 0.5]),
'W': np.array([0.25, 0.25, 0.25]),
'X': np.array([-zeta, zeta, zeta]),
'X_1': np.array([zeta, 1 - zeta, -zeta]),
'Y': np.array([eta, -eta, eta]),
'Y_1': np.array([1 - eta, eta, -eta]),
'Z': np.array([0.5, 0.5, -0.5])}
path = [["\Gamma", "X", "L", "T", "W", "R", "X_1", "Z",
"\Gamma", "Y", "S", "W"], ["L_1", "Y"], ["Y_1", "Z"]]
return {'kpoints': kpoints, 'path': path}
def orcc(self, a, b, c):
self.name = "ORCC"
zeta = (1 + a ** 2 / b ** 2) / 4
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([zeta, zeta, 0.5]),
'A_1': np.array([-zeta, 1 - zeta, 0.5]),
'R': np.array([0.0, 0.5, 0.5]),
'S': np.array([0.0, 0.5, 0.0]),
'T': np.array([-0.5, 0.5, 0.5]),
'X': np.array([zeta, zeta, 0.0]),
'X_1': np.array([-zeta, 1 - zeta, 0.0]),
'Y': np.array([-0.5, 0.5, 0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "X", "S", "R", "A", "Z",
"\Gamma", "Y", "X_1", "A_1", "T", "Y"], ["Z", "T"]]
return {'kpoints': kpoints, 'path': path}
def hex(self):
self.name = "HEX"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.0, 0.0, 0.5]),
'H': np.array([1.0 / 3.0, 1.0 / 3.0, 0.5]),
'K': np.array([1.0 / 3.0, 1.0 / 3.0, 0.0]),
'L': np.array([0.5, 0.0, 0.5]),
'M': np.array([0.5, 0.0, 0.0])}
path = [["\Gamma", "M", "K", "\Gamma", "A", "L", "H", "A"], ["L", "M"],
["K", "H"]]
return {'kpoints': kpoints, 'path': path}
def rhl1(self, alpha):
self.name = "RHL1"
eta = (1 + 4 * cos(alpha)) / (2 + 4 * cos(alpha))
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'B': np.array([eta, 0.5, 1.0 - eta]),
'B_1': np.array([1.0 / 2.0, 1.0 - eta, eta - 1.0]),
'F': np.array([0.5, 0.5, 0.0]),
'L': np.array([0.5, 0.0, 0.0]),
'L_1': np.array([0.0, 0.0, -0.5]),
'P': np.array([eta, nu, nu]),
'P_1': np.array([1.0 - nu, 1.0 - nu, 1.0 - eta]),
'P_2': np.array([nu, nu, eta - 1.0]),
'Q': np.array([1.0 - nu, nu, 0.0]),
'X': np.array([nu, 0.0, -nu]),
'Z': np.array([0.5, 0.5, 0.5])}
path = [["\Gamma", "L", "B_1"], ["B", "Z", "\Gamma", "X"],
["Q", "F", "P_1", "Z"], ["L", "P"]]
return {'kpoints': kpoints, 'path': path}
def rhl2(self, alpha):
self.name = "RHL2"
eta = 1 / (2 * tan(alpha / 2.0) ** 2)
nu = 3.0 / 4.0 - eta / 2.0
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([0.5, -0.5, 0.0]),
'L': np.array([0.5, 0.0, 0.0]),
'P': np.array([1 - nu, -nu, 1 - nu]),
'P_1': np.array([nu, nu - 1.0, nu - 1.0]),
'Q': np.array([eta, eta, eta]),
'Q_1': np.array([1.0 - eta, -eta, -eta]),
'Z': np.array([0.5, -0.5, 0.5])}
path = [["\Gamma", "P", "Z", "Q", "\Gamma",
"F", "P_1", "Q_1", "L", "Z"]]
return {'kpoints': kpoints, 'path': path}
def mcl(self, b, c, beta):
self.name = "MCL"
eta = (1 - b * cos(beta) / c) / (2 * sin(beta) ** 2)
nu = 0.5 - eta * c * cos(beta) / b
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'A': np.array([0.5, 0.5, 0.0]),
'C': np.array([0.0, 0.5, 0.5]),
'D': np.array([0.5, 0.0, 0.5]),
'D_1': np.array([0.5, 0.5, -0.5]),
'E': np.array([0.5, 0.5, 0.5]),
'H': np.array([0.0, eta, 1.0 - nu]),
'H_1': np.array([0.0, 1.0 - eta, nu]),
'H_2': np.array([0.0, eta, -nu]),
'M': np.array([0.5, eta, 1.0 - nu]),
'M_1': np.array([0.5, 1 - eta, nu]),
'M_2': np.array([0.5, 1 - eta, nu]),
'X': np.array([0.0, 0.5, 0.0]),
'Y': np.array([0.0, 0.0, 0.5]),
'Y_1': np.array([0.0, 0.0, -0.5]),
'Z': np.array([0.5, 0.0, 0.0])}
path = [["\Gamma", "Y", "H", "C", "E", "M_1", "A", "X", "H_1"],
["M", "D", "Z"], ["Y", "D"]]
return {'kpoints': kpoints, 'path': path}
def mclc1(self, a, b, c, alpha):
self.name = "MCLC1"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
'F_1': np.array([zeta, zeta, eta]),
'F_2': np.array([-zeta, -zeta, 1 - eta]),
#'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
'I': np.array([phi, 1 - phi, 0.5]),
'I_1': np.array([1 - phi, phi - 1, 0.5]),
'L': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'X': np.array([1 - psi, psi - 1, 0.0]),
'X_1': np.array([psi, 1 - psi, 0.0]),
'X_2': np.array([psi - 1, -psi, 0.0]),
'Y': np.array([0.5, 0.5, 0.0]),
'Y_1': np.array([-0.5, -0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
["Y", "X_1"], ["X", "\Gamma", "N"], ["M", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def mclc2(self, a, b, c, alpha):
self.name = "MCLC2"
zeta = (2 - b * cos(alpha) / c) / (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
psi = 0.75 - a ** 2 / (4 * b ** 2 * sin(alpha) ** 2)
phi = psi + (0.75 - psi) * b * cos(alpha) / c
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'F': np.array([1 - zeta, 1 - zeta, 1 - eta]),
'F_1': np.array([zeta, zeta, eta]),
'F_2': np.array([-zeta, -zeta, 1 - eta]),
'F_3': np.array([1 - zeta, -zeta, 1 - eta]),
'I': np.array([phi, 1 - phi, 0.5]),
'I_1': np.array([1 - phi, phi - 1, 0.5]),
'L': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'X': np.array([1 - psi, psi - 1, 0.0]),
'X_1': np.array([psi, 1 - psi, 0.0]),
'X_2': np.array([psi - 1, -psi, 0.0]),
'Y': np.array([0.5, 0.5, 0.0]),
'Y_1': np.array([-0.5, -0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "F_1"],
["N", "\Gamma", "M"]]
return {'kpoints': kpoints, 'path': path}
def mclc3(self, a, b, c, alpha):
self.name = "MCLC3"
mu = (1 + b ** 2 / a ** 2) / 4.0
delta = b * c * cos(alpha) / (2 * a ** 2)
zeta = mu - 0.25 + (1 - b * cos(alpha) / c)\
/ (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
phi = 1 + zeta - 2 * mu
psi = eta - 2 * delta
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([1 - phi, 1 - phi, 1 - psi]),
'F_1': np.array([phi, phi - 1, psi]),
'F_2': np.array([1 - phi, -phi, 1 - psi]),
'H': np.array([zeta, zeta, eta]),
'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
'H_2': np.array([-zeta, -zeta, 1 - eta]),
'I': np.array([0.5, -0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'X': np.array([0.5, -0.5, 0.0]),
'Y': np.array([mu, mu, delta]),
'Y_1': np.array([1 - mu, -mu, -delta]),
'Y_2': np.array([-mu, -mu, -delta]),
'Y_3': np.array([mu, mu - 1, delta]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "Y", "F", "H", "Z", "I", "F_1"],
["H_1", "Y_1", "X", "\Gamma", "N"], ["M", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def mclc4(self, a, b, c, alpha):
self.name = "MCLC4"
mu = (1 + b ** 2 / a ** 2) / 4.0
delta = b * c * cos(alpha) / (2 * a ** 2)
zeta = mu - 0.25 + (1 - b * cos(alpha) / c)\
/ (4 * sin(alpha) ** 2)
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
phi = 1 + zeta - 2 * mu
psi = eta - 2 * delta
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([1 - phi, 1 - phi, 1 - psi]),
'F_1': np.array([phi, phi - 1, psi]),
'F_2': np.array([1 - phi, -phi, 1 - psi]),
'H': np.array([zeta, zeta, eta]),
'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
'H_2': np.array([-zeta, -zeta, 1 - eta]),
'I': np.array([0.5, -0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'X': np.array([0.5, -0.5, 0.0]),
'Y': np.array([mu, mu, delta]),
'Y_1': np.array([1 - mu, -mu, -delta]),
'Y_2': np.array([-mu, -mu, -delta]),
'Y_3': np.array([mu, mu - 1, delta]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "Y", "F", "H", "Z", "I"],
["H_1", "Y_1", "X", "\Gamma", "N"], ["M", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def mclc5(self, a, b, c, alpha):
self.name = "MCLC5"
zeta = (b ** 2 / a ** 2 + (1 - b * cos(alpha) / c)
/ sin(alpha) ** 2) / 4
eta = 0.5 + 2 * zeta * c * cos(alpha) / b
mu = eta / 2 + b ** 2 / (4 * a ** 2) \
- b * c * cos(alpha) / (2 * a ** 2)
nu = 2 * mu - zeta
rho = 1 - zeta * a ** 2 / b ** 2
omega = (4 * nu - 1 - b ** 2 * sin(alpha) ** 2 / a ** 2)\
* c / (2 * b * cos(alpha))
delta = zeta * c * cos(alpha) / b + omega / 2 - 0.25
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'F': np.array([nu, nu, omega]),
'F_1': np.array([1 - nu, 1 - nu, 1 - omega]),
'F_2': np.array([nu, nu - 1, omega]),
'H': np.array([zeta, zeta, eta]),
'H_1': np.array([1 - zeta, -zeta, 1 - eta]),
'H_2': np.array([-zeta, -zeta, 1 - eta]),
'I': np.array([rho, 1 - rho, 0.5]),
'I_1': np.array([1 - rho, rho - 1, 0.5]),
'L': np.array([0.5, 0.5, 0.5]),
'M': np.array([0.5, 0.0, 0.5]),
'N': np.array([0.5, 0.0, 0.0]),
'N_1': np.array([0.0, -0.5, 0.0]),
'X': np.array([0.5, -0.5, 0.0]),
'Y': np.array([mu, mu, delta]),
'Y_1': np.array([1 - mu, -mu, -delta]),
'Y_2': np.array([-mu, -mu, -delta]),
'Y_3': np.array([mu, mu - 1, delta]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["\Gamma", "Y", "F", "L", "I"], ["I_1", "Z", "H", "F_1"],
["H_1", "Y_1", "X", "\Gamma", "N"], ["M", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def tria(self):
self.name = "TRI1a"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'L': np.array([0.5, 0.5, 0.0]),
'M': np.array([0.0, 0.5, 0.5]),
'N': np.array([0.5, 0.0, 0.5]),
'R': np.array([0.5, 0.5, 0.5]),
'X': np.array([0.5, 0.0, 0.0]),
'Y': np.array([0.0, 0.5, 0.0]),
'Z': np.array([0.0, 0.0, 0.5])}
path = [["X", "\Gamma", "Y"], ["L", "\Gamma", "Z"],
["N", "\Gamma", "M"], ["R", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
def trib(self):
self.name = "TRI1b"
kpoints = {'\Gamma': np.array([0.0, 0.0, 0.0]),
'L': np.array([0.5, -0.5, 0.0]),
'M': np.array([0.0, 0.0, 0.5]),
'N': np.array([-0.5, -0.5, 0.5]),
'R': np.array([0.0, -0.5, 0.5]),
'X': np.array([0.0, -0.5, 0.0]),
'Y': np.array([0.5, 0.0, 0.0]),
'Z': np.array([-0.5, 0.0, 0.5])}
path = [["X", "\Gamma", "Y"], ["L", "\Gamma", "Z"],
["N", "\Gamma", "M"], ["R", "\Gamma"]]
return {'kpoints': kpoints, 'path': path}
| [
"matplotlib.pyplot.show",
"math.ceil",
"math.tan",
"math.sin",
"itertools.combinations",
"matplotlib.pyplot.figure",
"numpy.array",
"pymatgen.symmetry.analyzer.SpacegroupAnalyzer",
"math.cos",
"warnings.warn",
"mpl_toolkits.mplot3d.axes3d.Axes3D"
] | [((1074, 1153), 'pymatgen.symmetry.analyzer.SpacegroupAnalyzer', 'SpacegroupAnalyzer', (['structure'], {'symprec': 'symprec', 'angle_tolerance': 'angle_tolerance'}), '(structure, symprec=symprec, angle_tolerance=angle_tolerance)\n', (1092, 1153), False, 'from pymatgen.symmetry.analyzer import SpacegroupAnalyzer\n'), ((10754, 10766), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (10764, 10766), True, 'import matplotlib.pyplot as plt\n'), ((10780, 10798), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'axes3d.Axes3D', (['fig'], {}), '(fig)\n', (10793, 10798), False, 'from mpl_toolkits.mplot3d import axes3d\n'), ((11102, 11112), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11110, 11112), True, 'import matplotlib.pyplot as plt\n'), ((11310, 11335), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (11318, 11335), True, 'import numpy as np\n'), ((11361, 11386), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (11369, 11386), True, 'import numpy as np\n'), ((11412, 11437), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (11420, 11437), True, 'import numpy as np\n'), ((11463, 11488), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (11471, 11488), True, 'import numpy as np\n'), ((11685, 11710), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (11693, 11710), True, 'import numpy as np\n'), ((11736, 11779), 'numpy.array', 'np.array', (['[3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0]'], {}), '([3.0 / 8.0, 3.0 / 8.0, 3.0 / 4.0])\n', (11744, 11779), True, 'import numpy as np\n'), ((11805, 11830), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (11813, 11830), True, 'import numpy as np\n'), ((11856, 11899), 'numpy.array', 'np.array', (['[5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0]'], {}), '([5.0 / 8.0, 1.0 / 4.0, 5.0 / 8.0])\n', (11864, 11899), True, 'import numpy as np\n'), ((11925, 11962), 'numpy.array', 
'np.array', (['[0.5, 1.0 / 4.0, 3.0 / 4.0]'], {}), '([0.5, 1.0 / 4.0, 3.0 / 4.0])\n', (11933, 11962), True, 'import numpy as np\n'), ((11988, 12013), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (11996, 12013), True, 'import numpy as np\n'), ((12247, 12272), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (12255, 12272), True, 'import numpy as np\n'), ((12298, 12324), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.5]'], {}), '([0.5, -0.5, 0.5])\n', (12306, 12324), True, 'import numpy as np\n'), ((12350, 12378), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (12358, 12378), True, 'import numpy as np\n'), ((12404, 12429), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (12412, 12429), True, 'import numpy as np\n'), ((12626, 12651), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (12634, 12651), True, 'import numpy as np\n'), ((12677, 12702), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (12685, 12702), True, 'import numpy as np\n'), ((12728, 12753), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (12736, 12753), True, 'import numpy as np\n'), ((12779, 12804), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (12787, 12804), True, 'import numpy as np\n'), ((12830, 12855), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (12838, 12855), True, 'import numpy as np\n'), ((12881, 12906), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (12889, 12906), True, 'import numpy as np\n'), ((13193, 13218), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (13201, 13218), True, 'import numpy as np\n'), ((13244, 13270), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.5]'], {}), '([-0.5, 0.5, 0.5])\n', (13252, 13270), True, 'import numpy as np\n'), ((13296, 13321), 
'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (13304, 13321), True, 'import numpy as np\n'), ((13347, 13375), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (13355, 13375), True, 'import numpy as np\n'), ((13401, 13426), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (13409, 13426), True, 'import numpy as np\n'), ((13452, 13478), 'numpy.array', 'np.array', (['[eta, eta, -eta]'], {}), '([eta, eta, -eta])\n', (13460, 13478), True, 'import numpy as np\n'), ((13506, 13536), 'numpy.array', 'np.array', (['[-eta, 1 - eta, eta]'], {}), '([-eta, 1 - eta, eta])\n', (13514, 13536), True, 'import numpy as np\n'), ((13855, 13880), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (13863, 13880), True, 'import numpy as np\n'), ((13906, 13931), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (13914, 13931), True, 'import numpy as np\n'), ((13957, 13985), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (13965, 13985), True, 'import numpy as np\n'), ((14016, 14042), 'numpy.array', 'np.array', (['[-eta, eta, eta]'], {}), '([-eta, eta, eta])\n', (14024, 14042), True, 'import numpy as np\n'), ((14075, 14105), 'numpy.array', 'np.array', (['[eta, 1 - eta, -eta]'], {}), '([eta, 1 - eta, -eta])\n', (14083, 14105), True, 'import numpy as np\n'), ((14131, 14156), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (14139, 14156), True, 'import numpy as np\n'), ((14182, 14210), 'numpy.array', 'np.array', (['[-zeta, zeta, 0.5]'], {}), '([-zeta, zeta, 0.5])\n', (14190, 14210), True, 'import numpy as np\n'), ((14238, 14265), 'numpy.array', 'np.array', (['[0.5, 0.5, -zeta]'], {}), '([0.5, 0.5, -zeta])\n', (14246, 14265), True, 'import numpy as np\n'), ((14291, 14317), 'numpy.array', 'np.array', (['[0.5, 0.5, -0.5]'], {}), '([0.5, 0.5, -0.5])\n', (14299, 14317), True, 'import numpy as 
np\n'), ((14570, 14595), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (14578, 14595), True, 'import numpy as np\n'), ((14621, 14646), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (14629, 14646), True, 'import numpy as np\n'), ((14672, 14697), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (14680, 14697), True, 'import numpy as np\n'), ((14723, 14748), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (14731, 14748), True, 'import numpy as np\n'), ((14774, 14799), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (14782, 14799), True, 'import numpy as np\n'), ((14825, 14850), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (14833, 14850), True, 'import numpy as np\n'), ((14876, 14901), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (14884, 14901), True, 'import numpy as np\n'), ((14927, 14952), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (14935, 14952), True, 'import numpy as np\n'), ((15341, 15366), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (15349, 15366), True, 'import numpy as np\n'), ((15392, 15425), 'numpy.array', 'np.array', (['[0.5, 0.5 + zeta, zeta]'], {}), '([0.5, 0.5 + zeta, zeta])\n', (15400, 15425), True, 'import numpy as np\n'), ((15453, 15490), 'numpy.array', 'np.array', (['[0.5, 0.5 - zeta, 1 - zeta]'], {}), '([0.5, 0.5 - zeta, 1 - zeta])\n', (15461, 15490), True, 'import numpy as np\n'), ((15516, 15541), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (15524, 15541), True, 'import numpy as np\n'), ((15567, 15590), 'numpy.array', 'np.array', (['[1, 0.5, 0.5]'], {}), '([1, 0.5, 0.5])\n', (15575, 15590), True, 'import numpy as np\n'), ((15616, 15641), 'numpy.array', 'np.array', (['[0.0, eta, eta]'], {}), '([0.0, eta, eta])\n', (15624, 15641), True, 
'import numpy as np\n'), ((15669, 15700), 'numpy.array', 'np.array', (['[1, 1 - eta, 1 - eta]'], {}), '([1, 1 - eta, 1 - eta])\n', (15677, 15700), True, 'import numpy as np\n'), ((15726, 15751), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (15734, 15751), True, 'import numpy as np\n'), ((15777, 15802), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (15785, 15802), True, 'import numpy as np\n'), ((16252, 16277), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (16260, 16277), True, 'import numpy as np\n'), ((16303, 16338), 'numpy.array', 'np.array', (['[0.5, 0.5 - eta, 1 - eta]'], {}), '([0.5, 0.5 - eta, 1 - eta])\n', (16311, 16338), True, 'import numpy as np\n'), ((16366, 16397), 'numpy.array', 'np.array', (['[0.5, 0.5 + eta, eta]'], {}), '([0.5, 0.5 + eta, eta])\n', (16374, 16397), True, 'import numpy as np\n'), ((16423, 16462), 'numpy.array', 'np.array', (['[0.5 - delta, 0.5, 1 - delta]'], {}), '([0.5 - delta, 0.5, 1 - delta])\n', (16431, 16462), True, 'import numpy as np\n'), ((16490, 16525), 'numpy.array', 'np.array', (['[0.5 + delta, 0.5, delta]'], {}), '([0.5 + delta, 0.5, delta])\n', (16498, 16525), True, 'import numpy as np\n'), ((16551, 16576), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (16559, 16576), True, 'import numpy as np\n'), ((16602, 16637), 'numpy.array', 'np.array', (['[1 - phi, 0.5 - phi, 0.5]'], {}), '([1 - phi, 0.5 - phi, 0.5])\n', (16610, 16637), True, 'import numpy as np\n'), ((16665, 16696), 'numpy.array', 'np.array', (['[phi, 0.5 + phi, 0.5]'], {}), '([phi, 0.5 + phi, 0.5])\n', (16673, 16696), True, 'import numpy as np\n'), ((16722, 16747), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (16730, 16747), True, 'import numpy as np\n'), ((16773, 16798), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (16781, 16798), True, 'import numpy as np\n'), ((16824, 16849), 
'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (16832, 16849), True, 'import numpy as np\n'), ((17276, 17301), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (17284, 17301), True, 'import numpy as np\n'), ((17327, 17360), 'numpy.array', 'np.array', (['[0.5, 0.5 + zeta, zeta]'], {}), '([0.5, 0.5 + zeta, zeta])\n', (17335, 17360), True, 'import numpy as np\n'), ((17388, 17425), 'numpy.array', 'np.array', (['[0.5, 0.5 - zeta, 1 - zeta]'], {}), '([0.5, 0.5 - zeta, 1 - zeta])\n', (17396, 17425), True, 'import numpy as np\n'), ((17451, 17476), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (17459, 17476), True, 'import numpy as np\n'), ((17502, 17525), 'numpy.array', 'np.array', (['[1, 0.5, 0.5]'], {}), '([1, 0.5, 0.5])\n', (17510, 17525), True, 'import numpy as np\n'), ((17551, 17576), 'numpy.array', 'np.array', (['[0.0, eta, eta]'], {}), '([0.0, eta, eta])\n', (17559, 17576), True, 'import numpy as np\n'), ((17604, 17635), 'numpy.array', 'np.array', (['[1, 1 - eta, 1 - eta]'], {}), '([1, 1 - eta, 1 - eta])\n', (17612, 17635), True, 'import numpy as np\n'), ((17661, 17686), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (17669, 17686), True, 'import numpy as np\n'), ((17712, 17737), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (17720, 17737), True, 'import numpy as np\n'), ((18171, 18196), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (18179, 18196), True, 'import numpy as np\n'), ((18222, 18254), 'numpy.array', 'np.array', (['[-mu, mu, 0.5 - delta]'], {}), '([-mu, mu, 0.5 - delta])\n', (18230, 18254), True, 'import numpy as np\n'), ((18282, 18314), 'numpy.array', 'np.array', (['[mu, -mu, 0.5 + delta]'], {}), '([mu, -mu, 0.5 + delta])\n', (18290, 18314), True, 'import numpy as np\n'), ((18342, 18383), 'numpy.array', 'np.array', (['[0.5 - delta, 0.5 + delta, -mu]'], {}), '([0.5 - delta, 0.5 
+ delta, -mu])\n', (18350, 18383), True, 'import numpy as np\n'), ((18409, 18434), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (18417, 18434), True, 'import numpy as np\n'), ((18460, 18485), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (18468, 18485), True, 'import numpy as np\n'), ((18511, 18536), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (18519, 18536), True, 'import numpy as np\n'), ((18562, 18590), 'numpy.array', 'np.array', (['[0.25, 0.25, 0.25]'], {}), '([0.25, 0.25, 0.25])\n', (18570, 18590), True, 'import numpy as np\n'), ((18616, 18645), 'numpy.array', 'np.array', (['[-zeta, zeta, zeta]'], {}), '([-zeta, zeta, zeta])\n', (18624, 18645), True, 'import numpy as np\n'), ((18673, 18706), 'numpy.array', 'np.array', (['[zeta, 1 - zeta, -zeta]'], {}), '([zeta, 1 - zeta, -zeta])\n', (18681, 18706), True, 'import numpy as np\n'), ((18732, 18758), 'numpy.array', 'np.array', (['[eta, -eta, eta]'], {}), '([eta, -eta, eta])\n', (18740, 18758), True, 'import numpy as np\n'), ((18786, 18816), 'numpy.array', 'np.array', (['[1 - eta, eta, -eta]'], {}), '([1 - eta, eta, -eta])\n', (18794, 18816), True, 'import numpy as np\n'), ((18842, 18868), 'numpy.array', 'np.array', (['[0.5, 0.5, -0.5]'], {}), '([0.5, 0.5, -0.5])\n', (18850, 18868), True, 'import numpy as np\n'), ((19182, 19207), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (19190, 19207), True, 'import numpy as np\n'), ((19233, 19260), 'numpy.array', 'np.array', (['[zeta, zeta, 0.5]'], {}), '([zeta, zeta, 0.5])\n', (19241, 19260), True, 'import numpy as np\n'), ((19288, 19320), 'numpy.array', 'np.array', (['[-zeta, 1 - zeta, 0.5]'], {}), '([-zeta, 1 - zeta, 0.5])\n', (19296, 19320), True, 'import numpy as np\n'), ((19346, 19371), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (19354, 19371), True, 'import numpy as np\n'), ((19397, 19422), 'numpy.array', 
'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (19405, 19422), True, 'import numpy as np\n'), ((19448, 19474), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0.5]'], {}), '([-0.5, 0.5, 0.5])\n', (19456, 19474), True, 'import numpy as np\n'), ((19500, 19527), 'numpy.array', 'np.array', (['[zeta, zeta, 0.0]'], {}), '([zeta, zeta, 0.0])\n', (19508, 19527), True, 'import numpy as np\n'), ((19555, 19587), 'numpy.array', 'np.array', (['[-zeta, 1 - zeta, 0.0]'], {}), '([-zeta, 1 - zeta, 0.0])\n', (19563, 19587), True, 'import numpy as np\n'), ((19613, 19637), 'numpy.array', 'np.array', (['[-0.5, 0.5, 0]'], {}), '([-0.5, 0.5, 0])\n', (19621, 19637), True, 'import numpy as np\n'), ((19663, 19688), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (19671, 19688), True, 'import numpy as np\n'), ((19936, 19961), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (19944, 19961), True, 'import numpy as np\n'), ((19987, 20012), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (19995, 20012), True, 'import numpy as np\n'), ((20038, 20075), 'numpy.array', 'np.array', (['[1.0 / 3.0, 1.0 / 3.0, 0.5]'], {}), '([1.0 / 3.0, 1.0 / 3.0, 0.5])\n', (20046, 20075), True, 'import numpy as np\n'), ((20101, 20138), 'numpy.array', 'np.array', (['[1.0 / 3.0, 1.0 / 3.0, 0.0]'], {}), '([1.0 / 3.0, 1.0 / 3.0, 0.0])\n', (20109, 20138), True, 'import numpy as np\n'), ((20164, 20189), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (20172, 20189), True, 'import numpy as np\n'), ((20215, 20240), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (20223, 20240), True, 'import numpy as np\n'), ((20577, 20602), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (20585, 20602), True, 'import numpy as np\n'), ((20628, 20659), 'numpy.array', 'np.array', (['[eta, 0.5, 1.0 - eta]'], {}), '([eta, 0.5, 1.0 - eta])\n', (20636, 20659), True, 
'import numpy as np\n'), ((20687, 20730), 'numpy.array', 'np.array', (['[1.0 / 2.0, 1.0 - eta, eta - 1.0]'], {}), '([1.0 / 2.0, 1.0 - eta, eta - 1.0])\n', (20695, 20730), True, 'import numpy as np\n'), ((20756, 20781), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (20764, 20781), True, 'import numpy as np\n'), ((20807, 20832), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (20815, 20832), True, 'import numpy as np\n'), ((20860, 20886), 'numpy.array', 'np.array', (['[0.0, 0.0, -0.5]'], {}), '([0.0, 0.0, -0.5])\n', (20868, 20886), True, 'import numpy as np\n'), ((20912, 20935), 'numpy.array', 'np.array', (['[eta, nu, nu]'], {}), '([eta, nu, nu])\n', (20920, 20935), True, 'import numpy as np\n'), ((20963, 21004), 'numpy.array', 'np.array', (['[1.0 - nu, 1.0 - nu, 1.0 - eta]'], {}), '([1.0 - nu, 1.0 - nu, 1.0 - eta])\n', (20971, 21004), True, 'import numpy as np\n'), ((21032, 21061), 'numpy.array', 'np.array', (['[nu, nu, eta - 1.0]'], {}), '([nu, nu, eta - 1.0])\n', (21040, 21061), True, 'import numpy as np\n'), ((21087, 21116), 'numpy.array', 'np.array', (['[1.0 - nu, nu, 0.0]'], {}), '([1.0 - nu, nu, 0.0])\n', (21095, 21116), True, 'import numpy as np\n'), ((21142, 21166), 'numpy.array', 'np.array', (['[nu, 0.0, -nu]'], {}), '([nu, 0.0, -nu])\n', (21150, 21166), True, 'import numpy as np\n'), ((21192, 21217), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (21200, 21217), True, 'import numpy as np\n'), ((21553, 21578), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (21561, 21578), True, 'import numpy as np\n'), ((21604, 21630), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (21612, 21630), True, 'import numpy as np\n'), ((21656, 21681), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (21664, 21681), True, 'import numpy as np\n'), ((21707, 21738), 'numpy.array', 'np.array', (['[1 - nu, 
-nu, 1 - nu]'], {}), '([1 - nu, -nu, 1 - nu])\n', (21715, 21738), True, 'import numpy as np\n'), ((21766, 21800), 'numpy.array', 'np.array', (['[nu, nu - 1.0, nu - 1.0]'], {}), '([nu, nu - 1.0, nu - 1.0])\n', (21774, 21800), True, 'import numpy as np\n'), ((21826, 21851), 'numpy.array', 'np.array', (['[eta, eta, eta]'], {}), '([eta, eta, eta])\n', (21834, 21851), True, 'import numpy as np\n'), ((21879, 21912), 'numpy.array', 'np.array', (['[1.0 - eta, -eta, -eta]'], {}), '([1.0 - eta, -eta, -eta])\n', (21887, 21912), True, 'import numpy as np\n'), ((21938, 21964), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.5]'], {}), '([0.5, -0.5, 0.5])\n', (21946, 21964), True, 'import numpy as np\n'), ((22306, 22331), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (22314, 22331), True, 'import numpy as np\n'), ((22357, 22382), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (22365, 22382), True, 'import numpy as np\n'), ((22408, 22433), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (22416, 22433), True, 'import numpy as np\n'), ((22459, 22484), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (22467, 22484), True, 'import numpy as np\n'), ((22512, 22538), 'numpy.array', 'np.array', (['[0.5, 0.5, -0.5]'], {}), '([0.5, 0.5, -0.5])\n', (22520, 22538), True, 'import numpy as np\n'), ((22564, 22589), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (22572, 22589), True, 'import numpy as np\n'), ((22615, 22645), 'numpy.array', 'np.array', (['[0.0, eta, 1.0 - nu]'], {}), '([0.0, eta, 1.0 - nu])\n', (22623, 22645), True, 'import numpy as np\n'), ((22673, 22703), 'numpy.array', 'np.array', (['[0.0, 1.0 - eta, nu]'], {}), '([0.0, 1.0 - eta, nu])\n', (22681, 22703), True, 'import numpy as np\n'), ((22731, 22756), 'numpy.array', 'np.array', (['[0.0, eta, -nu]'], {}), '([0.0, eta, -nu])\n', (22739, 22756), True, 'import numpy as np\n'), ((22782, 
22812), 'numpy.array', 'np.array', (['[0.5, eta, 1.0 - nu]'], {}), '([0.5, eta, 1.0 - nu])\n', (22790, 22812), True, 'import numpy as np\n'), ((22840, 22868), 'numpy.array', 'np.array', (['[0.5, 1 - eta, nu]'], {}), '([0.5, 1 - eta, nu])\n', (22848, 22868), True, 'import numpy as np\n'), ((22896, 22924), 'numpy.array', 'np.array', (['[0.5, 1 - eta, nu]'], {}), '([0.5, 1 - eta, nu])\n', (22904, 22924), True, 'import numpy as np\n'), ((22950, 22975), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (22958, 22975), True, 'import numpy as np\n'), ((23001, 23026), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (23009, 23026), True, 'import numpy as np\n'), ((23054, 23080), 'numpy.array', 'np.array', (['[0.0, 0.0, -0.5]'], {}), '([0.0, 0.0, -0.5])\n', (23062, 23080), True, 'import numpy as np\n'), ((23106, 23131), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (23114, 23131), True, 'import numpy as np\n'), ((23624, 23649), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (23632, 23649), True, 'import numpy as np\n'), ((23675, 23700), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (23683, 23700), True, 'import numpy as np\n'), ((23728, 23754), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], {}), '([0.0, -0.5, 0.0])\n', (23736, 23754), True, 'import numpy as np\n'), ((23780, 23819), 'numpy.array', 'np.array', (['[1 - zeta, 1 - zeta, 1 - eta]'], {}), '([1 - zeta, 1 - zeta, 1 - eta])\n', (23788, 23819), True, 'import numpy as np\n'), ((23847, 23874), 'numpy.array', 'np.array', (['[zeta, zeta, eta]'], {}), '([zeta, zeta, eta])\n', (23855, 23874), True, 'import numpy as np\n'), ((23902, 23935), 'numpy.array', 'np.array', (['[-zeta, -zeta, 1 - eta]'], {}), '([-zeta, -zeta, 1 - eta])\n', (23910, 23935), True, 'import numpy as np\n'), ((24026, 24055), 'numpy.array', 'np.array', (['[phi, 1 - phi, 0.5]'], {}), '([phi, 1 - phi, 0.5])\n', 
(24034, 24055), True, 'import numpy as np\n'), ((24083, 24116), 'numpy.array', 'np.array', (['[1 - phi, phi - 1, 0.5]'], {}), '([1 - phi, phi - 1, 0.5])\n', (24091, 24116), True, 'import numpy as np\n'), ((24142, 24167), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (24150, 24167), True, 'import numpy as np\n'), ((24193, 24218), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (24201, 24218), True, 'import numpy as np\n'), ((24244, 24277), 'numpy.array', 'np.array', (['[1 - psi, psi - 1, 0.0]'], {}), '([1 - psi, psi - 1, 0.0])\n', (24252, 24277), True, 'import numpy as np\n'), ((24305, 24334), 'numpy.array', 'np.array', (['[psi, 1 - psi, 0.0]'], {}), '([psi, 1 - psi, 0.0])\n', (24313, 24334), True, 'import numpy as np\n'), ((24362, 24392), 'numpy.array', 'np.array', (['[psi - 1, -psi, 0.0]'], {}), '([psi - 1, -psi, 0.0])\n', (24370, 24392), True, 'import numpy as np\n'), ((24418, 24443), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (24426, 24443), True, 'import numpy as np\n'), ((24471, 24498), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (24479, 24498), True, 'import numpy as np\n'), ((24524, 24549), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (24532, 24549), True, 'import numpy as np\n'), ((25063, 25088), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (25071, 25088), True, 'import numpy as np\n'), ((25114, 25139), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (25122, 25139), True, 'import numpy as np\n'), ((25167, 25193), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], {}), '([0.0, -0.5, 0.0])\n', (25175, 25193), True, 'import numpy as np\n'), ((25219, 25258), 'numpy.array', 'np.array', (['[1 - zeta, 1 - zeta, 1 - eta]'], {}), '([1 - zeta, 1 - zeta, 1 - eta])\n', (25227, 25258), True, 'import numpy as np\n'), ((25286, 25313), 'numpy.array', 
'np.array', (['[zeta, zeta, eta]'], {}), '([zeta, zeta, eta])\n', (25294, 25313), True, 'import numpy as np\n'), ((25341, 25374), 'numpy.array', 'np.array', (['[-zeta, -zeta, 1 - eta]'], {}), '([-zeta, -zeta, 1 - eta])\n', (25349, 25374), True, 'import numpy as np\n'), ((25402, 25438), 'numpy.array', 'np.array', (['[1 - zeta, -zeta, 1 - eta]'], {}), '([1 - zeta, -zeta, 1 - eta])\n', (25410, 25438), True, 'import numpy as np\n'), ((25464, 25493), 'numpy.array', 'np.array', (['[phi, 1 - phi, 0.5]'], {}), '([phi, 1 - phi, 0.5])\n', (25472, 25493), True, 'import numpy as np\n'), ((25521, 25554), 'numpy.array', 'np.array', (['[1 - phi, phi - 1, 0.5]'], {}), '([1 - phi, phi - 1, 0.5])\n', (25529, 25554), True, 'import numpy as np\n'), ((25580, 25605), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (25588, 25605), True, 'import numpy as np\n'), ((25631, 25656), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (25639, 25656), True, 'import numpy as np\n'), ((25682, 25715), 'numpy.array', 'np.array', (['[1 - psi, psi - 1, 0.0]'], {}), '([1 - psi, psi - 1, 0.0])\n', (25690, 25715), True, 'import numpy as np\n'), ((25743, 25772), 'numpy.array', 'np.array', (['[psi, 1 - psi, 0.0]'], {}), '([psi, 1 - psi, 0.0])\n', (25751, 25772), True, 'import numpy as np\n'), ((25800, 25830), 'numpy.array', 'np.array', (['[psi - 1, -psi, 0.0]'], {}), '([psi - 1, -psi, 0.0])\n', (25808, 25830), True, 'import numpy as np\n'), ((25856, 25881), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (25864, 25881), True, 'import numpy as np\n'), ((25909, 25936), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.0]'], {}), '([-0.5, -0.5, 0.0])\n', (25917, 25936), True, 'import numpy as np\n'), ((25962, 25987), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (25970, 25987), True, 'import numpy as np\n'), ((26533, 26558), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', 
(26541, 26558), True, 'import numpy as np\n'), ((26584, 26621), 'numpy.array', 'np.array', (['[1 - phi, 1 - phi, 1 - psi]'], {}), '([1 - phi, 1 - phi, 1 - psi])\n', (26592, 26621), True, 'import numpy as np\n'), ((26649, 26678), 'numpy.array', 'np.array', (['[phi, phi - 1, psi]'], {}), '([phi, phi - 1, psi])\n', (26657, 26678), True, 'import numpy as np\n'), ((26706, 26740), 'numpy.array', 'np.array', (['[1 - phi, -phi, 1 - psi]'], {}), '([1 - phi, -phi, 1 - psi])\n', (26714, 26740), True, 'import numpy as np\n'), ((26766, 26793), 'numpy.array', 'np.array', (['[zeta, zeta, eta]'], {}), '([zeta, zeta, eta])\n', (26774, 26793), True, 'import numpy as np\n'), ((26821, 26857), 'numpy.array', 'np.array', (['[1 - zeta, -zeta, 1 - eta]'], {}), '([1 - zeta, -zeta, 1 - eta])\n', (26829, 26857), True, 'import numpy as np\n'), ((26885, 26918), 'numpy.array', 'np.array', (['[-zeta, -zeta, 1 - eta]'], {}), '([-zeta, -zeta, 1 - eta])\n', (26893, 26918), True, 'import numpy as np\n'), ((26944, 26970), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.5]'], {}), '([0.5, -0.5, 0.5])\n', (26952, 26970), True, 'import numpy as np\n'), ((26996, 27021), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (27004, 27021), True, 'import numpy as np\n'), ((27047, 27072), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (27055, 27072), True, 'import numpy as np\n'), ((27100, 27126), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], {}), '([0.0, -0.5, 0.0])\n', (27108, 27126), True, 'import numpy as np\n'), ((27152, 27178), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (27160, 27178), True, 'import numpy as np\n'), ((27204, 27229), 'numpy.array', 'np.array', (['[mu, mu, delta]'], {}), '([mu, mu, delta])\n', (27212, 27229), True, 'import numpy as np\n'), ((27257, 27288), 'numpy.array', 'np.array', (['[1 - mu, -mu, -delta]'], {}), '([1 - mu, -mu, -delta])\n', (27265, 27288), True, 'import numpy as np\n'), ((27316, 
27344), 'numpy.array', 'np.array', (['[-mu, -mu, -delta]'], {}), '([-mu, -mu, -delta])\n', (27324, 27344), True, 'import numpy as np\n'), ((27372, 27401), 'numpy.array', 'np.array', (['[mu, mu - 1, delta]'], {}), '([mu, mu - 1, delta])\n', (27380, 27401), True, 'import numpy as np\n'), ((27427, 27452), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (27435, 27452), True, 'import numpy as np\n'), ((28020, 28045), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (28028, 28045), True, 'import numpy as np\n'), ((28071, 28108), 'numpy.array', 'np.array', (['[1 - phi, 1 - phi, 1 - psi]'], {}), '([1 - phi, 1 - phi, 1 - psi])\n', (28079, 28108), True, 'import numpy as np\n'), ((28136, 28165), 'numpy.array', 'np.array', (['[phi, phi - 1, psi]'], {}), '([phi, phi - 1, psi])\n', (28144, 28165), True, 'import numpy as np\n'), ((28193, 28227), 'numpy.array', 'np.array', (['[1 - phi, -phi, 1 - psi]'], {}), '([1 - phi, -phi, 1 - psi])\n', (28201, 28227), True, 'import numpy as np\n'), ((28253, 28280), 'numpy.array', 'np.array', (['[zeta, zeta, eta]'], {}), '([zeta, zeta, eta])\n', (28261, 28280), True, 'import numpy as np\n'), ((28308, 28344), 'numpy.array', 'np.array', (['[1 - zeta, -zeta, 1 - eta]'], {}), '([1 - zeta, -zeta, 1 - eta])\n', (28316, 28344), True, 'import numpy as np\n'), ((28372, 28405), 'numpy.array', 'np.array', (['[-zeta, -zeta, 1 - eta]'], {}), '([-zeta, -zeta, 1 - eta])\n', (28380, 28405), True, 'import numpy as np\n'), ((28431, 28457), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.5]'], {}), '([0.5, -0.5, 0.5])\n', (28439, 28457), True, 'import numpy as np\n'), ((28483, 28508), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (28491, 28508), True, 'import numpy as np\n'), ((28534, 28559), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (28542, 28559), True, 'import numpy as np\n'), ((28587, 28613), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], 
{}), '([0.0, -0.5, 0.0])\n', (28595, 28613), True, 'import numpy as np\n'), ((28639, 28665), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (28647, 28665), True, 'import numpy as np\n'), ((28691, 28716), 'numpy.array', 'np.array', (['[mu, mu, delta]'], {}), '([mu, mu, delta])\n', (28699, 28716), True, 'import numpy as np\n'), ((28744, 28775), 'numpy.array', 'np.array', (['[1 - mu, -mu, -delta]'], {}), '([1 - mu, -mu, -delta])\n', (28752, 28775), True, 'import numpy as np\n'), ((28803, 28831), 'numpy.array', 'np.array', (['[-mu, -mu, -delta]'], {}), '([-mu, -mu, -delta])\n', (28811, 28831), True, 'import numpy as np\n'), ((28859, 28888), 'numpy.array', 'np.array', (['[mu, mu - 1, delta]'], {}), '([mu, mu - 1, delta])\n', (28867, 28888), True, 'import numpy as np\n'), ((28914, 28939), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (28922, 28939), True, 'import numpy as np\n'), ((29685, 29710), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (29693, 29710), True, 'import numpy as np\n'), ((29736, 29761), 'numpy.array', 'np.array', (['[nu, nu, omega]'], {}), '([nu, nu, omega])\n', (29744, 29761), True, 'import numpy as np\n'), ((29789, 29826), 'numpy.array', 'np.array', (['[1 - nu, 1 - nu, 1 - omega]'], {}), '([1 - nu, 1 - nu, 1 - omega])\n', (29797, 29826), True, 'import numpy as np\n'), ((29854, 29883), 'numpy.array', 'np.array', (['[nu, nu - 1, omega]'], {}), '([nu, nu - 1, omega])\n', (29862, 29883), True, 'import numpy as np\n'), ((29909, 29936), 'numpy.array', 'np.array', (['[zeta, zeta, eta]'], {}), '([zeta, zeta, eta])\n', (29917, 29936), True, 'import numpy as np\n'), ((29964, 30000), 'numpy.array', 'np.array', (['[1 - zeta, -zeta, 1 - eta]'], {}), '([1 - zeta, -zeta, 1 - eta])\n', (29972, 30000), True, 'import numpy as np\n'), ((30028, 30061), 'numpy.array', 'np.array', (['[-zeta, -zeta, 1 - eta]'], {}), '([-zeta, -zeta, 1 - eta])\n', (30036, 30061), True, 'import numpy 
as np\n'), ((30087, 30116), 'numpy.array', 'np.array', (['[rho, 1 - rho, 0.5]'], {}), '([rho, 1 - rho, 0.5])\n', (30095, 30116), True, 'import numpy as np\n'), ((30144, 30177), 'numpy.array', 'np.array', (['[1 - rho, rho - 1, 0.5]'], {}), '([1 - rho, rho - 1, 0.5])\n', (30152, 30177), True, 'import numpy as np\n'), ((30203, 30228), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (30211, 30228), True, 'import numpy as np\n'), ((30254, 30279), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (30262, 30279), True, 'import numpy as np\n'), ((30305, 30330), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (30313, 30330), True, 'import numpy as np\n'), ((30358, 30384), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], {}), '([0.0, -0.5, 0.0])\n', (30366, 30384), True, 'import numpy as np\n'), ((30410, 30436), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (30418, 30436), True, 'import numpy as np\n'), ((30462, 30487), 'numpy.array', 'np.array', (['[mu, mu, delta]'], {}), '([mu, mu, delta])\n', (30470, 30487), True, 'import numpy as np\n'), ((30515, 30546), 'numpy.array', 'np.array', (['[1 - mu, -mu, -delta]'], {}), '([1 - mu, -mu, -delta])\n', (30523, 30546), True, 'import numpy as np\n'), ((30574, 30602), 'numpy.array', 'np.array', (['[-mu, -mu, -delta]'], {}), '([-mu, -mu, -delta])\n', (30582, 30602), True, 'import numpy as np\n'), ((30630, 30659), 'numpy.array', 'np.array', (['[mu, mu - 1, delta]'], {}), '([mu, mu - 1, delta])\n', (30638, 30659), True, 'import numpy as np\n'), ((30685, 30710), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (30693, 30710), True, 'import numpy as np\n'), ((30983, 31008), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (30991, 31008), True, 'import numpy as np\n'), ((31034, 31059), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.0]'], {}), '([0.5, 0.5, 0.0])\n', (31042, 
31059), True, 'import numpy as np\n'), ((31085, 31110), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.5]'], {}), '([0.0, 0.5, 0.5])\n', (31093, 31110), True, 'import numpy as np\n'), ((31136, 31161), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.5]'], {}), '([0.5, 0.0, 0.5])\n', (31144, 31161), True, 'import numpy as np\n'), ((31187, 31212), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (31195, 31212), True, 'import numpy as np\n'), ((31238, 31263), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (31246, 31263), True, 'import numpy as np\n'), ((31289, 31314), 'numpy.array', 'np.array', (['[0.0, 0.5, 0.0]'], {}), '([0.0, 0.5, 0.0])\n', (31297, 31314), True, 'import numpy as np\n'), ((31340, 31365), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (31348, 31365), True, 'import numpy as np\n'), ((31610, 31635), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (31618, 31635), True, 'import numpy as np\n'), ((31661, 31687), 'numpy.array', 'np.array', (['[0.5, -0.5, 0.0]'], {}), '([0.5, -0.5, 0.0])\n', (31669, 31687), True, 'import numpy as np\n'), ((31713, 31738), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.5]'], {}), '([0.0, 0.0, 0.5])\n', (31721, 31738), True, 'import numpy as np\n'), ((31764, 31791), 'numpy.array', 'np.array', (['[-0.5, -0.5, 0.5]'], {}), '([-0.5, -0.5, 0.5])\n', (31772, 31791), True, 'import numpy as np\n'), ((31817, 31843), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.5]'], {}), '([0.0, -0.5, 0.5])\n', (31825, 31843), True, 'import numpy as np\n'), ((31869, 31895), 'numpy.array', 'np.array', (['[0.0, -0.5, 0.0]'], {}), '([0.0, -0.5, 0.0])\n', (31877, 31895), True, 'import numpy as np\n'), ((31921, 31946), 'numpy.array', 'np.array', (['[0.5, 0.0, 0.0]'], {}), '([0.5, 0.0, 0.0])\n', (31929, 31946), True, 'import numpy as np\n'), ((31972, 31998), 'numpy.array', 'np.array', (['[-0.5, 0.0, 0.5]'], {}), '([-0.5, 0.0, 0.5])\n', (31980, 31998), True, 
'import numpy as np\n'), ((6383, 6424), 'numpy.array', 'np.array', (["self.kpath['kpoints'][b[i - 1]]"], {}), "(self.kpath['kpoints'][b[i - 1]])\n", (6391, 6424), True, 'import numpy as np\n'), ((6447, 6484), 'numpy.array', 'np.array', (["self.kpath['kpoints'][b[i]]"], {}), "(self.kpath['kpoints'][b[i]])\n", (6455, 6484), True, 'import numpy as np\n'), ((8057, 8093), 'itertools.combinations', 'itertools.combinations', (['bz[iface]', '(2)'], {}), '(bz[iface], 2)\n', (8079, 8093), False, 'import itertools\n'), ((26277, 26287), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (26280, 26287), False, 'from math import cos\n'), ((27764, 27774), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (27767, 27774), False, 'from math import cos\n'), ((29583, 29593), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (29586, 29593), False, 'from math import cos\n'), ((6680, 6709), 'math.ceil', 'ceil', (['(distance * line_density)'], {}), '(distance * line_density)\n', (6684, 6709), False, 'from math import ceil\n'), ((20478, 20488), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (20481, 20488), False, 'from math import cos\n'), ((20501, 20511), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (20504, 20511), False, 'from math import cos\n'), ((21466, 21482), 'math.tan', 'tan', (['(alpha / 2.0)'], {}), '(alpha / 2.0)\n', (21469, 21482), False, 'from math import tan\n'), ((22218, 22227), 'math.sin', 'sin', (['beta'], {}), '(beta)\n', (22221, 22227), False, 'from math import sin\n'), ((22263, 22272), 'math.cos', 'cos', (['beta'], {}), '(beta)\n', (22266, 22272), False, 'from math import cos\n'), ((23413, 23423), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (23416, 23423), False, 'from math import sin\n'), ((23465, 23475), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (23468, 23475), False, 'from math import cos\n'), ((23580, 23590), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (23583, 23590), False, 'from math import cos\n'), ((24852, 24862), 'math.sin', 'sin', 
(['alpha'], {}), '(alpha)\n', (24855, 24862), False, 'from math import sin\n'), ((24904, 24914), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (24907, 24914), False, 'from math import cos\n'), ((25019, 25029), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (25022, 25029), False, 'from math import cos\n'), ((26427, 26437), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (26430, 26437), False, 'from math import cos\n'), ((27914, 27924), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (27917, 27924), False, 'from math import cos\n'), ((29312, 29322), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (29315, 29322), False, 'from math import cos\n'), ((29396, 29406), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (29399, 29406), False, 'from math import cos\n'), ((1898, 1954), 'warnings.warn', 'warn', (["('Unexpected value for spg_symbol: %s' % spg_symbol)"], {}), "('Unexpected value for spg_symbol: %s' % spg_symbol)\n", (1902, 1954), False, 'from warnings import warn\n'), ((2388, 2444), 'warnings.warn', 'warn', (["('Unexpected value for spg_symbol: %s' % spg_symbol)"], {}), "('Unexpected value for spg_symbol: %s' % spg_symbol)\n", (2392, 2444), False, 'from warnings import warn\n'), ((22196, 22205), 'math.cos', 'cos', (['beta'], {}), '(beta)\n', (22199, 22205), False, 'from math import cos\n'), ((23390, 23400), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (23393, 23400), False, 'from math import cos\n'), ((23524, 23534), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (23527, 23534), False, 'from math import sin\n'), ((24829, 24839), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (24832, 24839), False, 'from math import cos\n'), ((24963, 24973), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (24966, 24973), False, 'from math import sin\n'), ((26375, 26385), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (26378, 26385), False, 'from math import sin\n'), ((27862, 27872), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (27865, 27872), False, 'from math 
import sin\n'), ((29256, 29266), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (29259, 29266), False, 'from math import sin\n'), ((29622, 29632), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (29625, 29632), False, 'from math import cos\n'), ((26339, 26349), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (26342, 26349), False, 'from math import cos\n'), ((27826, 27836), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (27829, 27836), False, 'from math import cos\n'), ((29222, 29232), 'math.cos', 'cos', (['alpha'], {}), '(alpha)\n', (29225, 29232), False, 'from math import cos\n'), ((29529, 29539), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (29532, 29539), False, 'from math import sin\n'), ((3236, 3292), 'warnings.warn', 'warn', (["('Unexpected value for spg_symbol: %s' % spg_symbol)"], {}), "('Unexpected value for spg_symbol: %s' % spg_symbol)\n", (3240, 3292), False, 'from warnings import warn\n'), ((5651, 5697), 'warnings.warn', 'warn', (["('Unknown lattice type %s' % lattice_type)"], {}), "('Unknown lattice type %s' % lattice_type)\n", (5655, 5697), False, 'from warnings import warn\n'), ((4936, 4992), 'warnings.warn', 'warn', (["('Unexpected value for spg_symbol: %s' % spg_symbol)"], {}), "('Unexpected value for spg_symbol: %s' % spg_symbol)\n", (4940, 4992), False, 'from warnings import warn\n'), ((4329, 4350), 'math.cos', 'cos', (['(alpha * pi / 180)'], {}), '(alpha * pi / 180)\n', (4332, 4350), False, 'from math import cos\n'), ((4528, 4549), 'math.cos', 'cos', (['(alpha * pi / 180)'], {}), '(alpha * pi / 180)\n', (4531, 4549), False, 'from math import cos\n'), ((4729, 4750), 'math.cos', 'cos', (['(alpha * pi / 180)'], {}), '(alpha * pi / 180)\n', (4732, 4750), False, 'from math import cos\n'), ((4395, 4405), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (4398, 4405), False, 'from math import sin\n'), ((4595, 4605), 'math.sin', 'sin', (['alpha'], {}), '(alpha)\n', (4598, 4605), False, 'from math import sin\n'), ((4796, 4806), 'math.sin', 
'sin', (['alpha'], {}), '(alpha)\n', (4799, 4806), False, 'from math import sin\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.